FFmpeg
dxtory.c
1 /*
2  * Dxtory decoder
3  *
4  * Copyright (c) 2011 Konstantin Shishkov
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <inttypes.h>
24 
25 #include "libavutil/common.h"
26 #include "libavutil/intreadwrite.h"
27 
28 #define BITSTREAM_READER_LE
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "get_bits.h"
33 #include "unary.h"
34 #include "thread.h"
35 
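/* Size in bytes of an uncompressed frame for the given pixel format:
 * 2 or 3 bytes per pixel for the packed RGB formats, luma plus two
 * subsampled chroma planes for planar YUV, 0 for unsupported formats. */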
36 static int64_t get_raw_size(enum AVPixelFormat fmt, int width, int height)
37 {
38  switch (fmt) {
39  case AV_PIX_FMT_RGB555LE:
40  case AV_PIX_FMT_RGB565LE:
41  return width * height * 2LL;
42  case AV_PIX_FMT_RGB24:
43  case AV_PIX_FMT_BGR24:
44  case AV_PIX_FMT_YUV444P:
45  return width * height * 3LL;
46  case AV_PIX_FMT_YUV420P:
47  return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 1) * AV_CEIL_RSHIFT(height, 1);
48  case AV_PIX_FMT_YUV410P:
49  return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 2) * AV_CEIL_RSHIFT(height, 2);
50  }
51 
52  return 0;
53 }
54 
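/* For vertically flipped frames, point each plane at its last row and negate
 * its linesize so the image is written bottom-up; note the intentional
 * fall-through from YUV444P into the packed-RGB cases, which flips plane 0.
 * Calling the function a second time restores the original pointers and
 * positive linesizes. */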
55 static void do_vflip(AVCodecContext *avctx, AVFrame *pic, int vflip)
56 {
57  if (!vflip)
58  return;
59 
60  switch (pic->format) {
61  case AV_PIX_FMT_YUV444P:
62  pic->data[1] += (avctx->height - 1) * pic->linesize[1];
63  pic->linesize[1] = -pic->linesize[1];
64  pic->data[2] += (avctx->height - 1) * pic->linesize[2];
65  pic->linesize[2] = -pic->linesize[2];
66  case AV_PIX_FMT_RGB555LE:
67  case AV_PIX_FMT_RGB565LE:
68  case AV_PIX_FMT_BGR24:
69  case AV_PIX_FMT_RGB24:
70  pic->data[0] += (avctx->height - 1) * pic->linesize[0];
71  pic->linesize[0] = -pic->linesize[0];
72  break;
73  case AV_PIX_FMT_YUV410P:
74  pic->data[0] += (avctx->height - 1) * pic->linesize[0];
75  pic->linesize[0] = -pic->linesize[0];
76  pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[1];
77  pic->linesize[1] = -pic->linesize[1];
78  pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[2];
79  pic->linesize[2] = -pic->linesize[2];
80  break;
81  case AV_PIX_FMT_YUV420P:
82  pic->data[0] += (avctx->height - 1) * pic->linesize[0];
83  pic->linesize[0] = -pic->linesize[0];
84  pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[1];
85  pic->linesize[1] = -pic->linesize[1];
86  pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[2];
87  pic->linesize[2] = -pic->linesize[2];
88  break;
89  }
90 }
91 
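/* "v1" (uncompressed) path shared by the packed RGB formats: the packet
 * already holds the raw plane, so it is only validated against the expected
 * raw size and copied into the frame line by line. */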
92 static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
93  const uint8_t *src, int src_size,
94  int id, int bpp, uint32_t vflipped)
95 {
96  int h;
97  uint8_t *dst;
98  int ret;
99 
100  if (src_size < get_raw_size(id, avctx->width, avctx->height)) {
101  av_log(avctx, AV_LOG_ERROR, "packet too small\n");
102  return AVERROR_INVALIDDATA;
103  }
104 
105  avctx->pix_fmt = id;
106  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
107  return ret;
108 
109  do_vflip(avctx, pic, vflipped);
110 
111  dst = pic->data[0];
112  for (h = 0; h < avctx->height; h++) {
113  memcpy(dst, src, avctx->width * bpp);
114  src += avctx->width * bpp;
115  dst += pic->linesize[0];
116  }
117 
118  do_vflip(avctx, pic, vflipped);
119 
120  return 0;
121 }
122 
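/* Uncompressed YUV 4:1:0: the payload is laid out as 4x4 luma tiles followed
 * by one U and one V byte per tile (0x80 is added to each chroma byte), with
 * extra margin samples appended when the frame size is not a multiple of 4. */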
123 static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
124  const uint8_t *src, int src_size,
125  uint32_t vflipped)
126 {
127  int h, w;
128  uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
129  int height, width, hmargin, vmargin;
130  int huvborder;
131  int ret;
132 
133  if (src_size < get_raw_size(AV_PIX_FMT_YUV410P, avctx->width, avctx->height)) {
134  av_log(avctx, AV_LOG_ERROR, "packet too small\n");
135  return AVERROR_INVALIDDATA;
136  }
137 
138  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
139  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
140  return ret;
141 
142  do_vflip(avctx, pic, vflipped);
143 
144  height = avctx->height & ~3;
145  width = avctx->width & ~3;
146  hmargin = avctx->width - width;
147  vmargin = avctx->height - height;
148  huvborder = AV_CEIL_RSHIFT(avctx->width, 2) - 1;
149 
150  Y1 = pic->data[0];
151  Y2 = pic->data[0] + pic->linesize[0];
152  Y3 = pic->data[0] + pic->linesize[0] * 2;
153  Y4 = pic->data[0] + pic->linesize[0] * 3;
154  U = pic->data[1];
155  V = pic->data[2];
156  for (h = 0; h < height; h += 4) {
157  for (w = 0; w < width; w += 4) {
158  AV_COPY32U(Y1 + w, src);
159  AV_COPY32U(Y2 + w, src + 4);
160  AV_COPY32U(Y3 + w, src + 8);
161  AV_COPY32U(Y4 + w, src + 12);
162  U[w >> 2] = src[16] + 0x80;
163  V[w >> 2] = src[17] + 0x80;
164  src += 18;
165  }
166  if (hmargin) {
167  for (w = 0; w < hmargin; w++) {
168  Y1[width + w] = src[w];
169  Y2[width + w] = src[w + hmargin * 1];
170  Y3[width + w] = src[w + hmargin * 2];
171  Y4[width + w] = src[w + hmargin * 3];
172  }
173  src += 4 * hmargin;
174  U[huvborder] = src[0] + 0x80;
175  V[huvborder] = src[1] + 0x80;
176  src += 2;
177  }
178  Y1 += pic->linesize[0] * 4;
179  Y2 += pic->linesize[0] * 4;
180  Y3 += pic->linesize[0] * 4;
181  Y4 += pic->linesize[0] * 4;
182  U += pic->linesize[1];
183  V += pic->linesize[2];
184  }
185 
186  if (vmargin) {
187  for (w = 0; w < width; w += 4) {
188  AV_COPY32U(Y1 + w, src);
189  if (vmargin > 1)
190  AV_COPY32U(Y2 + w, src + 4);
191  if (vmargin > 2)
192  AV_COPY32U(Y3 + w, src + 8);
193  src += 4 * vmargin;
194  U[w >> 2] = src[0] + 0x80;
195  V[w >> 2] = src[1] + 0x80;
196  src += 2;
197  }
198  if (hmargin) {
199  for (w = 0; w < hmargin; w++) {
200  AV_COPY32U(Y1 + w, src);
201  if (vmargin > 1)
202  AV_COPY32U(Y2 + w, src + 4);
203  if (vmargin > 2)
204  AV_COPY32U(Y3 + w, src + 8);
205  src += 4 * vmargin;
206  }
207  U[huvborder] = src[0] + 0x80;
208  V[huvborder] = src[1] + 0x80;
209  src += 2;
210  }
211  }
212 
213  do_vflip(avctx, pic, vflipped);
214 
215  return 0;
216 }
217 
218 static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
219  const uint8_t *src, int src_size,
220  uint32_t vflipped)
221 {
222  int h, w;
223  uint8_t *Y1, *Y2, *U, *V;
224  int height, width, hmargin, vmargin;
225  int huvborder;
226  int ret;
227 
228  if (src_size < get_raw_size(AV_PIX_FMT_YUV420P, avctx->width, avctx->height)) {
229  av_log(avctx, AV_LOG_ERROR, "packet too small\n");
230  return AVERROR_INVALIDDATA;
231  }
232 
233  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
234  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
235  return ret;
236 
237  do_vflip(avctx, pic, vflipped);
238 
239  height = avctx->height & ~1;
240  width = avctx->width & ~1;
241  hmargin = avctx->width - width;
242  vmargin = avctx->height - height;
243  huvborder = AV_CEIL_RSHIFT(avctx->width, 1) - 1;
244 
245  Y1 = pic->data[0];
246  Y2 = pic->data[0] + pic->linesize[0];
247  U = pic->data[1];
248  V = pic->data[2];
249  for (h = 0; h < height; h += 2) {
250  for (w = 0; w < width; w += 2) {
251  AV_COPY16(Y1 + w, src);
252  AV_COPY16(Y2 + w, src + 2);
253  U[w >> 1] = src[4] + 0x80;
254  V[w >> 1] = src[5] + 0x80;
255  src += 6;
256  }
257  if (hmargin) {
258  Y1[width + 1] = src[0];
259  Y2[width + 1] = src[1];
260  U[huvborder] = src[2] + 0x80;
261  V[huvborder] = src[3] + 0x80;
262  src += 4;
263  }
264  Y1 += pic->linesize[0] * 2;
265  Y2 += pic->linesize[0] * 2;
266  U += pic->linesize[1];
267  V += pic->linesize[2];
268  }
269 
270  if (vmargin) {
271  for (w = 0; w < width; w += 2) {
272  AV_COPY16U(Y1 + w, src);
273  U[w >> 1] = src[0] + 0x80;
274  V[w >> 1] = src[1] + 0x80;
275  src += 4;
276  }
277  if (hmargin) {
278  Y1[w] = src[0];
279  U[huvborder] = src[1] + 0x80;
280  V[huvborder] = src[2] + 0x80;
281  src += 3;
282  }
283  }
284 
285  do_vflip(avctx, pic, vflipped);
286 
287  return 0;
288 }
289 
290 static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
291  const uint8_t *src, int src_size,
292  uint32_t vflipped)
293 {
294  int h, w;
295  uint8_t *Y, *U, *V;
296  int ret;
297 
298  if (src_size < get_raw_size(AV_PIX_FMT_YUV444P, avctx->width, avctx->height)) {
299  av_log(avctx, AV_LOG_ERROR, "packet too small\n");
300  return AVERROR_INVALIDDATA;
301  }
302 
303  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
304  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
305  return ret;
306 
307  do_vflip(avctx, pic, vflipped);
308 
309  Y = pic->data[0];
310  U = pic->data[1];
311  V = pic->data[2];
312  for (h = 0; h < avctx->height; h++) {
313  for (w = 0; w < avctx->width; w++) {
314  Y[w] = *src++;
315  U[w] = *src++ ^ 0x80;
316  V[w] = *src++ ^ 0x80;
317  }
318  Y += pic->linesize[0];
319  U += pic->linesize[1];
320  V += pic->linesize[2];
321  }
322 
323  do_vflip(avctx, pic, vflipped);
324 
325  return 0;
326 }
327 
328 static const uint8_t def_lru[8] = { 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xFF };
329 static const uint8_t def_lru_555[8] = { 0x00, 0x08, 0x10, 0x18, 0x1F };
330 static const uint8_t def_lru_565[8] = { 0x00, 0x08, 0x10, 0x20, 0x30, 0x3F };
331 
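/* Decode one 8-bit symbol: a unary code selects an entry of the 8-element
 * LRU table, while a zero-length unary code escapes to a literal 8-bit
 * value; the decoded value is then moved to the front of the table. */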
332 static inline uint8_t decode_sym(GetBitContext *gb, uint8_t lru[8])
333 {
334  uint8_t c, val;
335 
336  c = get_unary(gb, 0, 8);
337  if (!c) {
338  val = get_bits(gb, 8);
339  memmove(lru + 1, lru, sizeof(*lru) * (8 - 1));
340  } else {
341  val = lru[c - 1];
342  memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
343  }
344  lru[0] = val;
345 
346  return val;
347 }
348 
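/* Validate a slice against the remaining input: it must fit in the packet,
 * exceed its 16-byte header, and (non-fatally) match the 32-bit payload size
 * stored at the start of the slice. */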
349 static int check_slice_size(AVCodecContext *avctx,
350  const uint8_t *src, int src_size,
351  int slice_size, int off)
352 {
353  int cur_slice_size;
354 
355  if (slice_size > src_size - off) {
356  av_log(avctx, AV_LOG_ERROR,
357  "invalid slice size %d (only %d bytes left)\n",
358  slice_size, src_size - off);
359  return AVERROR_INVALIDDATA;
360  }
361  if (slice_size <= 16) {
362  av_log(avctx, AV_LOG_ERROR, "invalid slice size %d\n",
363  slice_size);
364  return AVERROR_INVALIDDATA;
365  }
366 
367  cur_slice_size = AV_RL32(src + off);
368  if (cur_slice_size != slice_size - 16) {
369  av_log(avctx, AV_LOG_ERROR,
370  "Slice sizes mismatch: got %d instead of %d\n",
371  cur_slice_size, slice_size - 16);
372  }
373 
374  return 0;
375 }
376 
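/* Read the 16-bit slice count and compute the offset of the first slice:
 * the header consists of the count plus 4 bytes per slice, padded to a
 * 16-byte boundary. */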
377 static int load_buffer(AVCodecContext *avctx,
378  const uint8_t *src, int src_size,
379  GetByteContext *gb,
380  int *nslices, int *off)
381 {
382  bytestream2_init(gb, src, src_size);
383  *nslices = bytestream2_get_le16(gb);
384  *off = FFALIGN(*nslices * 4 + 2, 16);
385  if (src_size < *off) {
386  av_log(avctx, AV_LOG_ERROR, "no slice data\n");
387  return AVERROR_INVALIDDATA;
388  }
389 
390  if (!*nslices) {
391  avpriv_request_sample(avctx, "%d slices for %dx%d", *nslices,
392  avctx->width, avctx->height);
393  return AVERROR_PATCHWELCOME;
394  }
395 
396  return 0;
397 }
398 
399 static inline uint8_t decode_sym_565(GetBitContext *gb, uint8_t lru[8],
400  int bits)
401 {
402  uint8_t c, val;
403 
404  c = get_unary(gb, 0, bits);
405  if (!c) {
406  val = get_bits(gb, bits);
407  memmove(lru + 1, lru, sizeof(*lru) * (6 - 1));
408  } else {
409  val = lru[c - 1];
410  memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
411  }
412  lru[0] = val;
413 
414  return val;
415 }
416 
417 typedef int (*decode_slice_func)(GetBitContext *gb, AVFrame *frame,
418  int line, int height, uint8_t lru[3][8]);
419 
420 typedef void (*setup_lru_func)(uint8_t lru[3][8]);
421 
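/* Common driver for the LRU-compressed ("v2") modes: a first pass over the
 * slice table sanity-checks the declared slice sizes against the packet
 * (honouring discard_damaged_percentage), then each slice is handed to the
 * format-specific slice decoder with freshly initialized LRU tables. */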
422 static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
423  const uint8_t *src, int src_size,
424  decode_slice_func decode_slice,
425  setup_lru_func setup_lru,
426  enum AVPixelFormat fmt,
427  uint32_t vflipped)
428 {
429  GetByteContext gb, gb_check;
430  GetBitContext gb2;
431  int nslices, slice, line = 0;
432  uint32_t off, slice_size;
433  uint64_t off_check;
434  uint8_t lru[3][8];
435  int ret;
436 
437  ret = load_buffer(avctx, src, src_size, &gb, &nslices, &off);
438  if (ret < 0)
439  return ret;
440 
441  off_check = off;
442  gb_check = gb;
443  for (slice = 0; slice < nslices; slice++) {
444  slice_size = bytestream2_get_le32(&gb_check);
445 
446  if (slice_size <= 16 + (avctx->height * avctx->width / (8 * nslices)))
447  return AVERROR_INVALIDDATA;
448  off_check += slice_size;
449  }
450 
451  if (off_check - avctx->discard_damaged_percentage*off_check/100 > src_size)
452  return AVERROR_INVALIDDATA;
453 
454  avctx->pix_fmt = fmt;
455  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
456  return ret;
457 
458  do_vflip(avctx, pic, vflipped);
459 
460  for (slice = 0; slice < nslices; slice++) {
461  slice_size = bytestream2_get_le32(&gb);
462 
463  setup_lru(lru);
464 
465  ret = check_slice_size(avctx, src, src_size, slice_size, off);
466  if (ret < 0)
467  return ret;
468 
469  if ((ret = init_get_bits8(&gb2, src + off + 16, slice_size - 16)) < 0)
470  return ret;
471 
472  line += decode_slice(&gb2, pic, line, avctx->height - line, lru);
473 
474  off += slice_size;
475  }
476 
477  if (avctx->height - line) {
478  avpriv_request_sample(avctx, "Not enough slice data available");
479  }
480 
481  do_vflip(avctx, pic, vflipped);
482 
483  return 0;
484 }
485 
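/* Decode an RGB555/RGB565 slice into packed RGB24, expanding the 5- or 6-bit
 * components to 8 bits by bit replication. */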
486 static av_always_inline
487 int dx2_decode_slice_5x5(GetBitContext *gb, AVFrame *frame,
488  int line, int left, uint8_t lru[3][8],
489  int is_565)
490 {
491  int x, y;
492  int r, g, b;
493  int width = frame->width;
494  int stride = frame->linesize[0];
495  uint8_t *dst = frame->data[0] + stride * line;
496 
497  for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
498  for (x = 0; x < width; x++) {
499  b = decode_sym_565(gb, lru[0], 5);
500  g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
501  r = decode_sym_565(gb, lru[2], 5);
502  dst[x * 3 + 0] = (r << 3) | (r >> 2);
503  dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
504  dst[x * 3 + 2] = (b << 3) | (b >> 2);
505  }
506 
507  dst += stride;
508  }
509 
510  return y;
511 }
512 
513 static void setup_lru_555(uint8_t lru[3][8])
514 {
515  memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
516  memcpy(lru[1], def_lru_555, 8 * sizeof(*def_lru));
517  memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
518 }
519 
520 static void setup_lru_565(uint8_t lru[3][8])
521 {
522  memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
523  memcpy(lru[1], def_lru_565, 8 * sizeof(*def_lru));
524  memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
525 }
526 
527 static int dx2_decode_slice_555(GetBitContext *gb, AVFrame *frame,
528  int line, int left, uint8_t lru[3][8])
529 {
530  return dx2_decode_slice_5x5(gb, frame, line, left, lru, 0);
531 }
532 
533 static int dx2_decode_slice_565(GetBitContext *gb, AVFrame *frame,
534  int line, int left, uint8_t lru[3][8])
535 {
536  return dx2_decode_slice_5x5(gb, frame, line, left, lru, 1);
537 }
538 
539 static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic,
540  const uint8_t *src, int src_size, int is_565,
541  uint32_t vflipped)
542 {
543  enum AVPixelFormat fmt = AV_PIX_FMT_RGB24;
544  if (is_565)
545  return dxtory_decode_v2(avctx, pic, src, src_size,
546  dx2_decode_slice_565,
547  setup_lru_565,
548  fmt, vflipped);
549  else
550  return dxtory_decode_v2(avctx, pic, src, src_size,
551  dx2_decode_slice_555,
552  setup_lru_555,
553  fmt, vflipped);
554 }
555 
556 static int dx2_decode_slice_rgb(GetBitContext *gb, AVFrame *frame,
557  int line, int left, uint8_t lru[3][8])
558 {
559  int x, y;
560  int width = frame->width;
561  int stride = frame->linesize[0];
562  uint8_t *dst = frame->data[0] + stride * line;
563 
564  for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
565  for (x = 0; x < width; x++) {
566  dst[x * 3 + 0] = decode_sym(gb, lru[0]);
567  dst[x * 3 + 1] = decode_sym(gb, lru[1]);
568  dst[x * 3 + 2] = decode_sym(gb, lru[2]);
569  }
570 
571  dst += stride;
572  }
573 
574  return y;
575 }
576 
577 static void default_setup_lru(uint8_t lru[3][8])
578 {
579  int i;
580 
581  for (i = 0; i < 3; i++)
582  memcpy(lru[i], def_lru, 8 * sizeof(*def_lru));
583 }
584 
585 static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic,
586  const uint8_t *src, int src_size,
587  uint32_t vflipped)
588 {
589  return dxtory_decode_v2(avctx, pic, src, src_size,
590  dx2_decode_slice_rgb,
591  default_setup_lru,
592  AV_PIX_FMT_BGR24, vflipped);
593 }
594 
595 static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame,
596  int line, int left,
597  uint8_t lru[3][8])
598 {
599  int x, y, i, j;
600  int width = frame->width;
601 
602  int ystride = frame->linesize[0];
603  int ustride = frame->linesize[1];
604  int vstride = frame->linesize[2];
605 
606  uint8_t *Y = frame->data[0] + ystride * line;
607  uint8_t *U = frame->data[1] + (ustride >> 2) * line;
608  uint8_t *V = frame->data[2] + (vstride >> 2) * line;
609 
610  int h, w, hmargin, vmargin;
611  int huvborder;
612 
613  h = frame->height & ~3;
614  w = frame->width & ~3;
615  hmargin = frame->width - w;
616  vmargin = frame->height - h;
617  huvborder = AV_CEIL_RSHIFT(frame->width, 2) - 1;
618 
619  for (y = 0; y < left - 3 && get_bits_left(gb) >= 18 * w / 4 + hmargin * 4 + (!!hmargin * 2); y += 4) {
620  for (x = 0; x < w; x += 4) {
621  for (j = 0; j < 4; j++)
622  for (i = 0; i < 4; i++)
623  Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
624  U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
625  V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
626  }
627  if (hmargin) {
628  for (j = 0; j < 4; j++)
629  for (i = 0; i < hmargin; i++)
630  Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
631  U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
632  V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
633  }
634 
635  Y += ystride * 4;
636  U += ustride;
637  V += vstride;
638  }
639 
640  if (vmargin && y + vmargin == left) {
641  for (x = 0; x < width; x += 4) {
642  for (j = 0; j < vmargin; j++)
643  for (i = 0; i < 4; i++)
644  Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
645  U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
646  V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
647  }
648  if (hmargin) {
649  for (j = 0; j < vmargin; j++) {
650  for (i = 0; i < hmargin; i++)
651  Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
652  }
653  U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
654  V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
655  }
656 
657  y += vmargin;
658  }
659 
660  return y;
661 }
662 
663 
664 static int dxtory_decode_v2_410(AVCodecContext *avctx, AVFrame *pic,
665  const uint8_t *src, int src_size,
666  uint32_t vflipped)
667 {
668  return dxtory_decode_v2(avctx, pic, src, src_size,
669  dx2_decode_slice_410,
670  default_setup_lru,
671  AV_PIX_FMT_YUV410P, vflipped);
672 }
673 
674 static int dx2_decode_slice_420(GetBitContext *gb, AVFrame *frame,
675  int line, int left,
676  uint8_t lru[3][8])
677 {
678  int x, y;
679 
680  int width = frame->width;
681 
682  int ystride = frame->linesize[0];
683  int ustride = frame->linesize[1];
684  int vstride = frame->linesize[2];
685 
686  uint8_t *Y = frame->data[0] + ystride * line;
687  uint8_t *U = frame->data[1] + (ustride >> 1) * line;
688  uint8_t *V = frame->data[2] + (vstride >> 1) * line;
689 
690  int h, w, hmargin, vmargin;
691  int huvborder;
692 
693  h = frame->height & ~1;
694  w = frame->width & ~1;
695  hmargin = frame->width - w;
696  vmargin = frame->height - h;
697  huvborder = AV_CEIL_RSHIFT(frame->width, 1) - 1;
698 
699  for (y = 0; y < left - 1 && get_bits_left(gb) >= 3 * w + hmargin * 4; y += 2) {
700  for (x = 0; x < w; x += 2) {
701  Y[x + 0 + 0 * ystride] = decode_sym(gb, lru[0]);
702  Y[x + 1 + 0 * ystride] = decode_sym(gb, lru[0]);
703  Y[x + 0 + 1 * ystride] = decode_sym(gb, lru[0]);
704  Y[x + 1 + 1 * ystride] = decode_sym(gb, lru[0]);
705  U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
706  V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
707  }
708  if (hmargin) {
709  Y[x + 0 * ystride] = decode_sym(gb, lru[0]);
710  Y[x + 1 * ystride] = decode_sym(gb, lru[0]);
711  U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
712  V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
713  }
714 
715  Y += ystride * 2;
716  U += ustride;
717  V += vstride;
718  }
719 
720  if (vmargin) {
721  for (x = 0; x < width; x += 2) {
722  Y[x + 0] = decode_sym(gb, lru[0]);
723  U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
724  V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
725  }
726  if (hmargin) {
727  Y[x] = decode_sym(gb, lru[0]);
728  U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
729  V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
730  }
731  }
732 
733  return y;
734 }
735 
736 static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic,
737  const uint8_t *src, int src_size,
738  uint32_t vflipped)
739 {
740  return dxtory_decode_v2(avctx, pic, src, src_size,
741  dx2_decode_slice_420,
742  default_setup_lru,
743  AV_PIX_FMT_YUV420P, vflipped);
744 }
745 
746 static int dx2_decode_slice_444(GetBitContext *gb, AVFrame *frame,
747  int line, int left,
748  uint8_t lru[3][8])
749 {
750  int x, y;
751 
752  int width = frame->width;
753 
754  int ystride = frame->linesize[0];
755  int ustride = frame->linesize[1];
756  int vstride = frame->linesize[2];
757 
758  uint8_t *Y = frame->data[0] + ystride * line;
759  uint8_t *U = frame->data[1] + ustride * line;
760  uint8_t *V = frame->data[2] + vstride * line;
761 
762  for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
763  for (x = 0; x < width; x++) {
764  Y[x] = decode_sym(gb, lru[0]);
765  U[x] = decode_sym(gb, lru[1]) ^ 0x80;
766  V[x] = decode_sym(gb, lru[2]) ^ 0x80;
767  }
768 
769  Y += ystride;
770  U += ustride;
771  V += vstride;
772  }
773 
774  return y;
775 }
776 
777 static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
778  const uint8_t *src, int src_size,
779  uint32_t vflipped)
780 {
781  return dxtory_decode_v2(avctx, pic, src, src_size,
782  dx2_decode_slice_444,
783  default_setup_lru,
784  AV_PIX_FMT_YUV444P, vflipped);
785 }
786 
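/* Each packet starts with a 16-byte header; the leading big-endian 32-bit
 * type selects the coding mode (raw "v1" vs. LRU-compressed "v2") and pixel
 * format, and bit 0x20 marks a vertically flipped frame. */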
787 static int decode_frame(AVCodecContext *avctx, AVFrame *pic,
788  int *got_frame, AVPacket *avpkt)
789 {
790  const uint8_t *src = avpkt->data;
791  uint32_t type;
792  int vflipped, ret;
793 
794  if (avpkt->size < 16) {
795  av_log(avctx, AV_LOG_ERROR, "packet too small\n");
796  return AVERROR_INVALIDDATA;
797  }
798 
799  type = AV_RB32(src);
800  vflipped = !!(type & 0x20);
801 
802  switch (type) {
803  case 0x01000021:
804  case 0x01000001:
805  ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
806  AV_PIX_FMT_BGR24, 3, vflipped);
807  break;
808  case 0x01000029:
809  case 0x01000009:
810  ret = dxtory_decode_v2_rgb(avctx, pic, src + 16, avpkt->size - 16, vflipped);
811  break;
812  case 0x02000021:
813  case 0x02000001:
814  ret = dxtory_decode_v1_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
815  break;
816  case 0x02000029:
817  case 0x02000009:
818  ret = dxtory_decode_v2_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
819  break;
820  case 0x03000021:
821  case 0x03000001:
822  ret = dxtory_decode_v1_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
823  break;
824  case 0x03000029:
825  case 0x03000009:
826  ret = dxtory_decode_v2_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
827  break;
828  case 0x04000021:
829  case 0x04000001:
830  ret = dxtory_decode_v1_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
831  break;
832  case 0x04000029:
833  case 0x04000009:
834  ret = dxtory_decode_v2_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
835  break;
836  case 0x17000021:
837  case 0x17000001:
838  ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
839  AV_PIX_FMT_RGB565LE, 2, vflipped);
840  break;
841  case 0x17000029:
842  case 0x17000009:
843  ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 1, vflipped);
844  break;
845  case 0x18000021:
846  case 0x19000021:
847  case 0x18000001:
848  case 0x19000001:
849  ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
850  AV_PIX_FMT_RGB555LE, 2, vflipped);
851  break;
852  case 0x18000029:
853  case 0x19000029:
854  case 0x18000009:
855  case 0x19000009:
856  ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 0, vflipped);
857  break;
858  default:
859  avpriv_request_sample(avctx, "Frame header %"PRIX32, type);
860  return AVERROR_PATCHWELCOME;
861  }
862 
863  if (ret)
864  return ret;
865 
866  *got_frame = 1;
867 
868  return avpkt->size;
869 }
870 
871 const FFCodec ff_dxtory_decoder = {
872  .p.name = "dxtory",
873  CODEC_LONG_NAME("Dxtory"),
874  .p.type = AVMEDIA_TYPE_VIDEO,
875  .p.id = AV_CODEC_ID_DXTORY,
876  FF_CODEC_DECODE_CB(decode_frame),
877  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
878 };