FFmpeg
vp3.c
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/mem_internal.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "get_bits.h"
46 #include "hpeldsp.h"
47 #include "jpegquanttables.h"
48 #include "mathops.h"
49 #include "thread.h"
50 #include "threadframe.h"
51 #include "videodsp.h"
52 #include "vp3data.h"
53 #include "vp4data.h"
54 #include "vp3dsp.h"
55 #include "xiph.h"
56 
57 #define VP3_MV_VLC_BITS 6
58 #define VP4_MV_VLC_BITS 6
59 #define SUPERBLOCK_VLC_BITS 6
60 
61 #define FRAGMENT_PIXELS 8
62 
63 // FIXME split things out into their own arrays
64 typedef struct Vp3Fragment {
65  int16_t dc;
66  uint8_t coding_method;
67  uint8_t qpi;
68 } Vp3Fragment;
69 
70 #define SB_NOT_CODED 0
71 #define SB_PARTIALLY_CODED 1
72 #define SB_FULLY_CODED 2
73 
74 // This is the maximum length of a single long bit run that can be encoded
75 // for superblock coding or block qps. Theora special-cases this to read a
76 // bit instead of flipping the current bit to allow for runs longer than 4129.
77 #define MAXIMUM_LONG_BIT_RUN 4129
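/* Illustrative sketch (not part of the original file; the helper name and
 * parameters are hypothetical). It shows how the run-bit bookkeeping used by
 * unpack_superblocks() and unpack_block_qpis() below behaves: normally the
 * coded bit just toggles between runs, but after a run of
 * MAXIMUM_LONG_BIT_RUN the Theora path reads the next bit explicitly, and a
 * decoded run value of 34 escapes to 12 extra bits, so a single run can
 * reach 34 + 4095 = 4129 == MAXIMUM_LONG_BIT_RUN. */
static inline int example_next_run_bit(GetBitContext *gb, int prev_bit,
                                       int prev_run_length)
{
    if (prev_run_length == MAXIMUM_LONG_BIT_RUN)
        return get_bits1(gb); /* read explicitly; allows runs longer than 4129 */
    else
        return prev_bit ^ 1;  /* otherwise the coded bit simply toggles */
}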
78 
79 #define MODE_INTER_NO_MV 0
80 #define MODE_INTRA 1
81 #define MODE_INTER_PLUS_MV 2
82 #define MODE_INTER_LAST_MV 3
83 #define MODE_INTER_PRIOR_LAST 4
84 #define MODE_USING_GOLDEN 5
85 #define MODE_GOLDEN_MV 6
86 #define MODE_INTER_FOURMV 7
87 #define CODING_MODE_COUNT 8
88 
89 /* special internal mode */
90 #define MODE_COPY 8
91 
92 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
93 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
94 
95 
96 /* There are 6 preset schemes, plus a free-form scheme */
97 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
98  /* scheme 1: Last motion vector dominates */
103 
104  /* scheme 2 */
109 
110  /* scheme 3 */
115 
116  /* scheme 4 */
121 
122  /* scheme 5: No motion vector dominates */
127 
128  /* scheme 6 */
133 };
134 
135 static const uint8_t hilbert_offset[16][2] = {
136  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
137  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
138  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
139  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
140 };
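/* Illustrative sketch (not part of the original file; the helper name is
 * hypothetical). It shows how an index i in 0..15 within a superblock maps
 * to fragment coordinates through the table above, mirroring what
 * init_block_mapping() does further down: each superblock covers a 4x4
 * group of fragments visited in Hilbert-curve order. */
static inline void example_superblock_fragment_xy(int sb_x, int sb_y, int i,
                                                  int *x, int *y)
{
    *x = 4 * sb_x + hilbert_offset[i][0];
    *y = 4 * sb_y + hilbert_offset[i][1];
}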
141 
142 enum {
148 };
149 
150 static const uint8_t vp4_pred_block_type_map[8] = {
159 };
160 
161 typedef struct {
162  int dc;
163  int type;
164 } VP4Predictor;
165 
166 #define MIN_DEQUANT_VAL 2
167 
168 typedef struct HuffEntry {
169  uint8_t len, sym;
170 } HuffEntry;
171 
172 typedef struct HuffTable {
174  uint8_t nb_entries;
175 } HuffTable;
176 
177 typedef struct Vp3DecodeContext {
180  int version;
181  int width, height;
186  int keyframe;
187  uint8_t idct_permutation[64];
188  uint8_t idct_scantable[64];
192  DECLARE_ALIGNED(16, int16_t, block)[64];
196 
197  int qps[3];
198  int nqps;
199  int last_qps[3];
200 
210  unsigned char *superblock_coding;
211 
212  int macroblock_count; /* y macroblock count */
218  int yuv_macroblock_count; /* y+u+v macroblock count */
219 
223 
226  int data_offset[3];
227  uint8_t offset_x;
228  uint8_t offset_y;
230 
231  int8_t (*motion_val[2])[2];
232 
233  /* tables */
234  uint16_t coded_dc_scale_factor[2][64];
235  uint32_t coded_ac_scale_factor[64];
236  uint8_t base_matrix[384][64];
237  uint8_t qr_count[2][3];
238  uint8_t qr_size[2][3][64];
239  uint16_t qr_base[2][3][64];
240 
241  /**
242  * This is a list of all tokens in bitstream order. Reordering takes place
243  * by pulling from each level during IDCT. As a consequence, IDCT must be
244  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
245  * otherwise. The 32 different tokens with up to 12 bits of extradata are
246  * collapsed into 3 types, packed as follows:
247  * (from the low to high bits)
248  *
249  * 2 bits: type (0,1,2)
250  * 0: EOB run, 14 bits for run length (12 needed)
251  * 1: zero run, 7 bits for run length
252  * 7 bits for the next coefficient (3 needed)
253  * 2: coefficient, 14 bits (11 needed)
254  *
255  * Coefficients are signed, so are packed in the highest bits for automatic
256  * sign extension.
257  */
258  int16_t *dct_tokens[3][64];
259  int16_t *dct_tokens_base;
260 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
261 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
262 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
263 
264  /**
265  * number of blocks that contain DCT coefficients at
266  * the given level or higher
267  */
268  int num_coded_frags[3][64];
270 
271  /* this is a list of indexes into the all_fragments array indicating
272  * which of the fragments are coded */
274 
278 
279  /* The first 16 of the following VLCs are for the dc coefficients;
280  the others are four groups of 16 VLCs each for ac coefficients. */
281  VLC coeff_vlc[5 * 16];
282 
283  VLC superblock_run_length_vlc; /* version < 2 */
284  VLC fragment_run_length_vlc; /* version < 2 */
285  VLC block_pattern_vlc[2]; /* version >= 2 */
287  VLC motion_vector_vlc; /* version < 2 */
288  VLC vp4_mv_vlc[2][7]; /* version >=2 */
289 
290  /* these arrays need to be on 16-byte boundaries since SSE2 operations
291  * index into them */
292  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
293 
294  /* This table contains superblock_count * 16 entries. Each set of 16
295  * numbers corresponds to the fragment indexes 0..15 of the superblock.
296  * An entry will be -1 to indicate that no entry corresponds to that
297  * index. */
299 
300  /* This is an array that indicates how a particular macroblock
301  * is coded. */
302  unsigned char *macroblock_coding;
303 
304  uint8_t *edge_emu_buffer;
305 
306  /* Huffman decode */
308 
309  uint8_t filter_limit_values[64];
311 
312  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
313 } Vp3DecodeContext;
314
315 /************************************************************************
316  * VP3 specific functions
317  ************************************************************************/
318 
319 static av_cold void free_tables(AVCodecContext *avctx)
320 {
321  Vp3DecodeContext *s = avctx->priv_data;
322 
323  av_freep(&s->superblock_coding);
324  av_freep(&s->all_fragments);
325  av_freep(&s->nkf_coded_fragment_list);
326  av_freep(&s->kf_coded_fragment_list);
327  av_freep(&s->dct_tokens_base);
328  av_freep(&s->superblock_fragments);
329  av_freep(&s->macroblock_coding);
330  av_freep(&s->dc_pred_row);
331  av_freep(&s->motion_val[0]);
332  av_freep(&s->motion_val[1]);
333 }
334 
335 static void vp3_decode_flush(AVCodecContext *avctx)
336 {
337  Vp3DecodeContext *s = avctx->priv_data;
338 
339  if (s->golden_frame.f)
340  ff_thread_release_ext_buffer(&s->golden_frame);
341  if (s->last_frame.f)
342  ff_thread_release_ext_buffer(&s->last_frame);
343  if (s->current_frame.f)
344  ff_thread_release_ext_buffer(&s->current_frame);
345 }
346 
347 static av_cold int vp3_decode_end(AVCodecContext *avctx)
348 {
349  Vp3DecodeContext *s = avctx->priv_data;
350 
351  free_tables(avctx);
352  av_freep(&s->edge_emu_buffer);
353 
354  s->theora_tables = 0;
355 
356  /* release all frames */
357  vp3_decode_flush(avctx);
358  av_frame_free(&s->current_frame.f);
359  av_frame_free(&s->last_frame.f);
360  av_frame_free(&s->golden_frame.f);
361 
362  for (int i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++)
363  ff_vlc_free(&s->coeff_vlc[i]);
364 
365  ff_vlc_free(&s->superblock_run_length_vlc);
366  ff_vlc_free(&s->fragment_run_length_vlc);
367  ff_vlc_free(&s->mode_code_vlc);
368  ff_vlc_free(&s->motion_vector_vlc);
369 
370  for (int j = 0; j < 2; j++)
371  for (int i = 0; i < 7; i++)
372  ff_vlc_free(&s->vp4_mv_vlc[j][i]);
373 
374  for (int i = 0; i < 2; i++)
375  ff_vlc_free(&s->block_pattern_vlc[i]);
376  return 0;
377 }
378 
379 /**
380  * This function sets up all of the various blocks mappings:
381  * superblocks <-> fragments, macroblocks <-> fragments,
382  * superblocks <-> macroblocks
383  *
384  * @return 0 if successful; returns 1 if *anything* went wrong.
385  */
386 static int init_block_mapping(Vp3DecodeContext *s)
387 {
388  int j = 0;
389 
390  for (int plane = 0; plane < 3; plane++) {
391  int sb_width = plane ? s->c_superblock_width
392  : s->y_superblock_width;
393  int sb_height = plane ? s->c_superblock_height
394  : s->y_superblock_height;
395  int frag_width = s->fragment_width[!!plane];
396  int frag_height = s->fragment_height[!!plane];
397 
398  for (int sb_y = 0; sb_y < sb_height; sb_y++)
399  for (int sb_x = 0; sb_x < sb_width; sb_x++)
400  for (int i = 0; i < 16; i++) {
401  int x = 4 * sb_x + hilbert_offset[i][0];
402  int y = 4 * sb_y + hilbert_offset[i][1];
403 
404  if (x < frag_width && y < frag_height)
405  s->superblock_fragments[j++] = s->fragment_start[plane] +
406  y * frag_width + x;
407  else
408  s->superblock_fragments[j++] = -1;
409  }
410  }
411 
412  return 0; /* successful path out */
413 }
414 
415 /*
416  * This function sets up the dequantization tables used for a particular
417  * frame.
418  */
419 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
420 {
421  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
422 
423  for (int inter = 0; inter < 2; inter++) {
424  for (int plane = 0; plane < 3; plane++) {
425  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
426  int sum = 0, bmi, bmj, qistart, qri;
427  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
428  sum += s->qr_size[inter][plane][qri];
429  if (s->qps[qpi] <= sum)
430  break;
431  }
432  qistart = sum - s->qr_size[inter][plane][qri];
433  bmi = s->qr_base[inter][plane][qri];
434  bmj = s->qr_base[inter][plane][qri + 1];
435  for (int i = 0; i < 64; i++) {
436  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
437  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
438  s->qr_size[inter][plane][qri]) /
439  (2 * s->qr_size[inter][plane][qri]);
440 
441  int qmin = 8 << (inter + !i);
442  int qscale = i ? ac_scale_factor : dc_scale_factor;
443  int qbias = (1 + inter) * 3;
444  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
445  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
446  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
447  }
448  /* all DC coefficients use the same quant so as not to interfere
449  * with DC prediction */
450  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
451  }
452  }
453 }
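/* Illustrative sketch (not part of the original file; the helper name and
 * parameter names are hypothetical). The coeff expression in
 * init_dequantizer() above is a rounded linear interpolation between the two
 * base matrices that bound the current qp range; this is the same arithmetic
 * spelled out more plainly, with lo/hi being base matrix entries at
 * qi_start/qi_end and qi being s->qps[qpi]. */
static inline int example_interp_base_matrix(int lo, int hi,
                                             int qi_start, int qi_end, int qi)
{
    int range = qi_end - qi_start; /* corresponds to qr_size[inter][plane][qri] */
    /* weighted average of lo (at qi_start) and hi (at qi_end), rounded */
    return (2 * ((qi_end - qi) * lo + (qi - qi_start) * hi) + range) / (2 * range);
}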
454 
455 /*
456  * This function initializes the loop filter boundary limits if the frame's
457  * quality index is different from the previous frame's.
458  *
459  * The filter_limit_values may not be larger than 127.
460  */
461 static void init_loop_filter(Vp3DecodeContext *s)
462 {
463  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
464 }
465 
466 /*
467  * This function unpacks all of the superblock/macroblock/fragment coding
468  * information from the bitstream.
469  */
470 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
471 {
472  const int superblock_starts[3] = {
473  0, s->u_superblock_start, s->v_superblock_start
474  };
475  int bit = 0;
476  int current_superblock = 0;
477  int current_run = 0;
478  int num_partial_superblocks = 0;
479 
480  int current_fragment;
481  int plane0_num_coded_frags = 0;
482 
483  if (s->keyframe) {
484  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
485  } else {
486  /* unpack the list of partially-coded superblocks */
487  bit = get_bits1(gb) ^ 1;
488  current_run = 0;
489 
490  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
491  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
492  bit = get_bits1(gb);
493  else
494  bit ^= 1;
495 
496  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
497  SUPERBLOCK_VLC_BITS, 2);
498  if (current_run == 34)
499  current_run += get_bits(gb, 12);
500 
501  if (current_run > s->superblock_count - current_superblock) {
502  av_log(s->avctx, AV_LOG_ERROR,
503  "Invalid partially coded superblock run length\n");
504  return -1;
505  }
506 
507  memset(s->superblock_coding + current_superblock, bit, current_run);
508 
509  current_superblock += current_run;
510  if (bit)
511  num_partial_superblocks += current_run;
512  }
513 
514  /* unpack the list of fully coded superblocks if any of the blocks were
515  * not marked as partially coded in the previous step */
516  if (num_partial_superblocks < s->superblock_count) {
517  int superblocks_decoded = 0;
518 
519  current_superblock = 0;
520  bit = get_bits1(gb) ^ 1;
521  current_run = 0;
522 
523  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
524  get_bits_left(gb) > 0) {
525  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
526  bit = get_bits1(gb);
527  else
528  bit ^= 1;
529 
530  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
531  SUPERBLOCK_VLC_BITS, 2);
532  if (current_run == 34)
533  current_run += get_bits(gb, 12);
534 
535  for (int j = 0; j < current_run; current_superblock++) {
536  if (current_superblock >= s->superblock_count) {
537  av_log(s->avctx, AV_LOG_ERROR,
538  "Invalid fully coded superblock run length\n");
539  return -1;
540  }
541 
542  /* skip any superblocks already marked as partially coded */
543  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
544  s->superblock_coding[current_superblock] = 2 * bit;
545  j++;
546  }
547  }
548  superblocks_decoded += current_run;
549  }
550  }
551 
552  /* if there were partial blocks, initialize bitstream for
553  * unpacking fragment codings */
554  if (num_partial_superblocks) {
555  current_run = 0;
556  bit = get_bits1(gb);
557  /* toggle the bit because as soon as the first run length is
558  * fetched the bit will be toggled again */
559  bit ^= 1;
560  }
561  }
562 
563  /* figure out which fragments are coded; iterate through each
564  * superblock (all planes) */
565  s->total_num_coded_frags = 0;
566  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
567 
568  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
569  : s->nkf_coded_fragment_list;
570 
571  for (int plane = 0; plane < 3; plane++) {
572  int sb_start = superblock_starts[plane];
573  int sb_end = sb_start + (plane ? s->c_superblock_count
574  : s->y_superblock_count);
575  int num_coded_frags = 0;
576 
577  if (s->keyframe) {
578  if (s->num_kf_coded_fragment[plane] == -1) {
579  for (int i = sb_start; i < sb_end; i++) {
580  /* iterate through all 16 fragments in a superblock */
581  for (int j = 0; j < 16; j++) {
582  /* if the fragment is in bounds, check its coding status */
583  current_fragment = s->superblock_fragments[i * 16 + j];
584  if (current_fragment != -1) {
585  s->coded_fragment_list[plane][num_coded_frags++] =
586  current_fragment;
587  }
588  }
589  }
590  s->num_kf_coded_fragment[plane] = num_coded_frags;
591  } else
592  num_coded_frags = s->num_kf_coded_fragment[plane];
593  } else {
594  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
595  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
596  return AVERROR_INVALIDDATA;
597  }
598  /* iterate through all 16 fragments in a superblock */
599  for (int j = 0; j < 16; j++) {
600  /* if the fragment is in bounds, check its coding status */
601  current_fragment = s->superblock_fragments[i * 16 + j];
602  if (current_fragment != -1) {
603  int coded = s->superblock_coding[i];
604 
605  if (coded == SB_PARTIALLY_CODED) {
606  /* fragment may or may not be coded; this is the case
607  * that cares about the fragment coding runs */
608  if (current_run-- == 0) {
609  bit ^= 1;
610  current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
611  }
612  coded = bit;
613  }
614 
615  if (coded) {
616  /* default mode; actual mode will be decoded in
617  * the next phase */
618  s->all_fragments[current_fragment].coding_method =
619  MODE_INTER_NO_MV;
620  s->coded_fragment_list[plane][num_coded_frags++] =
621  current_fragment;
622  } else {
623  /* not coded; copy this fragment from the prior frame */
624  s->all_fragments[current_fragment].coding_method =
625  MODE_COPY;
626  }
627  }
628  }
629  }
630  }
631  if (!plane)
632  plane0_num_coded_frags = num_coded_frags;
633  s->total_num_coded_frags += num_coded_frags;
634  for (int i = 0; i < 64; i++)
635  s->num_coded_frags[plane][i] = num_coded_frags;
636  if (plane < 2)
637  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
638  num_coded_frags;
639  }
640  return 0;
641 }
642 
643 #define BLOCK_X (2 * mb_x + (k & 1))
644 #define BLOCK_Y (2 * mb_y + (k >> 1))
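/* Example (illustrative): for k = 0..3 the two macros above walk the 2x2
 * luma fragments of macroblock (mb_x, mb_y) in raster order:
 * k=0 -> (2*mb_x,     2*mb_y)        k=1 -> (2*mb_x + 1, 2*mb_y)
 * k=2 -> (2*mb_x,     2*mb_y + 1)    k=3 -> (2*mb_x + 1, 2*mb_y + 1) */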
645 
646 #if CONFIG_VP4_DECODER
647 /**
648  * @return number of blocks, or > yuv_macroblock_count on error.
649  * The return value is always >= 1.
650  */
651 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
652 {
653  int v = 1;
654  int bits;
655  while ((bits = show_bits(gb, 9)) == 0x1ff) {
656  skip_bits(gb, 9);
657  v += 256;
658  if (v > s->yuv_macroblock_count) {
659  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
660  return v;
661  }
662  }
663 #define body(n) { \
664  skip_bits(gb, 2 + n); \
665  v += (1 << n) + get_bits(gb, n); }
666 #define thresh(n) (0x200 - (0x80 >> n))
667 #define else_if(n) else if (bits < thresh(n)) body(n)
668  if (bits < 0x100) {
669  skip_bits(gb, 1);
670  } else if (bits < thresh(0)) {
671  skip_bits(gb, 2);
672  v += 1;
673  }
674  else_if(1)
675  else_if(2)
676  else_if(3)
677  else_if(4)
678  else_if(5)
679  else_if(6)
680  else body(7)
681 #undef body
682 #undef thresh
683 #undef else_if
684  return v;
685 }
686 
687 static int vp4_get_block_pattern(Vp3DecodeContext *s, GetBitContext *gb, int *next_block_pattern_table)
688 {
689  int v = get_vlc2(gb, s->block_pattern_vlc[*next_block_pattern_table].table, 3, 2);
690  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
691  return v + 1;
692 }
693 
694 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
695 {
696  int fragment;
697  int next_block_pattern_table;
698  int bit, current_run, has_partial;
699 
700  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
701 
702  if (s->keyframe)
703  return 0;
704 
705  has_partial = 0;
706  bit = get_bits1(gb);
707  for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
708  if (get_bits_left(gb) <= 0)
709  return AVERROR_INVALIDDATA;
710  current_run = vp4_get_mb_count(s, gb);
711  if (current_run > s->yuv_macroblock_count - i)
712  return -1;
713  memset(s->superblock_coding + i, 2 * bit, current_run);
714  bit ^= 1;
715  has_partial |= bit;
716  }
717 
718  if (has_partial) {
719  if (get_bits_left(gb) <= 0)
720  return AVERROR_INVALIDDATA;
721  bit = get_bits1(gb);
722  current_run = vp4_get_mb_count(s, gb);
723  for (int i = 0; i < s->yuv_macroblock_count; i++) {
724  if (!s->superblock_coding[i]) {
725  if (!current_run) {
726  bit ^= 1;
727  current_run = vp4_get_mb_count(s, gb);
728  }
729  s->superblock_coding[i] = bit;
730  current_run--;
731  }
732  }
733  if (current_run) /* handle situation when vp4_get_mb_count() fails */
734  return -1;
735  }
736 
737  next_block_pattern_table = 0;
738  for (int plane = 0, i = 0; plane < 3; plane++) {
739  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
740  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
741  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
742  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
743  int fragment_width = s->fragment_width[!!plane];
744  int fragment_height = s->fragment_height[!!plane];
745 
746  for (int sb_y = 0; sb_y < sb_height; sb_y++) {
747  for (int sb_x = 0; sb_x < sb_width; sb_x++) {
748  for (int j = 0; j < 4; j++) {
749  int mb_x = 2 * sb_x + (j >> 1);
750  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
751  int mb_coded, pattern, coded;
752 
753  if (mb_x >= mb_width || mb_y >= mb_height)
754  continue;
755 
756  mb_coded = s->superblock_coding[i++];
757 
758  if (mb_coded == SB_FULLY_CODED)
759  pattern = 0xF;
760  else if (mb_coded == SB_PARTIALLY_CODED)
761  pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
762  else
763  pattern = 0;
764 
765  for (int k = 0; k < 4; k++) {
766  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
767  continue;
768  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
769  coded = pattern & (8 >> k);
770  /* MODE_INTER_NO_MV is the default for coded fragments.
771  The actual method is decoded in the next phase. */
772  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
773  }
774  }
775  }
776  }
777  }
778  return 0;
779 }
780 #endif
781 
782 /*
783  * This function unpacks all the coding mode data for individual macroblocks
784  * from the bitstream.
785  */
786 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
787 {
788  int scheme;
789  int current_macroblock;
790  int current_fragment;
791  int coding_mode;
792  int custom_mode_alphabet[CODING_MODE_COUNT];
793  const int *alphabet;
794  Vp3Fragment *frag;
795 
796  if (s->keyframe) {
797  for (int i = 0; i < s->fragment_count; i++)
798  s->all_fragments[i].coding_method = MODE_INTRA;
799  } else {
800  /* fetch the mode coding scheme for this frame */
801  scheme = get_bits(gb, 3);
802 
803  /* is it a custom coding scheme? */
804  if (scheme == 0) {
805  for (int i = 0; i < 8; i++)
806  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
807  for (int i = 0; i < 8; i++)
808  custom_mode_alphabet[get_bits(gb, 3)] = i;
809  alphabet = custom_mode_alphabet;
810  } else
811  alphabet = ModeAlphabet[scheme - 1];
812 
813  /* iterate through all of the macroblocks that contain 1 or more
814  * coded fragments */
815  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
816  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
817  if (get_bits_left(gb) <= 0)
818  return -1;
819 
820  for (int j = 0; j < 4; j++) {
821  int k;
822  int mb_x = 2 * sb_x + (j >> 1);
823  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
824  current_macroblock = mb_y * s->macroblock_width + mb_x;
825 
826  if (mb_x >= s->macroblock_width ||
827  mb_y >= s->macroblock_height)
828  continue;
829 
830  /* coding modes are only stored if the macroblock has
831  * at least one luma block coded, otherwise it must be
832  * INTER_NO_MV */
833  for (k = 0; k < 4; k++) {
834  current_fragment = BLOCK_Y *
835  s->fragment_width[0] + BLOCK_X;
836  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
837  break;
838  }
839  if (k == 4) {
840  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
841  continue;
842  }
843 
844  /* mode 7 means get 3 bits for each coding mode */
845  if (scheme == 7)
846  coding_mode = get_bits(gb, 3);
847  else
848  coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
849 
850  s->macroblock_coding[current_macroblock] = coding_mode;
851  for (k = 0; k < 4; k++) {
852  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
853  if (frag->coding_method != MODE_COPY)
854  frag->coding_method = coding_mode;
855  }
856 
857 #define SET_CHROMA_MODES \
858  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
859  frag[s->fragment_start[1]].coding_method = coding_mode; \
860  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
861  frag[s->fragment_start[2]].coding_method = coding_mode;
862 
863  if (s->chroma_y_shift) {
864  frag = s->all_fragments + mb_y *
865  s->fragment_width[1] + mb_x;
866  SET_CHROMA_MODES
867  } else if (s->chroma_x_shift) {
868  frag = s->all_fragments +
869  2 * mb_y * s->fragment_width[1] + mb_x;
870  for (k = 0; k < 2; k++) {
871  SET_CHROMA_MODES
872  frag += s->fragment_width[1];
873  }
874  } else {
875  for (k = 0; k < 4; k++) {
876  frag = s->all_fragments +
877  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
878  SET_CHROMA_MODES
879  }
880  }
881  }
882  }
883  }
884  }
885 
886  return 0;
887 }
888 
889 static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
890 {
891  int v = get_vlc2(gb, s->vp4_mv_vlc[axis][vp4_mv_table_selector[FFABS(last_motion)]].table,
892  VP4_MV_VLC_BITS, 2);
893  return last_motion < 0 ? -v : v;
894 }
895 
896 /*
897  * This function unpacks all the motion vectors for the individual
898  * macroblocks from the bitstream.
899  */
900 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
901 {
902  int coding_mode;
903  int motion_x[4];
904  int motion_y[4];
905  int last_motion_x = 0;
906  int last_motion_y = 0;
907  int prior_last_motion_x = 0;
908  int prior_last_motion_y = 0;
909  int last_gold_motion_x = 0;
910  int last_gold_motion_y = 0;
911  int current_macroblock;
912  int current_fragment;
913  int frag;
914 
915  if (s->keyframe)
916  return 0;
917 
918  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is the VP4 code scheme */
919  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
920 
921  /* iterate through all of the macroblocks that contain 1 or more
922  * coded fragments */
923  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
924  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
925  if (get_bits_left(gb) <= 0)
926  return -1;
927 
928  for (int j = 0; j < 4; j++) {
929  int mb_x = 2 * sb_x + (j >> 1);
930  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
931  current_macroblock = mb_y * s->macroblock_width + mb_x;
932 
933  if (mb_x >= s->macroblock_width ||
934  mb_y >= s->macroblock_height ||
935  s->macroblock_coding[current_macroblock] == MODE_COPY)
936  continue;
937 
938  switch (s->macroblock_coding[current_macroblock]) {
939  case MODE_GOLDEN_MV:
940  if (coding_mode == 2) { /* VP4 */
941  last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
942  last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
943  break;
944  } /* otherwise fall through */
945  case MODE_INTER_PLUS_MV:
946  /* all 6 fragments use the same motion vector */
947  if (coding_mode == 0) {
948  motion_x[0] = get_vlc2(gb, s->motion_vector_vlc.table,
949  VP3_MV_VLC_BITS, 2);
950  motion_y[0] = get_vlc2(gb, s->motion_vector_vlc.table,
951  VP3_MV_VLC_BITS, 2);
952  } else if (coding_mode == 1) {
953  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
954  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
955  } else { /* VP4 */
956  motion_x[0] = vp4_get_mv(s, gb, 0, last_motion_x);
957  motion_y[0] = vp4_get_mv(s, gb, 1, last_motion_y);
958  }
959 
960  /* vector maintenance, only on MODE_INTER_PLUS_MV */
961  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
962  prior_last_motion_x = last_motion_x;
963  prior_last_motion_y = last_motion_y;
964  last_motion_x = motion_x[0];
965  last_motion_y = motion_y[0];
966  }
967  break;
968 
969  case MODE_INTER_FOURMV:
970  /* vector maintenance */
971  prior_last_motion_x = last_motion_x;
972  prior_last_motion_y = last_motion_y;
973 
974  /* fetch 4 vectors from the bitstream, one for each
975  * Y fragment, then average for the C fragment vectors */
976  for (int k = 0; k < 4; k++) {
977  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
978  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
979  if (coding_mode == 0) {
980  motion_x[k] = get_vlc2(gb, s->motion_vector_vlc.table,
981  VP3_MV_VLC_BITS, 2);
982  motion_y[k] = get_vlc2(gb, s->motion_vector_vlc.table,
983  VP3_MV_VLC_BITS, 2);
984  } else if (coding_mode == 1) {
985  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
986  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
987  } else { /* VP4 */
988  motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
989  motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
990  }
991  last_motion_x = motion_x[k];
992  last_motion_y = motion_y[k];
993  } else {
994  motion_x[k] = 0;
995  motion_y[k] = 0;
996  }
997  }
998  break;
999 
1000  case MODE_INTER_LAST_MV:
1001  /* all 6 fragments use the last motion vector */
1002  motion_x[0] = last_motion_x;
1003  motion_y[0] = last_motion_y;
1004 
1005  /* no vector maintenance (last vector remains the
1006  * last vector) */
1007  break;
1008 
1009  case MODE_INTER_PRIOR_LAST:
1010  /* all 6 fragments use the motion vector prior to the
1011  * last motion vector */
1012  motion_x[0] = prior_last_motion_x;
1013  motion_y[0] = prior_last_motion_y;
1014 
1015  /* vector maintenance */
1016  prior_last_motion_x = last_motion_x;
1017  prior_last_motion_y = last_motion_y;
1018  last_motion_x = motion_x[0];
1019  last_motion_y = motion_y[0];
1020  break;
1021 
1022  default:
1023  /* covers intra, inter without MV, golden without MV */
1024  motion_x[0] = 0;
1025  motion_y[0] = 0;
1026 
1027  /* no vector maintenance */
1028  break;
1029  }
1030 
1031  /* assign the motion vectors to the correct fragments */
1032  for (int k = 0; k < 4; k++) {
1033  current_fragment =
1034  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1035  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1036  s->motion_val[0][current_fragment][0] = motion_x[k];
1037  s->motion_val[0][current_fragment][1] = motion_y[k];
1038  } else {
1039  s->motion_val[0][current_fragment][0] = motion_x[0];
1040  s->motion_val[0][current_fragment][1] = motion_y[0];
1041  }
1042  }
1043 
1044  if (s->chroma_y_shift) {
1045  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1046  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1047  motion_x[2] + motion_x[3], 2);
1048  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1049  motion_y[2] + motion_y[3], 2);
1050  }
1051  if (s->version <= 2) {
1052  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1053  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1054  }
1055  frag = mb_y * s->fragment_width[1] + mb_x;
1056  s->motion_val[1][frag][0] = motion_x[0];
1057  s->motion_val[1][frag][1] = motion_y[0];
1058  } else if (s->chroma_x_shift) {
1059  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1060  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1061  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1062  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1063  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1064  } else {
1065  motion_x[1] = motion_x[0];
1066  motion_y[1] = motion_y[0];
1067  }
1068  if (s->version <= 2) {
1069  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1070  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1071  }
1072  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1073  for (int k = 0; k < 2; k++) {
1074  s->motion_val[1][frag][0] = motion_x[k];
1075  s->motion_val[1][frag][1] = motion_y[k];
1076  frag += s->fragment_width[1];
1077  }
1078  } else {
1079  for (int k = 0; k < 4; k++) {
1080  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1081  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1082  s->motion_val[1][frag][0] = motion_x[k];
1083  s->motion_val[1][frag][1] = motion_y[k];
1084  } else {
1085  s->motion_val[1][frag][0] = motion_x[0];
1086  s->motion_val[1][frag][1] = motion_y[0];
1087  }
1088  }
1089  }
1090  }
1091  }
1092  }
1093 
1094  return 0;
1095 }
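/* Illustrative sketch (not part of the original file; the helper name is
 * hypothetical). For 4:2:0 content the chroma vector of a MODE_INTER_FOURMV
 * macroblock is derived as above: the four luma vectors are averaged and,
 * on the version <= 2 path, halved with (v >> 1) | (v & 1), which keeps the
 * result odd whenever the input was odd so the half-pel offset survives. */
static inline int example_chroma_mv_420(int v0, int v1, int v2, int v3)
{
    int v = RSHIFT(v0 + v1 + v2 + v3, 2); /* rounded average of the 4 luma MVs */
    return (v >> 1) | (v & 1);            /* halve, preserving the low bit */
}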
1096 
1097 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
1098 {
1099  int num_blocks = s->total_num_coded_frags;
1100 
1101  for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1102  int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
1103  int bit, run_length;
1104 
1105  bit = get_bits1(gb) ^ 1;
1106  run_length = 0;
1107 
1108  do {
1109  if (run_length == MAXIMUM_LONG_BIT_RUN)
1110  bit = get_bits1(gb);
1111  else
1112  bit ^= 1;
1113 
1114  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table,
1115  SUPERBLOCK_VLC_BITS, 2);
1116  if (run_length == 34)
1117  run_length += get_bits(gb, 12);
1118  blocks_decoded += run_length;
1119 
1120  if (!bit)
1121  num_blocks_at_qpi += run_length;
1122 
1123  for (int j = 0; j < run_length; i++) {
1124  if (i >= s->total_num_coded_frags)
1125  return -1;
1126 
1127  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1128  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1129  j++;
1130  }
1131  }
1132  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1133 
1134  num_blocks -= num_blocks_at_qpi;
1135  }
1136 
1137  return 0;
1138 }
1139 
1140 static inline int get_eob_run(GetBitContext *gb, int token)
1141 {
1142  int v = eob_run_table[token].base;
1143  if (eob_run_table[token].bits)
1144  v += get_bits(gb, eob_run_table[token].bits);
1145  return v;
1146 }
1147 
1148 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1149 {
1150  int bits_to_get, zero_run;
1151 
1152  bits_to_get = coeff_get_bits[token];
1153  if (bits_to_get)
1154  bits_to_get = get_bits(gb, bits_to_get);
1155  *coeff = coeff_tables[token][bits_to_get];
1156 
1157  zero_run = zero_run_base[token];
1158  if (zero_run_get_bits[token])
1159  zero_run += get_bits(gb, zero_run_get_bits[token]);
1160 
1161  return zero_run;
1162 }
1163 
1164 /*
1165  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1166  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1167  * data. This function unpacks all the VLCs for either the Y plane or both
1168  * C planes, and is called for DC coefficients or different AC coefficient
1169  * levels (since different coefficient types require different VLC tables).
1170  *
1171  * This function returns a residual EOB run. E.g., if a particular token gave
1172  * instructions to EOB the next 5 fragments and there were only 2 fragments
1173  * left in the current fragment range, 3 would be returned so that it could
1174  * be passed into the next call to this same function.
1175  */
1176 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1177  const VLC *table, int coeff_index,
1178  int plane,
1179  int eob_run)
1180 {
1181  int j = 0;
1182  int token;
1183  int zero_run = 0;
1184  int16_t coeff = 0;
1185  int blocks_ended;
1186  int coeff_i = 0;
1187  int num_coeffs = s->num_coded_frags[plane][coeff_index];
1188  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1189 
1190  /* local references to structure members to avoid repeated dereferences */
1191  const int *coded_fragment_list = s->coded_fragment_list[plane];
1192  Vp3Fragment *all_fragments = s->all_fragments;
1193  const VLCElem *vlc_table = table->table;
1194 
1195  if (num_coeffs < 0) {
1196  av_log(s->avctx, AV_LOG_ERROR,
1197  "Invalid number of coefficients at level %d\n", coeff_index);
1198  return AVERROR_INVALIDDATA;
1199  }
1200 
1201  if (eob_run > num_coeffs) {
1202  coeff_i =
1203  blocks_ended = num_coeffs;
1204  eob_run -= num_coeffs;
1205  } else {
1206  coeff_i =
1207  blocks_ended = eob_run;
1208  eob_run = 0;
1209  }
1210 
1211  // insert fake EOB token to cover the split between planes or zzi
1212  if (blocks_ended)
1213  dct_tokens[j++] = blocks_ended << 2;
1214 
1215  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
1216  /* decode a VLC into a token */
1217  token = get_vlc2(gb, vlc_table, 11, 3);
1218  /* use the token to get a zero run, a coefficient, and an eob run */
1219  if ((unsigned) token <= 6U) {
1220  eob_run = get_eob_run(gb, token);
1221  if (!eob_run)
1222  eob_run = INT_MAX;
1223 
1224  // record only the number of blocks ended in this plane,
1225  // any spill will be recorded in the next plane.
1226  if (eob_run > num_coeffs - coeff_i) {
1227  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1228  blocks_ended += num_coeffs - coeff_i;
1229  eob_run -= num_coeffs - coeff_i;
1230  coeff_i = num_coeffs;
1231  } else {
1232  dct_tokens[j++] = TOKEN_EOB(eob_run);
1233  blocks_ended += eob_run;
1234  coeff_i += eob_run;
1235  eob_run = 0;
1236  }
1237  } else if (token >= 0) {
1238  zero_run = get_coeff(gb, token, &coeff);
1239 
1240  if (zero_run) {
1241  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
1242  } else {
1243  // Save DC into the fragment structure. DC prediction is
1244  // done in raster order, so the actual DC can't be in with
1245  // other tokens. We still need the token in dct_tokens[]
1246  // however, or else the structure collapses on itself.
1247  if (!coeff_index)
1248  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1249 
1250  dct_tokens[j++] = TOKEN_COEFF(coeff);
1251  }
1252 
1253  if (coeff_index + zero_run > 64) {
1254  av_log(s->avctx, AV_LOG_DEBUG,
1255  "Invalid zero run of %d with %d coeffs left\n",
1256  zero_run, 64 - coeff_index);
1257  zero_run = 64 - coeff_index;
1258  }
1259 
1260  // zero runs code multiple coefficients,
1261  // so don't try to decode coeffs for those higher levels
1262  for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1263  s->num_coded_frags[plane][i]--;
1264  coeff_i++;
1265  } else {
1266  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1267  return -1;
1268  }
1269  }
1270 
1271  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1272  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1273 
1274  // decrement the number of blocks that have higher coefficients for each
1275  // EOB run at this level
1276  if (blocks_ended)
1277  for (int i = coeff_index + 1; i < 64; i++)
1278  s->num_coded_frags[plane][i] -= blocks_ended;
1279 
1280  // setup the next buffer
1281  if (plane < 2)
1282  s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1283  else if (coeff_index < 63)
1284  s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1285 
1286  return eob_run;
1287 }
1288 
1289 static void reverse_dc_prediction(Vp3DecodeContext *s,
1290  int first_fragment,
1291  int fragment_width,
1292  int fragment_height);
1293 /*
1294  * This function unpacks all of the DCT coefficient data from the
1295  * bitstream.
1296  */
1297 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1298 {
1299  int dc_y_table;
1300  int dc_c_table;
1301  int ac_y_table;
1302  int ac_c_table;
1303  int residual_eob_run = 0;
1304  VLC *y_tables[64];
1305  VLC *c_tables[64];
1306 
1307  s->dct_tokens[0][0] = s->dct_tokens_base;
1308 
1309  if (get_bits_left(gb) < 16)
1310  return AVERROR_INVALIDDATA;
1311 
1312  /* fetch the DC table indexes */
1313  dc_y_table = get_bits(gb, 4);
1314  dc_c_table = get_bits(gb, 4);
1315 
1316  /* unpack the Y plane DC coefficients */
1317  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_y_table], 0,
1318  0, residual_eob_run);
1319  if (residual_eob_run < 0)
1320  return residual_eob_run;
1321  if (get_bits_left(gb) < 8)
1322  return AVERROR_INVALIDDATA;
1323 
1324  /* reverse prediction of the Y-plane DC coefficients */
1325  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1326 
1327  /* unpack the C plane DC coefficients */
1328  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1329  1, residual_eob_run);
1330  if (residual_eob_run < 0)
1331  return residual_eob_run;
1332  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1333  2, residual_eob_run);
1334  if (residual_eob_run < 0)
1335  return residual_eob_run;
1336 
1337  /* reverse prediction of the C-plane DC coefficients */
1338  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1339  reverse_dc_prediction(s, s->fragment_start[1],
1340  s->fragment_width[1], s->fragment_height[1]);
1341  reverse_dc_prediction(s, s->fragment_start[2],
1342  s->fragment_width[1], s->fragment_height[1]);
1343  }
1344 
1345  if (get_bits_left(gb) < 8)
1346  return AVERROR_INVALIDDATA;
1347  /* fetch the AC table indexes */
1348  ac_y_table = get_bits(gb, 4);
1349  ac_c_table = get_bits(gb, 4);
1350 
1351  /* build tables of AC VLC tables */
1352  for (int i = 1; i <= 5; i++) {
1353  /* AC VLC table group 1 */
1354  y_tables[i] = &s->coeff_vlc[ac_y_table + 16];
1355  c_tables[i] = &s->coeff_vlc[ac_c_table + 16];
1356  }
1357  for (int i = 6; i <= 14; i++) {
1358  /* AC VLC table group 2 */
1359  y_tables[i] = &s->coeff_vlc[ac_y_table + 32];
1360  c_tables[i] = &s->coeff_vlc[ac_c_table + 32];
1361  }
1362  for (int i = 15; i <= 27; i++) {
1363  /* AC VLC table group 3 */
1364  y_tables[i] = &s->coeff_vlc[ac_y_table + 48];
1365  c_tables[i] = &s->coeff_vlc[ac_c_table + 48];
1366  }
1367  for (int i = 28; i <= 63; i++) {
1368  /* AC VLC table group 4 */
1369  y_tables[i] = &s->coeff_vlc[ac_y_table + 64];
1370  c_tables[i] = &s->coeff_vlc[ac_c_table + 64];
1371  }
1372 
1373  /* decode all AC coefficients */
1374  for (int i = 1; i <= 63; i++) {
1375  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1376  0, residual_eob_run);
1377  if (residual_eob_run < 0)
1378  return residual_eob_run;
1379 
1380  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1381  1, residual_eob_run);
1382  if (residual_eob_run < 0)
1383  return residual_eob_run;
1384  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1385  2, residual_eob_run);
1386  if (residual_eob_run < 0)
1387  return residual_eob_run;
1388  }
1389 
1390  return 0;
1391 }
1392 
1393 #if CONFIG_VP4_DECODER
1394 /**
1395  * eob_tracker[] is used instead of TOKEN_EOB(value);
1396  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant() work.
1397  *
1398  * @return < 0 on error
1399  */
1400 static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1401  const VLC *vlc_tables[64],
1402  int plane, int eob_tracker[64], int fragment)
1403 {
1404  int token;
1405  int zero_run = 0;
1406  int16_t coeff = 0;
1407  int coeff_i = 0;
1408  int eob_run;
1409 
1410  while (!eob_tracker[coeff_i]) {
1411  if (get_bits_left(gb) < 1)
1412  return AVERROR_INVALIDDATA;
1413 
1414  token = get_vlc2(gb, vlc_tables[coeff_i]->table, 11, 3);
1415 
1416  /* use the token to get a zero run, a coefficient, and an eob run */
1417  if ((unsigned) token <= 6U) {
1418  eob_run = get_eob_run(gb, token);
1419  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1420  eob_tracker[coeff_i] = eob_run - 1;
1421  return 0;
1422  } else if (token >= 0) {
1423  zero_run = get_coeff(gb, token, &coeff);
1424 
1425  if (zero_run) {
1426  if (coeff_i + zero_run > 64) {
1427  av_log(s->avctx, AV_LOG_DEBUG,
1428  "Invalid zero run of %d with %d coeffs left\n",
1429  zero_run, 64 - coeff_i);
1430  zero_run = 64 - coeff_i;
1431  }
1432  *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
1433  coeff_i += zero_run;
1434  } else {
1435  if (!coeff_i)
1436  s->all_fragments[fragment].dc = coeff;
1437 
1438  *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
1439  }
1440  coeff_i++;
1441  if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
1442  return 0; /* stop */
1443  } else {
1444  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1445  return -1;
1446  }
1447  }
1448  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1449  eob_tracker[coeff_i]--;
1450  return 0;
1451 }
1452 
1453 static void vp4_dc_predictor_reset(VP4Predictor *p)
1454 {
1455  p->dc = 0;
1456  p->type = VP4_DC_UNDEFINED;
1457 }
1458 
1459 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1460 {
1461  for (int i = 0; i < 4; i++)
1462  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1463 
1464  for (int j = 1; j < 5; j++)
1465  for (int i = 0; i < 4; i++)
1466  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1467 }
1468 
1469 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1470 {
1471  for (int i = 0; i < 4; i++)
1472  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1473 
1474  for (int i = 1; i < 5; i++)
1475  dc_pred[i][0] = dc_pred[i][4];
1476 }
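/* Illustrative note (not part of the original file): dc_pred[][] is a 6x6
 * window around the current superblock's 4x4 fragments. Row 0 holds the
 * bottom row of the superblock above (restored from dc_pred_row by
 * vp4_dc_pred_before()), column 0 holds the rightmost column of the
 * superblock to the left (saved by vp4_dc_pred_after()), and the inner 4x4
 * holds the current superblock, so vp4_dc_pred() can reach the up/down
 * neighbours at offsets -6/+6 and the left/right neighbours at -1/+1 with
 * plain pointer arithmetic. */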
1477 
1478 /* note: dc_pred points to the current block */
1479 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1480 {
1481  int count = 0;
1482  int dc = 0;
1483 
1484  if (dc_pred[-6].type == type) {
1485  dc += dc_pred[-6].dc;
1486  count++;
1487  }
1488 
1489  if (dc_pred[6].type == type) {
1490  dc += dc_pred[6].dc;
1491  count++;
1492  }
1493 
1494  if (count != 2 && dc_pred[-1].type == type) {
1495  dc += dc_pred[-1].dc;
1496  count++;
1497  }
1498 
1499  if (count != 2 && dc_pred[1].type == type) {
1500  dc += dc_pred[1].dc;
1501  count++;
1502  }
1503 
1504  /* using division instead of shift to correctly handle negative values */
1505  return count == 2 ? dc / 2 : last_dc[type];
1506 }
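/* Example (illustrative): why the comment above insists on division.
 * (-3) / 2 == -1 in C (truncation toward zero), while (-3) >> 1 == -2 on the
 * usual arithmetic-shift implementations, so a shift would bias negative DC
 * predictions downward. */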
1507 
1508 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1509 {
1510  int16_t *base = s->dct_tokens_base;
1511  for (int plane = 0; plane < 3; plane++) {
1512  for (int i = 0; i < 64; i++) {
1513  s->dct_tokens[plane][i] = base;
1514  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1515  }
1516  }
1517 }
1518 
1519 static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1520 {
1521  int dc_y_table;
1522  int dc_c_table;
1523  int ac_y_table;
1524  int ac_c_table;
1525  const VLC *tables[2][64];
1526  int eob_tracker[64];
1527  VP4Predictor dc_pred[6][6];
1528  int last_dc[NB_VP4_DC_TYPES];
1529 
1530  if (get_bits_left(gb) < 16)
1531  return AVERROR_INVALIDDATA;
1532 
1533  /* fetch the DC table indexes */
1534  dc_y_table = get_bits(gb, 4);
1535  dc_c_table = get_bits(gb, 4);
1536 
1537  ac_y_table = get_bits(gb, 4);
1538  ac_c_table = get_bits(gb, 4);
1539 
1540  /* build tables of DC/AC VLC tables */
1541 
1542  /* DC table group */
1543  tables[0][0] = &s->coeff_vlc[dc_y_table];
1544  tables[1][0] = &s->coeff_vlc[dc_c_table];
1545  for (int i = 1; i <= 5; i++) {
1546  /* AC VLC table group 1 */
1547  tables[0][i] = &s->coeff_vlc[ac_y_table + 16];
1548  tables[1][i] = &s->coeff_vlc[ac_c_table + 16];
1549  }
1550  for (int i = 6; i <= 14; i++) {
1551  /* AC VLC table group 2 */
1552  tables[0][i] = &s->coeff_vlc[ac_y_table + 32];
1553  tables[1][i] = &s->coeff_vlc[ac_c_table + 32];
1554  }
1555  for (int i = 15; i <= 27; i++) {
1556  /* AC VLC table group 3 */
1557  tables[0][i] = &s->coeff_vlc[ac_y_table + 48];
1558  tables[1][i] = &s->coeff_vlc[ac_c_table + 48];
1559  }
1560  for (int i = 28; i <= 63; i++) {
1561  /* AC VLC table group 4 */
1562  tables[0][i] = &s->coeff_vlc[ac_y_table + 64];
1563  tables[1][i] = &s->coeff_vlc[ac_c_table + 64];
1564  }
1565 
1566  vp4_set_tokens_base(s);
1567 
1568  memset(last_dc, 0, sizeof(last_dc));
1569 
1570  for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
1571  memset(eob_tracker, 0, sizeof(eob_tracker));
1572 
1573  /* initialise dc prediction */
1574  for (int i = 0; i < s->fragment_width[!!plane]; i++)
1575  vp4_dc_predictor_reset(&s->dc_pred_row[i]);
1576 
1577  for (int j = 0; j < 6; j++)
1578  for (int i = 0; i < 6; i++)
1579  vp4_dc_predictor_reset(&dc_pred[j][i]);
1580 
1581  for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
1582  for (int sb_x = 0; sb_x * 4 < s->fragment_width[!!plane]; sb_x++) {
1583  vp4_dc_pred_before(s, dc_pred, sb_x);
1584  for (int j = 0; j < 16; j++) {
1585  int hx = hilbert_offset[j][0];
1586  int hy = hilbert_offset[j][1];
1587  int x = 4 * sb_x + hx;
1588  int y = 4 * sb_y + hy;
1589  VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
1590  int fragment, dc_block_type;
1591 
1592  if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
1593  continue;
1594 
1595  fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;
1596 
1597  if (s->all_fragments[fragment].coding_method == MODE_COPY)
1598  continue;
1599 
1600  if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1601  return -1;
1602 
1603  dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];
1604 
1605  s->all_fragments[fragment].dc +=
1606  vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1607 
1608  this_dc_pred->type = dc_block_type,
1609  this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
1610  }
1611  vp4_dc_pred_after(s, dc_pred, sb_x);
1612  }
1613  }
1614  }
1615 
1616  vp4_set_tokens_base(s);
1617 
1618  return 0;
1619 }
1620 #endif
1621 
1622 /*
1623  * This function reverses the DC prediction for each coded fragment in
1624  * the frame. Much of this function is adapted directly from the original
1625  * VP3 source code.
1626  */
1627 #define COMPATIBLE_FRAME(x) \
1628  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1629 #define DC_COEFF(u) s->all_fragments[u].dc
1630 
1631 static void reverse_dc_prediction(Vp3DecodeContext *s,
1632  int first_fragment,
1633  int fragment_width,
1634  int fragment_height)
1635 {
1636 #define PUL 8
1637 #define PU 4
1638 #define PUR 2
1639 #define PL 1
1640 
1641  int i = first_fragment;
1642 
1643  int predicted_dc;
1644 
1645  /* DC values for the left, up-left, up, and up-right fragments */
1646  int vl, vul, vu, vur;
1647 
1648  /* indexes for the left, up-left, up, and up-right fragments */
1649  int l, ul, u, ur;
1650 
1651  /*
1652  * The 6 fields mean:
1653  * 0: up-left multiplier
1654  * 1: up multiplier
1655  * 2: up-right multiplier
1656  * 3: left multiplier
1657  */
1658  static const int predictor_transform[16][4] = {
1659  { 0, 0, 0, 0 },
1660  { 0, 0, 0, 128 }, // PL
1661  { 0, 0, 128, 0 }, // PUR
1662  { 0, 0, 53, 75 }, // PUR|PL
1663  { 0, 128, 0, 0 }, // PU
1664  { 0, 64, 0, 64 }, // PU |PL
1665  { 0, 128, 0, 0 }, // PU |PUR
1666  { 0, 0, 53, 75 }, // PU |PUR|PL
1667  { 128, 0, 0, 0 }, // PUL
1668  { 0, 0, 0, 128 }, // PUL|PL
1669  { 64, 0, 64, 0 }, // PUL|PUR
1670  { 0, 0, 53, 75 }, // PUL|PUR|PL
1671  { 0, 128, 0, 0 }, // PUL|PU
1672  { -104, 116, 0, 116 }, // PUL|PU |PL
1673  { 24, 80, 24, 0 }, // PUL|PU |PUR
1674  { -104, 116, 0, 116 } // PUL|PU |PUR|PL
1675  };
1676 
1677  /* This table shows which types of blocks can use other blocks for
1678  * prediction. For example, INTRA is the only mode in this table to
1679  * have a frame number of 0. That means INTRA blocks can only predict
1680  * from other INTRA blocks. There are 2 golden frame coding types;
1681  * blocks encoded in these modes can only predict from other blocks
1682  * that were encoded with 1 of these 2 modes. */
1683  static const unsigned char compatible_frame[9] = {
1684  1, /* MODE_INTER_NO_MV */
1685  0, /* MODE_INTRA */
1686  1, /* MODE_INTER_PLUS_MV */
1687  1, /* MODE_INTER_LAST_MV */
1688  1, /* MODE_INTER_PRIOR_LAST */
1689  2, /* MODE_USING_GOLDEN */
1690  2, /* MODE_GOLDEN_MV */
1691  1, /* MODE_INTER_FOURMV */
1692  3 /* MODE_COPY */
1693  };
1694  int current_frame_type;
1695 
1696  /* there is a last DC predictor for each of the 3 frame types */
1697  short last_dc[3];
1698 
1699  int transform = 0;
1700 
1701  vul =
1702  vu =
1703  vur =
1704  vl = 0;
1705  last_dc[0] =
1706  last_dc[1] =
1707  last_dc[2] = 0;
1708 
1709  /* for each fragment row... */
1710  for (int y = 0; y < fragment_height; y++) {
1711  /* for each fragment in a row... */
1712  for (int x = 0; x < fragment_width; x++, i++) {
1713 
1714  /* reverse prediction if this block was coded */
1715  if (s->all_fragments[i].coding_method != MODE_COPY) {
1716  current_frame_type =
1717  compatible_frame[s->all_fragments[i].coding_method];
1718 
1719  transform = 0;
1720  if (x) {
1721  l = i - 1;
1722  vl = DC_COEFF(l);
1723  if (COMPATIBLE_FRAME(l))
1724  transform |= PL;
1725  }
1726  if (y) {
1727  u = i - fragment_width;
1728  vu = DC_COEFF(u);
1729  if (COMPATIBLE_FRAME(u))
1730  transform |= PU;
1731  if (x) {
1732  ul = i - fragment_width - 1;
1733  vul = DC_COEFF(ul);
1734  if (COMPATIBLE_FRAME(ul))
1735  transform |= PUL;
1736  }
1737  if (x + 1 < fragment_width) {
1738  ur = i - fragment_width + 1;
1739  vur = DC_COEFF(ur);
1740  if (COMPATIBLE_FRAME(ur))
1741  transform |= PUR;
1742  }
1743  }
1744 
1745  if (transform == 0) {
1746  /* if there were no fragments to predict from, use last
1747  * DC saved */
1748  predicted_dc = last_dc[current_frame_type];
1749  } else {
1750  /* apply the appropriate predictor transform */
1751  predicted_dc =
1752  (predictor_transform[transform][0] * vul) +
1753  (predictor_transform[transform][1] * vu) +
1754  (predictor_transform[transform][2] * vur) +
1755  (predictor_transform[transform][3] * vl);
1756 
1757  predicted_dc /= 128;
1758 
1759  /* check for outranging on the [ul u l] and
1760  * [ul u ur l] predictors */
1761  if ((transform == 15) || (transform == 13)) {
1762  if (FFABS(predicted_dc - vu) > 128)
1763  predicted_dc = vu;
1764  else if (FFABS(predicted_dc - vl) > 128)
1765  predicted_dc = vl;
1766  else if (FFABS(predicted_dc - vul) > 128)
1767  predicted_dc = vul;
1768  }
1769  }
1770 
1771  /* at long last, apply the predictor */
1772  DC_COEFF(i) += predicted_dc;
1773  /* save the DC */
1774  last_dc[current_frame_type] = DC_COEFF(i);
1775  }
1776  }
1777  }
1778 }
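/* Worked example (illustrative): with the left, up and up-left neighbours
 * all compatible (transform == PUL|PU|PL == 13) and vl = 100, vu = 110,
 * vul = 105, the prediction is
 * (-104 * 105 + 116 * 110 + 0 * vur + 116 * 100) / 128 = 13440 / 128 = 105,
 * and the outranging check leaves it unchanged since |105 - 110| <= 128. */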
1779 
1780 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1781  int ystart, int yend)
1782 {
1783  int *bounding_values = s->bounding_values_array + 127;
1784 
1785  int width = s->fragment_width[!!plane];
1786  int height = s->fragment_height[!!plane];
1787  int fragment = s->fragment_start[plane] + ystart * width;
1788  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1789  uint8_t *plane_data = s->current_frame.f->data[plane];
1790  if (!s->flipped_image)
1791  stride = -stride;
1792  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1793 
1794  for (int y = ystart; y < yend; y++) {
1795  for (int x = 0; x < width; x++) {
1796  /* This code basically just deblocks on the edges of coded blocks.
1797  * However, it has to be much more complicated because of the
1798  * brain damaged deblock ordering used in VP3/Theora. Order matters
1799  * because some pixels get filtered twice. */
1800  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1801  /* do not perform left edge filter for left columns frags */
1802  if (x > 0) {
1803  s->vp3dsp.h_loop_filter(
1804  plane_data + 8 * x,
1805  stride, bounding_values);
1806  }
1807 
1808  /* do not perform top edge filter for top row fragments */
1809  if (y > 0) {
1810  s->vp3dsp.v_loop_filter(
1811  plane_data + 8 * x,
1812  stride, bounding_values);
1813  }
1814 
1815  /* do not perform right edge filter for right column
1816  * fragments or if right fragment neighbor is also coded
1817  * in this frame (it will be filtered in next iteration) */
1818  if ((x < width - 1) &&
1819  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1820  s->vp3dsp.h_loop_filter(
1821  plane_data + 8 * x + 8,
1822  stride, bounding_values);
1823  }
1824 
1825  /* do not perform bottom edge filter for bottom row
1826  * fragments or if bottom fragment neighbor is also coded
1827  * in this frame (it will be filtered in the next row) */
1828  if ((y < height - 1) &&
1829  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1830  s->vp3dsp.v_loop_filter(
1831  plane_data + 8 * x + 8 * stride,
1832  stride, bounding_values);
1833  }
1834  }
1835 
1836  fragment++;
1837  }
1838  plane_data += 8 * stride;
1839  }
1840 }
1841 
1842 /**
1843  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1844  * for the next block in coding order
1845  */
1846 static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
1847  int plane, int inter, int16_t block[64])
1848 {
1849  const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1850  const uint8_t *perm = s->idct_scantable;
1851  int i = 0;
1852 
1853  do {
1854  int token = *s->dct_tokens[plane][i];
1855  switch (token & 3) {
1856  case 0: // EOB
1857  if (--token < 4) // 0-3 are token types so the EOB run must now be 0
1858  s->dct_tokens[plane][i]++;
1859  else
1860  *s->dct_tokens[plane][i] = token & ~3;
1861  goto end;
1862  case 1: // zero run
1863  s->dct_tokens[plane][i]++;
1864  i += (token >> 2) & 0x7f;
1865  if (i > 63) {
1866  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1867  return i;
1868  }
1869  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1870  i++;
1871  break;
1872  case 2: // coeff
1873  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1874  s->dct_tokens[plane][i++]++;
1875  break;
1876  default: // shouldn't happen
1877  return i;
1878  }
1879  } while (i < 64);
1880  // return value is expected to be a valid level
1881  i--;
1882 end:
1883  // the actual DC+prediction is in the fragment structure
1884  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1885  return i;
1886 }
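/* A sketch of the token layout implied by the shifts above (the packing
 * itself is done by the TOKEN_* macros defined earlier in this file):
 * bits 0-1 carry the token type -- 0 = end-of-block run, 1 = zero run
 * followed by a coefficient, 2 = plain coefficient. For a zero-run token the
 * run length lives in bits 2-8 and the coefficient in bits 9 and up; for a
 * plain coefficient token the value lives in bits 2 and up. For example, a
 * run of 3 zeros followed by the coefficient 5 would be stored as
 * (5 << 9) | (3 << 2) | 1 = 2573, and the code above recovers
 * 2573 & 3 = 1, (2573 >> 2) & 0x7f = 3 and 2573 >> 9 = 5. */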
1887 
1888 /**
1889  * called when all pixels up to row y are complete
1890  */
1891 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1892 {
1893  int h, cy;
1894  int offset[AV_NUM_DATA_POINTERS];
1895 
1896  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1897  int y_flipped = s->flipped_image ? s->height - y : y;
1898 
1899  /* At the end of the frame, report INT_MAX instead of the height of
1900  * the frame. This makes the other threads' ff_thread_await_progress()
1901  * calls cheaper, because they don't have to clip their values. */
1902  ff_thread_report_progress(&s->current_frame,
1903  y_flipped == s->height ? INT_MAX
1904  : y_flipped - 1,
1905  0);
1906  }
1907 
1908  if (!s->avctx->draw_horiz_band)
1909  return;
1910 
1911  h = y - s->last_slice_end;
1912  s->last_slice_end = y;
1913  y -= h;
1914 
1915  if (!s->flipped_image)
1916  y = s->height - y - h;
1917 
1918  cy = y >> s->chroma_y_shift;
1919  offset[0] = s->current_frame.f->linesize[0] * y;
1920  offset[1] = s->current_frame.f->linesize[1] * cy;
1921  offset[2] = s->current_frame.f->linesize[2] * cy;
1922  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
1923  offset[i] = 0;
1924 
1925  emms_c();
1926  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1927 }
1928 
1929 /**
1930  * Wait for the reference frame of the current fragment.
1931  * The progress value is in luma pixel rows.
1932  */
1933 static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment,
1934  int motion_y, int y)
1935 {
1936  const ThreadFrame *ref_frame;
1937  int ref_row;
1938  int border = motion_y & 1;
1939 
1940  if (fragment->coding_method == MODE_USING_GOLDEN ||
1941  fragment->coding_method == MODE_GOLDEN_MV)
1942  ref_frame = &s->golden_frame;
1943  else
1944  ref_frame = &s->last_frame;
1945 
1946  ref_row = y + (motion_y >> 1);
1947  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1948 
1949  ff_thread_await_progress(ref_frame, ref_row, 0);
1950 }
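/* A worked example of the row computation above, with values chosen purely
 * for illustration: for y = 16 and motion_y = -5, border = (-5 & 1) = 1 and
 * motion_y >> 1 = -3, so ref_row starts at 13 and becomes
 * FFMAX(FFABS(13), 13 + 8 + 1) = 22 -- enough reference rows for the 8-line
 * block plus the extra line needed for half-pel interpolation. The FFABS()
 * term keeps the awaited row non-negative when the vector points above the
 * top of the reference frame. */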
1951 
1952 #if CONFIG_VP4_DECODER
1953 /**
1954  * @return non-zero if temp (edge_emu_buffer) was populated
1955  */
1956 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1957  const uint8_t *motion_source, ptrdiff_t stride,
1958  int src_x, int src_y, uint8_t *temp)
1959 {
1960  int motion_shift = plane ? 4 : 2;
1961  int subpel_mask = plane ? 3 : 1;
1962  int *bounding_values = s->bounding_values_array + 127;
1963 
1964  int x, y;
1965  int x2, y2;
1966  int x_subpel, y_subpel;
1967  int x_offset, y_offset;
1968 
1969  int block_width = plane ? 8 : 16;
1970  int plane_width = s->width >> (plane && s->chroma_x_shift);
1971  int plane_height = s->height >> (plane && s->chroma_y_shift);
1972 
1973 #define loop_stride 12
1974  uint8_t loop[12 * loop_stride];
1975 
1976  /* using division instead of shift to correctly handle negative values */
1977  x = 8 * bx + motion_x / motion_shift;
1978  y = 8 * by + motion_y / motion_shift;
1979 
1980  x_subpel = motion_x & subpel_mask;
1981  y_subpel = motion_y & subpel_mask;
1982 
1983  if (x_subpel || y_subpel) {
1984  x--;
1985  y--;
1986 
1987  if (x_subpel)
1988  x = FFMIN(x, x + FFSIGN(motion_x));
1989 
1990  if (y_subpel)
1991  y = FFMIN(y, y + FFSIGN(motion_y));
1992 
1993  x2 = x + block_width;
1994  y2 = y + block_width;
1995 
1996  if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
1997  return 0;
1998 
1999  x_offset = (-(x + 2) & 7) + 2;
2000  y_offset = (-(y + 2) & 7) + 2;
2001 
2002  if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
2003  return 0;
2004 
2005  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2006  loop_stride, stride,
2007  12, 12, src_x - 1, src_y - 1,
2008  plane_width,
2009  plane_height);
2010 
2011  if (x_offset <= 8 + x_subpel)
2012  ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);
2013 
2014  if (y_offset <= 8 + y_subpel)
2015  ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);
2016 
2017  } else {
2018 
2019  x_offset = -x & 7;
2020  y_offset = -y & 7;
2021 
2022  if (!x_offset && !y_offset)
2023  return 0;
2024 
2025  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2026  loop_stride, stride,
2027  12, 12, src_x - 1, src_y - 1,
2028  plane_width,
2029  plane_height);
2030 
2031 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2032  if ((uintptr_t)(ptr) & 7) \
2033  s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2034  else \
2035  s->vp3dsp.name(ptr, stride, bounding_values);
2036 
2037  if (x_offset)
2038  safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2039 
2040  if (y_offset)
2041  safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
2042  }
2043 
2044  for (int i = 0; i < 9; i++)
2045  memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);
2046 
2047  return 1;
2048 }
2049 #endif
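/* How the helper above appears to work: a 12x12 window around the top-left
 * corner of the reference block (the data itself, the extra row/column
 * needed for half-pel interpolation, and a one-pixel border for the filter
 * taps) is fetched with emulated_edge_mc() into the local `loop' buffer,
 * the loop filter is run across the reference-block edge that the
 * motion-compensated read straddles, and the filtered 9x9 region starting
 * at offset (1, 1) is copied into `temp' (edge_emu_buffer) so the caller
 * can use it in place of motion_source. A return value of 0 means no
 * filtering was needed and the caller keeps reading the reference plane
 * directly. */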
2050 
2051 /*
2052  * Perform the final rendering for a particular slice of data.
2053  * The slice number ranges from 0..(c_superblock_height - 1).
2054  */
2055 static void render_slice(Vp3DecodeContext *s, int slice)
2056 {
2057  int16_t *block = s->block;
2058  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2059  /* When decoding keyframes, the earlier frames may not be available,
2060  * so to avoid using undefined pointer arithmetic on them we just
2061  * use the current frame instead. Nothing is ever read from these
2062  * frames in case of a keyframe. */
2063  const AVFrame *last_frame = s->last_frame.f->data[0] ?
2064  s->last_frame.f : s->current_frame.f;
2065  const AVFrame *golden_frame = s->golden_frame.f->data[0] ?
2066  s->golden_frame.f : s->current_frame.f;
2067  int motion_halfpel_index;
2068  int first_pixel;
2069 
2070  if (slice >= s->c_superblock_height)
2071  return;
2072 
2073  for (int plane = 0; plane < 3; plane++) {
2074  uint8_t *output_plane = s->current_frame.f->data[plane] +
2075  s->data_offset[plane];
2076  const uint8_t *last_plane = last_frame->data[plane] +
2077  s->data_offset[plane];
2078  const uint8_t *golden_plane = golden_frame->data[plane] +
2079  s->data_offset[plane];
2080  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2081  int plane_width = s->width >> (plane && s->chroma_x_shift);
2082  int plane_height = s->height >> (plane && s->chroma_y_shift);
2083  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2084 
2085  int sb_y = slice << (!plane && s->chroma_y_shift);
2086  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2087  int slice_width = plane ? s->c_superblock_width
2088  : s->y_superblock_width;
2089 
2090  int fragment_width = s->fragment_width[!!plane];
2091  int fragment_height = s->fragment_height[!!plane];
2092  int fragment_start = s->fragment_start[plane];
2093 
2094  int do_await = !plane && HAVE_THREADS &&
2095  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2096 
2097  if (!s->flipped_image)
2098  stride = -stride;
2099  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2100  continue;
2101 
2102  /* for each superblock row in the slice (both of them)... */
2103  for (; sb_y < slice_height; sb_y++) {
2104  /* for each superblock in a row... */
2105  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2106  /* for each block in a superblock... */
2107  for (int j = 0; j < 16; j++) {
2108  int x = 4 * sb_x + hilbert_offset[j][0];
2109  int y = 4 * sb_y + hilbert_offset[j][1];
2110  int fragment = y * fragment_width + x;
2111 
2112  int i = fragment_start + fragment;
2113 
2114  // bounds check
2115  if (x >= fragment_width || y >= fragment_height)
2116  continue;
2117 
2118  first_pixel = 8 * y * stride + 8 * x;
2119 
2120  if (do_await &&
2121  s->all_fragments[i].coding_method != MODE_INTRA)
2122  await_reference_row(s, &s->all_fragments[i],
2123  motion_val[fragment][1],
2124  (16 * y) >> s->chroma_y_shift);
2125 
2126  /* transform if this block was coded */
2127  if (s->all_fragments[i].coding_method != MODE_COPY) {
2128  const uint8_t *motion_source;
2129  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2130  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2131  motion_source = golden_plane;
2132  else
2133  motion_source = last_plane;
2134 
2135  motion_source += first_pixel;
2136  motion_halfpel_index = 0;
2137 
2138  /* sort out the motion vector if this fragment is coded
2139  * using a motion vector method */
2140  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2141  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2142  int src_x, src_y;
2143  int standard_mc = 1;
2144  motion_x = motion_val[fragment][0];
2145  motion_y = motion_val[fragment][1];
2146 #if CONFIG_VP4_DECODER
2147  if (plane && s->version >= 2) {
2148  motion_x = (motion_x >> 1) | (motion_x & 1);
2149  motion_y = (motion_y >> 1) | (motion_y & 1);
2150  }
2151 #endif
2152 
2153  src_x = (motion_x >> 1) + 8 * x;
2154  src_y = (motion_y >> 1) + 8 * y;
2155 
2156  motion_halfpel_index = motion_x & 0x01;
2157  motion_source += (motion_x >> 1);
2158 
2159  motion_halfpel_index |= (motion_y & 0x01) << 1;
2160  motion_source += ((motion_y >> 1) * stride);
2161 
2162 #if CONFIG_VP4_DECODER
2163  if (s->version >= 2) {
2164  uint8_t *temp = s->edge_emu_buffer;
2165  if (stride < 0)
2166  temp -= 8 * stride;
2167  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2168  motion_source = temp;
2169  standard_mc = 0;
2170  }
2171  }
2172 #endif
2173 
2174  if (standard_mc && (
2175  src_x < 0 || src_y < 0 ||
2176  src_x + 9 >= plane_width ||
2177  src_y + 9 >= plane_height)) {
2178  uint8_t *temp = s->edge_emu_buffer;
2179  if (stride < 0)
2180  temp -= 8 * stride;
2181 
2182  s->vdsp.emulated_edge_mc(temp, motion_source,
2183  stride, stride,
2184  9, 9, src_x, src_y,
2185  plane_width,
2186  plane_height);
2187  motion_source = temp;
2188  }
2189  }
2190 
2191  /* first, take care of copying a block from either the
2192  * previous or the golden frame */
2193  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2194  /* Note, it is possible to implement all MC cases
2195  * with put_no_rnd_pixels_l2 which would look more
2196  * like the VP3 source but this would be slower as
2197  * put_no_rnd_pixels_tab is better optimized */
2198  if (motion_halfpel_index != 3) {
2199  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2200  output_plane + first_pixel,
2201  motion_source, stride, 8);
2202  } else {
2203  /* d is 0 if motion_x and _y have the same sign,
2204  * else -1 */
2205  int d = (motion_x ^ motion_y) >> 31;
2206  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2207  motion_source - d,
2208  motion_source + stride + 1 + d,
2209  stride, 8);
2210  }
2211  }
2212 
2213  /* invert DCT and place (or add) in final output */
2214 
2215  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2216  vp3_dequant(s, s->all_fragments + i,
2217  plane, 0, block);
2218  s->vp3dsp.idct_put(output_plane + first_pixel,
2219  stride,
2220  block);
2221  } else {
2222  if (vp3_dequant(s, s->all_fragments + i,
2223  plane, 1, block)) {
2224  s->vp3dsp.idct_add(output_plane + first_pixel,
2225  stride,
2226  block);
2227  } else {
2228  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2229  stride, block);
2230  }
2231  }
2232  } else {
2233  /* copy directly from the previous frame */
2234  s->hdsp.put_pixels_tab[1][0](
2235  output_plane + first_pixel,
2236  last_plane + first_pixel,
2237  stride, 8);
2238  }
2239  }
2240  }
2241 
2242  // Filter up to the last row in the superblock row
2243  if (s->version < 2 && !s->skip_loop_filter)
2244  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2245  FFMIN(4 * sb_y + 3, fragment_height - 1));
2246  }
2247  }
2248 
2249  /* this looks like a good place for slice dispatch... */
2250  /* algorithm:
2251  * if (slice == s->macroblock_height - 1)
2252  * dispatch (both last slice & 2nd-to-last slice);
2253  * else if (slice > 0)
2254  * dispatch (slice - 1);
2255  */
2256 
2257  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2258  s->height - 16));
2259 }
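/* A note on the half-pel handling above: motion_halfpel_index packs the
 * horizontal half-pel flag (motion_x & 1) into bit 0 and the vertical flag
 * (motion_y & 1) into bit 1. Values 0-2 map directly onto
 * put_no_rnd_pixels_tab entries (full-pel copy, horizontal average,
 * vertical average); value 3 -- both components at half-pel -- is handled
 * with put_no_rnd_pixels_l2(), which averages two diagonally offset
 * sources, with d = (motion_x ^ motion_y) >> 31 picking the diagonal
 * according to whether the two components share a sign. */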
2260 
2261 /// Allocate tables for per-frame data in Vp3DecodeContext
2262 static av_cold int allocate_tables(AVCodecContext *avctx)
2263 {
2264  Vp3DecodeContext *s = avctx->priv_data;
2265  int y_fragment_count, c_fragment_count;
2266 
2267  free_tables(avctx);
2268 
2269  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2270  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2271 
2272  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2273  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2274  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2275 
2276  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2277  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2278  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2279 
2280  s->dct_tokens_base = av_calloc(s->fragment_count,
2281  64 * sizeof(*s->dct_tokens_base));
2282  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2283  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2284 
2285  /* work out the block mapping tables */
2286  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2287  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2288 
2289  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2290 
2291  if (!s->superblock_coding || !s->all_fragments ||
2292  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2293  !s->nkf_coded_fragment_list ||
2294  !s->superblock_fragments || !s->macroblock_coding ||
2295  !s->dc_pred_row ||
2296  !s->motion_val[0] || !s->motion_val[1]) {
2297  return -1;
2298  }
2299 
2300  init_block_mapping(s);
2301 
2302  return 0;
2303 }
2304 
2305 static av_cold int init_frames(Vp3DecodeContext *s)
2306 {
2307  s->current_frame.f = av_frame_alloc();
2308  s->last_frame.f = av_frame_alloc();
2309  s->golden_frame.f = av_frame_alloc();
2310 
2311  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2312  return AVERROR(ENOMEM);
2313 
2314  return 0;
2315 }
2316 
2317 static av_cold int vp3_decode_init(AVCodecContext *avctx)
2318 {
2319  Vp3DecodeContext *s = avctx->priv_data;
2320  int ret;
2321  int c_width;
2322  int c_height;
2323  int y_fragment_count, c_fragment_count;
2324 
2325  ret = init_frames(s);
2326  if (ret < 0)
2327  return ret;
2328 
2329  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2330  s->version = 3;
2331 #if !CONFIG_VP4_DECODER
2332  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2333  return AVERROR_DECODER_NOT_FOUND;
2334 #endif
2335  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2336  s->version = 0;
2337  else
2338  s->version = 1;
2339 
2340  s->avctx = avctx;
2341  s->width = FFALIGN(avctx->coded_width, 16);
2342  s->height = FFALIGN(avctx->coded_height, 16);
2343  if (s->width < 18)
2344  return AVERROR_PATCHWELCOME;
2345  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2346  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2348  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2349  ff_videodsp_init(&s->vdsp, 8);
2350  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2351 
2352  for (int i = 0; i < 64; i++) {
2353 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2354  s->idct_permutation[i] = TRANSPOSE(i);
2355  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2356 #undef TRANSPOSE
2357  }
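/* TRANSPOSE() swaps the row and column of a 0..63 coefficient index: for
 * example TRANSPOSE(10) = (10 >> 3) | ((10 & 7) << 3) = 1 | 16 = 17, so
 * position (row 1, column 2) maps to (row 2, column 1). Applying it to
 * ff_zigzag_direct transposes the scan order once at init time, which
 * suggests the IDCT used by this decoder expects its coefficients in a
 * transposed layout. */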
2358 
2359  /* initialize to an impossible value which will force a recalculation
2360  * in the first frame decode */
2361  for (int i = 0; i < 3; i++)
2362  s->qps[i] = -1;
2363 
2364  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2365  if (ret)
2366  return ret;
2367 
2368  s->y_superblock_width = (s->width + 31) / 32;
2369  s->y_superblock_height = (s->height + 31) / 32;
2370  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2371 
2372  /* work out the dimensions for the C planes */
2373  c_width = s->width >> s->chroma_x_shift;
2374  c_height = s->height >> s->chroma_y_shift;
2375  s->c_superblock_width = (c_width + 31) / 32;
2376  s->c_superblock_height = (c_height + 31) / 32;
2377  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2378 
2379  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2380  s->u_superblock_start = s->y_superblock_count;
2381  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2382 
2383  s->macroblock_width = (s->width + 15) / 16;
2384  s->macroblock_height = (s->height + 15) / 16;
2385  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2386  s->c_macroblock_width = (c_width + 15) / 16;
2387  s->c_macroblock_height = (c_height + 15) / 16;
2388  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2389  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2390 
2391  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2392  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2393  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2394  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2395 
2396  /* fragment count covers all 8x8 blocks for all 3 planes */
2397  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2398  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2399  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2400  s->fragment_start[1] = y_fragment_count;
2401  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2402 
2403  if (!s->theora_tables) {
2404  const uint8_t (*bias_tabs)[32][2];
2405 
2406  for (int i = 0; i < 64; i++) {
2407  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2408  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2409  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2410  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2411  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2412  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2413  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2414  }
2415 
2416  for (int inter = 0; inter < 2; inter++) {
2417  for (int plane = 0; plane < 3; plane++) {
2418  s->qr_count[inter][plane] = 1;
2419  s->qr_size[inter][plane][0] = 63;
2420  s->qr_base[inter][plane][0] =
2421  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2422  }
2423  }
2424 
2425  /* init VLC tables */
2426  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2427  for (int i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2428  ret = ff_vlc_init_from_lengths(&s->coeff_vlc[i], 11, 32,
2429  &bias_tabs[i][0][1], 2,
2430  &bias_tabs[i][0][0], 2, 1,
2431  0, 0, avctx);
2432  if (ret < 0)
2433  return ret;
2434  }
2435  } else {
2436  for (int i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2437  const HuffTable *tab = &s->huffman_table[i];
2438 
2439  ret = ff_vlc_init_from_lengths(&s->coeff_vlc[i], 11, tab->nb_entries,
2440  &tab->entries[0].len, sizeof(*tab->entries),
2441  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2442  0, 0, avctx);
2443  if (ret < 0)
2444  return ret;
2445  }
2446  }
2447 
2448  ret = ff_vlc_init_from_lengths(&s->superblock_run_length_vlc, SUPERBLOCK_VLC_BITS, 34,
2450  NULL, 0, 0, 1, 0, avctx);
2451  if (ret < 0)
2452  return ret;
2453 
2454  ret = ff_vlc_init_from_lengths(&s->fragment_run_length_vlc, 5, 30,
2455  fragment_run_length_vlc_len, 1,
2456  NULL, 0, 0, 0, 0, avctx);
2457  if (ret < 0)
2458  return ret;
2459 
2460  ret = ff_vlc_init_from_lengths(&s->mode_code_vlc, 3, 8,
2461  mode_code_vlc_len, 1,
2462  NULL, 0, 0, 0, 0, avctx);
2463  if (ret < 0)
2464  return ret;
2465 
2466  ret = ff_vlc_init_from_lengths(&s->motion_vector_vlc, VP3_MV_VLC_BITS, 63,
2467  &motion_vector_vlc_table[0][1], 2,
2468  &motion_vector_vlc_table[0][0], 2, 1,
2469  -31, 0, avctx);
2470  if (ret < 0)
2471  return ret;
2472 
2473 #if CONFIG_VP4_DECODER
2474  for (int j = 0; j < 2; j++)
2475  for (int i = 0; i < 7; i++) {
2476  ret = ff_vlc_init_from_lengths(&s->vp4_mv_vlc[j][i], VP4_MV_VLC_BITS, 63,
2477  &vp4_mv_vlc[j][i][0][1], 2,
2478  &vp4_mv_vlc[j][i][0][0], 2, 1, -31,
2479  0, avctx);
2480  if (ret < 0)
2481  return ret;
2482  }
2483 
2484  /* version >= 2 */
2485  for (int i = 0; i < 2; i++)
2486  if ((ret = vlc_init(&s->block_pattern_vlc[i], 3, 14,
2487  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2488  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0)) < 0)
2489  return ret;
2490 #endif
2491 
2492  return allocate_tables(avctx);
2493 }
2494 
2495 /// Release and shuffle frames after decode finishes
2496 static int update_frames(AVCodecContext *avctx)
2497 {
2498  Vp3DecodeContext *s = avctx->priv_data;
2499  int ret = 0;
2500 
2501  if (s->keyframe) {
2502  ff_thread_release_ext_buffer(&s->golden_frame);
2503  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
2504  }
2505  /* shuffle frames */
2506  ff_thread_release_ext_buffer(&s->last_frame);
2507  FFSWAP(ThreadFrame, s->last_frame, s->current_frame);
2508 
2509  return ret;
2510 }
2511 
2512 #if HAVE_THREADS
2513 static int ref_frame(ThreadFrame *dst, const ThreadFrame *src)
2514 {
2515  ff_thread_release_ext_buffer(dst);
2516  if (src->f->data[0])
2517  return ff_thread_ref_frame(dst, src);
2518  return 0;
2519 }
2520 
2521 static int ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
2522 {
2523  int ret;
2524  if ((ret = ref_frame(&dst->current_frame, &src->current_frame)) < 0 ||
2525  (ret = ref_frame(&dst->golden_frame, &src->golden_frame)) < 0 ||
2526  (ret = ref_frame(&dst->last_frame, &src->last_frame)) < 0)
2527  return ret;
2528  return 0;
2529 }
2530 
2531 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2532 {
2533  Vp3DecodeContext *s = dst->priv_data;
2534  const Vp3DecodeContext *s1 = src->priv_data;
2535  int qps_changed = 0, err;
2536 
2537  if (!s1->current_frame.f->data[0] ||
2538  s->width != s1->width || s->height != s1->height) {
2539  if (s != s1)
2540  ref_frames(s, s1);
2541  return -1;
2542  }
2543 
2544  if (s != s1) {
2545  // copy previous frame data
2546  if ((err = ref_frames(s, s1)) < 0)
2547  return err;
2548 
2549  s->keyframe = s1->keyframe;
2550 
2551  // copy qscale data if necessary
2552  for (int i = 0; i < 3; i++) {
2553  if (s->qps[i] != s1->qps[1]) {
2554  qps_changed = 1;
2555  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2556  }
2557  }
2558 
2559  if (s->qps[0] != s1->qps[0])
2560  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2561  sizeof(s->bounding_values_array));
2562 
2563  if (qps_changed) {
2564  memcpy(s->qps, s1->qps, sizeof(s->qps));
2565  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2566  s->nqps = s1->nqps;
2567  }
2568  }
2569 
2570  return update_frames(dst);
2571 }
2572 #endif
2573 
2574 static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2575  int *got_frame, AVPacket *avpkt)
2576 {
2577  const uint8_t *buf = avpkt->data;
2578  int buf_size = avpkt->size;
2579  Vp3DecodeContext *s = avctx->priv_data;
2580  GetBitContext gb;
2581  int ret;
2582 
2583  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2584  return ret;
2585 
2586 #if CONFIG_THEORA_DECODER
2587  if (s->theora && get_bits1(&gb)) {
2588  int type = get_bits(&gb, 7);
2589  skip_bits_long(&gb, 6*8); /* "theora" */
2590 
2591  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2592  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2593  return AVERROR_PATCHWELCOME;
2594  }
2595  if (type == 0) {
2596  vp3_decode_end(avctx);
2597  ret = theora_decode_header(avctx, &gb);
2598 
2599  if (ret >= 0)
2600  ret = vp3_decode_init(avctx);
2601  if (ret < 0) {
2602  vp3_decode_end(avctx);
2603  return ret;
2604  }
2605  return buf_size;
2606  } else if (type == 2) {
2607  vp3_decode_end(avctx);
2608  ret = theora_decode_tables(avctx, &gb);
2609  if (ret >= 0)
2610  ret = vp3_decode_init(avctx);
2611  if (ret < 0) {
2612  vp3_decode_end(avctx);
2613  return ret;
2614  }
2615  return buf_size;
2616  }
2617 
2618  av_log(avctx, AV_LOG_ERROR,
2619  "Header packet passed to frame decoder, skipping\n");
2620  return -1;
2621  }
2622 #endif
2623 
2624  s->keyframe = !get_bits1(&gb);
2625  if (!s->all_fragments) {
2626  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2627  return -1;
2628  }
2629  if (!s->theora)
2630  skip_bits(&gb, 1);
2631  for (int i = 0; i < 3; i++)
2632  s->last_qps[i] = s->qps[i];
2633 
2634  s->nqps = 0;
2635  do {
2636  s->qps[s->nqps++] = get_bits(&gb, 6);
2637  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2638  for (int i = s->nqps; i < 3; i++)
2639  s->qps[i] = -1;
2640 
2641  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2642  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2643  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2644 
2645  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2646  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2647  : AVDISCARD_NONKEY);
2648 
2649  if (s->qps[0] != s->last_qps[0])
2650  init_loop_filter(s);
2651 
2652  for (int i = 0; i < s->nqps; i++)
2653  // reinit all dequantizers if the first one changed, because
2654  // the DC of the first quantizer must be used for all matrices
2655  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2656  init_dequantizer(s, i);
2657 
2658  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2659  return buf_size;
2660 
2661  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2662  : AV_PICTURE_TYPE_P;
2663  if (s->keyframe)
2664  s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
2665  else
2666  s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;
2667  if ((ret = ff_thread_get_ext_buffer(avctx, &s->current_frame,
2668  AV_GET_BUFFER_FLAG_REF)) < 0)
2669  goto error;
2670 
2671  if (!s->edge_emu_buffer) {
2672  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2673  if (!s->edge_emu_buffer) {
2674  ret = AVERROR(ENOMEM);
2675  goto error;
2676  }
2677  }
2678 
2679  if (s->keyframe) {
2680  if (!s->theora) {
2681  skip_bits(&gb, 4); /* width code */
2682  skip_bits(&gb, 4); /* height code */
2683  if (s->version) {
2684  int version = get_bits(&gb, 5);
2685 #if !CONFIG_VP4_DECODER
2686  if (version >= 2) {
2687  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2688  return AVERROR_DECODER_NOT_FOUND;
2689  }
2690 #endif
2691  s->version = version;
2692  if (avctx->frame_num == 0)
2693  av_log(s->avctx, AV_LOG_DEBUG,
2694  "VP version: %d\n", s->version);
2695  }
2696  }
2697  if (s->version || s->theora) {
2698  if (get_bits1(&gb))
2699  av_log(s->avctx, AV_LOG_ERROR,
2700  "Warning, unsupported keyframe coding type?!\n");
2701  skip_bits(&gb, 2); /* reserved? */
2702 
2703 #if CONFIG_VP4_DECODER
2704  if (s->version >= 2) {
2705  int mb_height, mb_width;
2706  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2707 
2708  mb_height = get_bits(&gb, 8);
2709  mb_width = get_bits(&gb, 8);
2710  if (mb_height != s->macroblock_height ||
2711  mb_width != s->macroblock_width)
2712  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2713 
2714  mb_width_mul = get_bits(&gb, 5);
2715  mb_width_div = get_bits(&gb, 3);
2716  mb_height_mul = get_bits(&gb, 5);
2717  mb_height_div = get_bits(&gb, 3);
2718  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2719  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multiplier/divider");
2720 
2721  if (get_bits(&gb, 2))
2722  avpriv_request_sample(s->avctx, "unknown bits");
2723  }
2724 #endif
2725  }
2726  } else {
2727  if (!s->golden_frame.f->data[0]) {
2728  av_log(s->avctx, AV_LOG_WARNING,
2729  "vp3: first frame not a keyframe\n");
2730 
2731  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2732  if ((ret = ff_thread_get_ext_buffer(avctx, &s->golden_frame,
2733  AV_GET_BUFFER_FLAG_REF)) < 0)
2734  goto error;
2735  ff_thread_release_ext_buffer(&s->last_frame);
2736  if ((ret = ff_thread_ref_frame(&s->last_frame,
2737  &s->golden_frame)) < 0)
2738  goto error;
2739  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2740  }
2741  }
2742  ff_thread_finish_setup(avctx);
2743 
2744  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2745 
2746  if (s->version < 2) {
2747  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2748  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2749  goto error;
2750  }
2751 #if CONFIG_VP4_DECODER
2752  } else {
2753  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2754  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2755  goto error;
2756  }
2757 #endif
2758  }
2759  if ((ret = unpack_modes(s, &gb)) < 0) {
2760  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2761  goto error;
2762  }
2763  if (ret = unpack_vectors(s, &gb)) {
2764  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2765  goto error;
2766  }
2767  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2768  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2769  goto error;
2770  }
2771 
2772  if (s->version < 2) {
2773  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2774  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2775  goto error;
2776  }
2777 #if CONFIG_VP4_DECODER
2778  } else {
2779  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2780  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2781  goto error;
2782  }
2783 #endif
2784  }
2785 
2786  for (int i = 0; i < 3; i++) {
2787  int height = s->height >> (i && s->chroma_y_shift);
2788  if (s->flipped_image)
2789  s->data_offset[i] = 0;
2790  else
2791  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2792  }
2793 
2794  s->last_slice_end = 0;
2795  for (int i = 0; i < s->c_superblock_height; i++)
2796  render_slice(s, i);
2797 
2798  // filter the last row
2799  if (s->version < 2)
2800  for (int i = 0; i < 3; i++) {
2801  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2802  apply_loop_filter(s, i, row, row + 1);
2803  }
2804  vp3_draw_horiz_band(s, s->height);
2805 
2806  /* output frame, offset as needed */
2807  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2808  return ret;
2809 
2810  frame->crop_left = s->offset_x;
2811  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2812  frame->crop_top = s->offset_y;
2813  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2814 
2815  *got_frame = 1;
2816 
2817  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
2818  ret = update_frames(avctx);
2819  if (ret < 0)
2820  return ret;
2821  }
2822 
2823  return buf_size;
2824 
2825 error:
2826  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2827 
2828  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2829  av_frame_unref(s->current_frame.f);
2830 
2831  return ret;
2832 }
2833 
2834 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2835  AVCodecContext *avctx)
2836 {
2837  if (get_bits1(gb)) {
2838  int token;
2839  if (huff->nb_entries >= 32) { /* overflow */
2840  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2841  return -1;
2842  }
2843  token = get_bits(gb, 5);
2844  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2845  length, huff->nb_entries, token);
2846  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2847  } else {
2848  /* The following bound follows from the fact that nb_entries <= 32. */
2849  if (length >= 31) { /* overflow */
2850  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2851  return -1;
2852  }
2853  length++;
2854  if (read_huffman_tree(huff, gb, length, avctx))
2855  return -1;
2856  if (read_huffman_tree(huff, gb, length, avctx))
2857  return -1;
2858  }
2859  return 0;
2860 }
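/* The recursion above is a depth-first walk of the Huffman tree description:
 * a 1 bit terminates the current branch with a leaf carrying a 5-bit token
 * at the current code length, while a 0 bit descends one level and reads the
 * 0-subtree followed by the 1-subtree. For example, the bit sequence
 * 0, 1, <5-bit token A>, 1, <5-bit token B> yields two leaves of code
 * length 1. The (length, symbol) pairs collected here are later handed to
 * ff_vlc_init_from_lengths() in vp3_decode_init(), which reconstructs the
 * actual codes from them. */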
2861 
2862 #if CONFIG_THEORA_DECODER
2863 static const enum AVPixelFormat theora_pix_fmts[4] = {
2864  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2865 };
2866 
2867 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2868 {
2869  Vp3DecodeContext *s = avctx->priv_data;
2870  int visible_width, visible_height, colorspace;
2871  uint8_t offset_x = 0, offset_y = 0;
2872  int ret;
2873  AVRational fps, aspect;
2874 
2875  if (get_bits_left(gb) < 206)
2876  return AVERROR_INVALIDDATA;
2877 
2878  s->theora_header = 0;
2879  s->theora = get_bits(gb, 24);
2880  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2881  if (!s->theora) {
2882  s->theora = 1;
2883  avpriv_request_sample(s->avctx, "theora 0");
2884  }
2885 
2886  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2887  * but previous versions have the image flipped relative to vp3 */
2888  if (s->theora < 0x030200) {
2889  s->flipped_image = 1;
2890  av_log(avctx, AV_LOG_DEBUG,
2891  "Old (<alpha3) Theora bitstream, flipped image\n");
2892  }
2893 
2894  visible_width =
2895  s->width = get_bits(gb, 16) << 4;
2896  visible_height =
2897  s->height = get_bits(gb, 16) << 4;
2898 
2899  if (s->theora >= 0x030200) {
2900  visible_width = get_bits(gb, 24);
2901  visible_height = get_bits(gb, 24);
2902 
2903  offset_x = get_bits(gb, 8); /* offset x */
2904  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2905  }
2906 
2907  /* sanity check */
2908  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2909  visible_width + offset_x > s->width ||
2910  visible_height + offset_y > s->height ||
2911  visible_width < 18
2912  ) {
2913  av_log(avctx, AV_LOG_ERROR,
2914  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2915  visible_width, visible_height, offset_x, offset_y,
2916  s->width, s->height);
2917  return AVERROR_INVALIDDATA;
2918  }
2919 
2920  fps.num = get_bits_long(gb, 32);
2921  fps.den = get_bits_long(gb, 32);
2922  if (fps.num && fps.den) {
2923  if (fps.num < 0 || fps.den < 0) {
2924  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2925  return AVERROR_INVALIDDATA;
2926  }
2927  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2928  fps.den, fps.num, 1 << 30);
2929  }
2930 
2931  aspect.num = get_bits(gb, 24);
2932  aspect.den = get_bits(gb, 24);
2933  if (aspect.num && aspect.den) {
2934  av_reduce(&avctx->sample_aspect_ratio.num,
2935  &avctx->sample_aspect_ratio.den,
2936  aspect.num, aspect.den, 1 << 30);
2937  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2938  }
2939 
2940  if (s->theora < 0x030200)
2941  skip_bits(gb, 5); /* keyframe frequency force */
2942  colorspace = get_bits(gb, 8);
2943  skip_bits(gb, 24); /* bitrate */
2944 
2945  skip_bits(gb, 6); /* quality hint */
2946 
2947  if (s->theora >= 0x030200) {
2948  skip_bits(gb, 5); /* keyframe frequency force */
2949  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2950  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2951  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2952  return AVERROR_INVALIDDATA;
2953  }
2954  skip_bits(gb, 3); /* reserved */
2955  } else
2956  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2957 
2958  if (s->width < 18)
2959  return AVERROR_PATCHWELCOME;
2960  ret = ff_set_dimensions(avctx, s->width, s->height);
2961  if (ret < 0)
2962  return ret;
2963  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2964  avctx->width = visible_width;
2965  avctx->height = visible_height;
2966  // translate offsets from theora axis ([0,0] lower left)
2967  // to normal axis ([0,0] upper left)
2968  s->offset_x = offset_x;
2969  s->offset_y = s->height - visible_height - offset_y;
2970  }
2971 
2972  if (colorspace == 1)
2973  avctx->color_primaries = AVCOL_PRI_BT470M;
2974  else if (colorspace == 2)
2975  avctx->color_primaries = AVCOL_PRI_BT470BG;
2976 
2977  if (colorspace == 1 || colorspace == 2) {
2978  avctx->colorspace = AVCOL_SPC_BT470BG;
2979  avctx->color_trc = AVCOL_TRC_BT709;
2980  }
2981 
2982  s->theora_header = 1;
2983  return 0;
2984 }
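/* The offset translation above can be followed with a small example, using
 * numbers chosen purely for illustration: with a coded height of 240, a
 * visible height of 238 and a Theora offset_y of 1 (measured from the bottom
 * edge), the top-left-origin offset becomes s->offset_y = 240 - 238 - 1 = 1,
 * and the remaining row turns into bottom cropping when vp3_decode_frame()
 * fills in frame->crop_top and frame->crop_bottom. */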
2985 
2986 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2987 {
2988  Vp3DecodeContext *s = avctx->priv_data;
2989  int n, matrices, ret;
2990 
2991  if (!s->theora_header)
2992  return AVERROR_INVALIDDATA;
2993 
2994  if (s->theora >= 0x030200) {
2995  n = get_bits(gb, 3);
2996  /* loop filter limit values table */
2997  if (n)
2998  for (int i = 0; i < 64; i++)
2999  s->filter_limit_values[i] = get_bits(gb, n);
3000  }
3001 
3002  if (s->theora >= 0x030200)
3003  n = get_bits(gb, 4) + 1;
3004  else
3005  n = 16;
3006  /* quality threshold table */
3007  for (int i = 0; i < 64; i++)
3008  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3009 
3010  if (s->theora >= 0x030200)
3011  n = get_bits(gb, 4) + 1;
3012  else
3013  n = 16;
3014  /* dc scale factor table */
3015  for (int i = 0; i < 64; i++)
3016  s->coded_dc_scale_factor[0][i] =
3017  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3018 
3019  if (s->theora >= 0x030200)
3020  matrices = get_bits(gb, 9) + 1;
3021  else
3022  matrices = 3;
3023 
3024  if (matrices > 384) {
3025  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
3026  return -1;
3027  }
3028 
3029  for (int j = 0; j < matrices; j++)
3030  for (int i = 0; i < 64; i++)
3031  s->base_matrix[j][i] = get_bits(gb, 8);
3032 
3033  for (int inter = 0; inter <= 1; inter++) {
3034  for (int plane = 0; plane <= 2; plane++) {
3035  int newqr = 1;
3036  if (inter || plane > 0)
3037  newqr = get_bits1(gb);
3038  if (!newqr) {
3039  int qtj, plj;
3040  if (inter && get_bits1(gb)) {
3041  qtj = 0;
3042  plj = plane;
3043  } else {
3044  qtj = (3 * inter + plane - 1) / 3;
3045  plj = (plane + 2) % 3;
3046  }
3047  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3048  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3049  sizeof(s->qr_size[0][0]));
3050  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3051  sizeof(s->qr_base[0][0]));
3052  } else {
3053  int qri = 0;
3054  int qi = 0;
3055 
3056  for (;;) {
3057  int i = get_bits(gb, av_log2(matrices - 1) + 1);
3058  if (i >= matrices) {
3059  av_log(avctx, AV_LOG_ERROR,
3060  "invalid base matrix index\n");
3061  return -1;
3062  }
3063  s->qr_base[inter][plane][qri] = i;
3064  if (qi >= 63)
3065  break;
3066  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3067  s->qr_size[inter][plane][qri++] = i;
3068  qi += i;
3069  }
3070 
3071  if (qi > 63) {
3072  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3073  return -1;
3074  }
3075  s->qr_count[inter][plane] = qri;
3076  }
3077  }
3078  }
3079 
3080  /* Huffman tables */
3081  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3082  s->huffman_table[i].nb_entries = 0;
3083  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3084  return ret;
3085  }
3086 
3087  s->theora_tables = 1;
3088 
3089  return 0;
3090 }
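/* The qr_* arrays filled above describe, for each (inter, plane) pair, a
 * partition of the quality-index range 0..63: qr_base[] holds base-matrix
 * indices, qr_size[] the width of each interval and qr_count the number of
 * intervals. This mirrors the VP3 defaults installed in vp3_decode_init(),
 * where every pair gets a single interval of size 63, and is presumably
 * consumed later when the per-qi dequantization matrices are built. */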
3091 
3092 static av_cold int theora_decode_init(AVCodecContext *avctx)
3093 {
3094  Vp3DecodeContext *s = avctx->priv_data;
3095  GetBitContext gb;
3096  int ptype;
3097  const uint8_t *header_start[3];
3098  int header_len[3];
3099  int ret;
3100 
3101  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3102 
3103  s->theora = 1;
3104 
3105  if (!avctx->extradata_size) {
3106  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3107  return -1;
3108  }
3109 
3110  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
3111  42, header_start, header_len) < 0) {
3112  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3113  return -1;
3114  }
3115 
3116  for (int i = 0; i < 3; i++) {
3117  if (header_len[i] <= 0)
3118  continue;
3119  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3120  if (ret < 0)
3121  return ret;
3122 
3123  ptype = get_bits(&gb, 8);
3124 
3125  if (!(ptype & 0x80)) {
3126  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3127 // return -1;
3128  }
3129 
3130  // FIXME: Check for this as well.
3131  skip_bits_long(&gb, 6 * 8); /* "theora" */
3132 
3133  switch (ptype) {
3134  case 0x80:
3135  if (theora_decode_header(avctx, &gb) < 0)
3136  return -1;
3137  break;
3138  case 0x81:
3139 // FIXME: is this needed? it breaks sometimes
3140 // theora_decode_comments(avctx, gb);
3141  break;
3142  case 0x82:
3143  if (theora_decode_tables(avctx, &gb))
3144  return -1;
3145  break;
3146  default:
3147  av_log(avctx, AV_LOG_ERROR,
3148  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3149  break;
3150  }
3151  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3152  av_log(avctx, AV_LOG_WARNING,
3153  "%d bits left in packet %X\n",
3154  get_bits_left(&gb), ptype);
3155  if (s->theora < 0x030200)
3156  break;
3157  }
3158 
3159  return vp3_decode_init(avctx);
3160 }
3161 
3162 const FFCodec ff_theora_decoder = {
3163  .p.name = "theora",
3164  CODEC_LONG_NAME("Theora"),
3165  .p.type = AVMEDIA_TYPE_VIDEO,
3166  .p.id = AV_CODEC_ID_THEORA,
3167  .priv_data_size = sizeof(Vp3DecodeContext),
3168  .init = theora_decode_init,
3169  .close = vp3_decode_end,
3170  FF_CODEC_DECODE_CB(vp3_decode_frame),
3171  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3172  AV_CODEC_CAP_FRAME_THREADS,
3173  .flush = vp3_decode_flush,
3174  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3175  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3176  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3177 };
3178 #endif
3179 
3180 const FFCodec ff_vp3_decoder = {
3181  .p.name = "vp3",
3182  CODEC_LONG_NAME("On2 VP3"),
3183  .p.type = AVMEDIA_TYPE_VIDEO,
3184  .p.id = AV_CODEC_ID_VP3,
3185  .priv_data_size = sizeof(Vp3DecodeContext),
3186  .init = vp3_decode_init,
3187  .close = vp3_decode_end,
3188  FF_CODEC_DECODE_CB(vp3_decode_frame),
3189  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3190  AV_CODEC_CAP_FRAME_THREADS,
3191  .flush = vp3_decode_flush,
3192  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3193  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3194  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3195 };
3196 
3197 #if CONFIG_VP4_DECODER
3198 const FFCodec ff_vp4_decoder = {
3199  .p.name = "vp4",
3200  CODEC_LONG_NAME("On2 VP4"),
3201  .p.type = AVMEDIA_TYPE_VIDEO,
3202  .p.id = AV_CODEC_ID_VP4,
3203  .priv_data_size = sizeof(Vp3DecodeContext),
3204  .init = vp3_decode_init,
3205  .close = vp3_decode_end,
3206  FF_CODEC_DECODE_CB(vp3_decode_frame),
3207  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3208  AV_CODEC_CAP_FRAME_THREADS,
3209  .flush = vp3_decode_flush,
3210  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3211  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3212  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3213 };
3214 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2262
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1846
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:65
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:146
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:96
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:227
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
Vp3DecodeContext::mode_code_vlc
VLC mode_code_vlc
Definition: vp3.c:286
VP3DSPContext
Definition: vp3dsp.h:25
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp3_decode_flush
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:335
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:217
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:83
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
VP4Predictor
Definition: vp3.c:161
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:188
HuffEntry::len
uint8_t len
Definition: exr.c:95
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:145
VP4Predictor::dc
int dc
Definition: vp3.c:162
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2834
PUR
#define PUR
vp3dsp.h
Vp3DecodeContext::motion_vector_vlc
VLC motion_vector_vlc
Definition: vp3.c:287
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1022
Vp3DecodeContext::superblock_run_length_vlc
VLC superblock_run_length_vlc
Definition: vp3.c:283
AVPacket::data
uint8_t * data
Definition: packet.h:491
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3180
table
static const uint16_t table[]
Definition: prosumer.c:205
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:224
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:309
FFCodec
Definition: codec_internal.h:127
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:66
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:470
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2055
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1389
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
Vp3DecodeContext::height
int height
Definition: vp3.c:181
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:114
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:371
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
fragment
Definition: dashdec.c:36
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:204
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+2]
Definition: vp3.c:310
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1803
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:298
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1148
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:237
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLC *table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1176
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:189
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:644
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:202
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:87
FFSIGN
#define FFSIGN(a)
Definition: common.h:66
GetBitContext
Definition: get_bits.h:108
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2992
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:82
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:203
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:228
Vp3DecodeContext::theora
int theora
Definition: vp3.c:179
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:636
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:337
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:277
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:261
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:150
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1933
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1015
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:135
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
emms_c
#define emms_c()
Definition: emms.h:63
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:543
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:222
width
#define width
ref_frame
static const AVFrame * ref_frame(const struct pl_frame_mix *mix)
Definition: vf_libplacebo.c:783
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
s
#define s(width, name)
Definition: cbs_vp9.c:198
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:144
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:461
Vp3DecodeContext::fragment_run_length_vlc
VLC fragment_run_length_vlc
Definition: vp3.c:284
Vp3DecodeContext::vp4_mv_vlc
VLC vp4_mv_vlc[2][7]
Definition: vp3.c:288
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
s1
#define s1
Definition: regdef.h:38
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:174
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:386
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:71
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:70
vlc_init
#define vlc_init(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:56
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:67
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1631
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1297
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:97
AVFrame::crop_right
size_t crop_right
Definition: frame.h:781
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:47
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:552
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:84
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:451
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:213
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:871
FFABS
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
Definition: common.h:65
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:187
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:419
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:86
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:205
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:229
NULL
#define NULL
Definition: coverity.c:32
Vp3DecodeContext::block_pattern_vlc
VLC block_pattern_vlc[2]
Definition: vp3.c:285
init_frames
static av_cold int init_frames(Vp3DecodeContext *s)
Definition: vp3.c:2305
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
HuffTable
Used to store optimal huffman encoding results.
Definition: mjpegenc_huffman.h:69
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:786
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:201
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1012
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
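A sketch of the usual call, assuming sar_num/sar_den are hypothetical values just parsed from a header:

    /* The helper validates the ratio before storing it on the context. */
    ff_set_sar(avctx, (AVRational){ sar_num, sar_den });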
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:179
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:262
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:195
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
update_frames
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2496
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:199
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:299
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
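A hedged sketch of reading one symbol, assuming my_vlc is a hypothetical VLC built beforehand with 6-bit primary lookups:

    /* 6 bits are consumed per table lookup, with at most 2 lookups for
     * codes longer than 6 bits. */
    int symbol = get_vlc2(gb, my_vlc.table, 6, 2);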
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:238
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1629
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:779
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:191
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:193
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:221
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:269
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:72
AVFrame::crop_left
size_t crop_left
Definition: frame.h:780
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:528
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:318
AVPacket::size
int size
Definition: packet.h:492
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
(Lengthy excerpt from the Snow bitstream description elided: half-pel interpolation filter coefficients, the range-coder state transition table, neighboring-block and motion-vector prediction, and intra DC prediction. See snow.txt for the full text.)
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
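A minimal sketch of the hand-off at the end of decoding, assuming the finished picture sits in s->current_frame.f:

    /* Give the caller its own reference; the decoder keeps its copy as a
     * reference picture for later inter frames. */
    int ret = av_frame_ref(frame, s->current_frame.f);
    if (ret < 0)
        return ret;
    *got_frame = 1;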
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:87
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:900
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_vp4_decoder
const FFCodec ff_vp4_decoder
vp4_get_mv
static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:889
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
VLCElem
Definition: vlc.h:29
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:341
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:258
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:234
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:197
Vp3DecodeContext::current_frame
ThreadFrame current_frame
Definition: vp3.c:185
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:192
height
#define height
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:182
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:226
offset
It is the only field you need to keep, assuming you have a context; there is some magic around it that you don't need to care about, just let it be.
Definition: writing_filters.txt:86
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:302
version
version
Definition: libkvazaar.c:321
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:572
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:178
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:276
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:186
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:80
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1780
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:214
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:448
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:218
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:304
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:542
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
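A hedged peek-then-consume sketch; the 0x80 marker value is purely illustrative:

    if (show_bits(gb, 8) == 0x80) { /* peek without moving the read position */
        skip_bits(gb, 8);           /* the marker matched, now consume it */
        /* ... parse the header that follows ... */
    }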
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:215
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2574
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:212
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:59
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1046
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:209
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:206
AVCodecContext::height
int height
Definition: avcodec.h:621
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:984
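A sketch of the common allocation pattern in frame-threaded decoders, assuming the picture is kept as a reference afterwards:

    int ret = ff_thread_get_ext_buffer(avctx, &s->current_frame,
                                       AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret; /* no buffer could be allocated */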
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
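A hedged sketch of the overflow-checked array allocation this helper provides; frags is a hypothetical local:

    /* nmemb * size is validated internally; NULL is returned on failure. */
    Vp3Fragment *frags = av_calloc(s->fragment_count, sizeof(*frags));
    if (!frags)
        return AVERROR(ENOMEM);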
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:58
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:273
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:207
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:550
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:509
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:179
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:319
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:81
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:268
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel; the contents of buffers must not be read before the decoding thread reports progress on them. Setup code must run before the decode process starts; call ff_thread_finish_setup() afterwards.
Vp3DecodeContext::golden_frame
ThreadFrame golden_frame
Definition: vp3.c:183
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:182
BLOCK_X
#define BLOCK_X
Definition: vp3.c:643
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:90
Vp3DecodeContext
Definition: vp3.c:177
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:85
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:61
AVCodecContext
main external API structure.
Definition: avcodec.h:441
ThreadFrame
Definition: threadframe.h:27
eob_run_table
static const struct @192 eob_run_table[7]
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:143
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1891
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:695
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VLC
Definition: vlc.h:33
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:235
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1030
HuffEntry
Definition: exr.c:94
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:263
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
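A hedged sketch of splitting Theora extradata; 42 bytes is the size of the Theora identification header, which is the conventional first_header_size for this codec:

    const uint8_t *header_start[3];
    int header_len[3];

    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                                  42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return AVERROR_INVALIDDATA;
    }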
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:79
VideoDSPContext
Definition: videodsp.h:40
HuffEntry::sym
uint8_t sym
Definition: vp3.c:169
Vp3DecodeContext::coeff_vlc
VLC coeff_vlc[5 *16]
Definition: vp3.c:281
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:210
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1627
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, which may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres decoding is enabled.
Definition: avcodec.h:636
Vp3DecodeContext::last_frame
ThreadFrame last_frame
Definition: vp3.c:184
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:225
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:338
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1140
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:173
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:466
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:307
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:468
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
VP4Predictor::type
int type
Definition: vp3.c:163
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2317
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:236
AVFrame::crop_top
size_t crop_top
Definition: frame.h:778
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:57
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:220
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
d
d
Definition: ffmpeg_filter.c:368
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:621
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:181
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:275
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1097
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:239
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:347
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:77
Vp3DecodeContext::version
int version
Definition: vp3.c:180
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
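A sketch of the usual validation before allocating frames for a newly parsed geometry:

    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0)
        return ret; /* dimensions would overflow int addressing */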
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:231
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:147
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:194
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:312
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:208
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:259
Vp3Fragment
Definition: vp3.c:64
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio (0 if unknown); that is, the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:822
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:198
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:292
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:190
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:260
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:216