mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
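/* Upper bound on buffered closed-caption triplets (3 bytes each); the CC side-data
 * buffer is never grown past 3 * A53_MAX_CC_COUNT bytes. */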
63 #define A53_MAX_CC_COUNT 2000
64 
65 enum Mpeg2ClosedCaptionsFormat {
66  CC_FORMAT_AUTO,
67  CC_FORMAT_A53_PART4,
68  CC_FORMAT_SCTE20,
69  CC_FORMAT_DVD,
71 };
72 
73 typedef struct Mpeg1Context {
74  MpegEncContext mpeg_enc_ctx;
75  int repeat_field; /* true if we must repeat the field */
76  AVPanScan pan_scan; /* some temporary storage for the panscan */
77  enum AVStereo3DType stereo3d_type;
78  int has_stereo3d;
79  AVBufferRef *a53_buf_ref;
80  enum Mpeg2ClosedCaptionsFormat cc_format;
81  uint8_t afd;
82  int has_afd;
83  int slice_count;
84  unsigned aspect_ratio_info;
85  AVRational save_aspect;
86  int save_width, save_height, save_progressive_seq;
87  AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */
88  unsigned frame_rate_index;
89  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
90  int closed_gop;
91  int tmpgexs;
92  int first_slice;
93  int extradata_decoded;
94  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
95 } Mpeg1Context;
96 
97 /* as H.263, but only 17 codes */
98 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
99 {
100  int code, sign, val, shift;
101 
102  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
103  if (code == 0)
104  return pred;
105  if (code < 0)
106  return 0xffff;
107 
108  sign = get_bits1(&s->gb);
109  shift = fcode - 1;
110  val = code;
111  if (shift) {
112  val = (val - 1) << shift;
113  val |= get_bits(&s->gb, shift);
114  val++;
115  }
116  if (sign)
117  val = -val;
118  val += pred;
119 
120  /* modulo decoding */
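 /* sign_extend() performs the modulo wrap into the legal range
  * [-16 << shift, (16 << shift) - 1] implied by the f_code. */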
121  return sign_extend(val, 5 + shift);
122 }
123 
124 #define MAX_INDEX (64 - 1)
125 #define check_scantable_index(ctx, x) \
126  do { \
127  if ((x) > MAX_INDEX) { \
128  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
129  ctx->mb_x, ctx->mb_y); \
130  return AVERROR_INVALIDDATA; \
131  } \
132  } while (0)
133 
134 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
135  int16_t *block, int n)
136 {
137  int level, i, j, run;
138  const uint8_t *const scantable = s->intra_scantable.permutated;
139  const uint16_t *quant_matrix = s->inter_matrix;
140  const int qscale = s->qscale;
141 
142  {
143  OPEN_READER(re, &s->gb);
144  i = -1;
145  // special case for first coefficient, no need to add second VLC table
146  UPDATE_CACHE(re, &s->gb);
147  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
148  level = (3 * qscale * quant_matrix[0]) >> 5;
149  level = (level - 1) | 1;
150  if (GET_CACHE(re, &s->gb) & 0x40000000)
151  level = -level;
152  block[0] = level;
153  i++;
154  SKIP_BITS(re, &s->gb, 2);
155  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
156  goto end;
157  }
158  /* now quantify & encode AC coefficients */
159  for (;;) {
160  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
161  TEX_VLC_BITS, 2, 0);
162 
163  if (level != 0) {
164  i += run;
165  if (i > MAX_INDEX)
166  break;
167  j = scantable[i];
168  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
169  level = (level - 1) | 1;
170  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
171  SHOW_SBITS(re, &s->gb, 1);
172  SKIP_BITS(re, &s->gb, 1);
173  } else {
174  /* escape */
175  run = SHOW_UBITS(re, &s->gb, 6) + 1;
176  LAST_SKIP_BITS(re, &s->gb, 6);
177  UPDATE_CACHE(re, &s->gb);
178  level = SHOW_SBITS(re, &s->gb, 8);
179  SKIP_BITS(re, &s->gb, 8);
180  if (level == -128) {
181  level = SHOW_UBITS(re, &s->gb, 8) - 256;
182  SKIP_BITS(re, &s->gb, 8);
183  } else if (level == 0) {
184  level = SHOW_UBITS(re, &s->gb, 8);
185  SKIP_BITS(re, &s->gb, 8);
186  }
187  i += run;
188  if (i > MAX_INDEX)
189  break;
190  j = scantable[i];
191  if (level < 0) {
192  level = -level;
193  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
194  level = (level - 1) | 1;
195  level = -level;
196  } else {
197  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
198  level = (level - 1) | 1;
199  }
200  }
201 
202  block[j] = level;
203  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
204  break;
205  UPDATE_CACHE(re, &s->gb);
206  }
207 end:
208  LAST_SKIP_BITS(re, &s->gb, 2);
209  CLOSE_READER(re, &s->gb);
210  }
211 
212  check_scantable_index(s, i);
213 
214  s->block_last_index[n] = i;
215  return 0;
216 }
217 
218 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
219  int16_t *block, int n)
220 {
221  int level, i, j, run;
222  const uint8_t *const scantable = s->intra_scantable.permutated;
223  const uint16_t *quant_matrix;
224  const int qscale = s->qscale;
225  int mismatch;
226 
227  mismatch = 1;
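 /* MPEG-2 mismatch control: track the parity of the coefficient sum and, at the
  * end, toggle the LSB of coefficient 63 so that the sum fed to the IDCT is odd. */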
228 
229  {
230  OPEN_READER(re, &s->gb);
231  i = -1;
232  if (n < 4)
233  quant_matrix = s->inter_matrix;
234  else
235  quant_matrix = s->chroma_inter_matrix;
236 
237  // Special case for first coefficient, no need to add second VLC table.
238  UPDATE_CACHE(re, &s->gb);
239  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
240  level = (3 * qscale * quant_matrix[0]) >> 5;
241  if (GET_CACHE(re, &s->gb) & 0x40000000)
242  level = -level;
243  block[0] = level;
244  mismatch ^= level;
245  i++;
246  SKIP_BITS(re, &s->gb, 2);
247  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
248  goto end;
249  }
250 
251  /* now quantify & encode AC coefficients */
252  for (;;) {
253  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
254  TEX_VLC_BITS, 2, 0);
255 
256  if (level != 0) {
257  i += run;
258  if (i > MAX_INDEX)
259  break;
260  j = scantable[i];
261  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
262  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
263  SHOW_SBITS(re, &s->gb, 1);
264  SKIP_BITS(re, &s->gb, 1);
265  } else {
266  /* escape */
267  run = SHOW_UBITS(re, &s->gb, 6) + 1;
268  LAST_SKIP_BITS(re, &s->gb, 6);
269  UPDATE_CACHE(re, &s->gb);
270  level = SHOW_SBITS(re, &s->gb, 12);
271  SKIP_BITS(re, &s->gb, 12);
272 
273  i += run;
274  if (i > MAX_INDEX)
275  break;
276  j = scantable[i];
277  if (level < 0) {
278  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
279  level = -level;
280  } else {
281  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
282  }
283  }
284 
285  mismatch ^= level;
286  block[j] = level;
287  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
288  break;
289  UPDATE_CACHE(re, &s->gb);
290  }
291 end:
292  LAST_SKIP_BITS(re, &s->gb, 2);
293  CLOSE_READER(re, &s->gb);
294  }
295  block[63] ^= (mismatch & 1);
296 
297  check_scantable_index(s, i);
298 
299  s->block_last_index[n] = i;
300  return 0;
301 }
302 
303 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
304  int16_t *block, int n)
305 {
306  int level, dc, diff, i, j, run;
307  int component;
308  const RL_VLC_ELEM *rl_vlc;
309  const uint8_t *const scantable = s->intra_scantable.permutated;
310  const uint16_t *quant_matrix;
311  const int qscale = s->qscale;
312  int mismatch;
313 
314  /* DC coefficient */
315  if (n < 4) {
316  quant_matrix = s->intra_matrix;
317  component = 0;
318  } else {
319  quant_matrix = s->chroma_intra_matrix;
320  component = (n & 1) + 1;
321  }
322  diff = decode_dc(&s->gb, component);
323  dc = s->last_dc[component];
324  dc += diff;
325  s->last_dc[component] = dc;
326  block[0] = dc * (1 << (3 - s->intra_dc_precision));
327  ff_tlog(s->avctx, "dc=%d\n", block[0]);
328  mismatch = block[0] ^ 1;
329  i = 0;
330  if (s->intra_vlc_format)
331  rl_vlc = ff_mpeg2_rl_vlc;
332  else
333  rl_vlc = ff_mpeg1_rl_vlc;
334 
335  {
336  OPEN_READER(re, &s->gb);
337  /* now quantify & encode AC coefficients */
338  for (;;) {
339  UPDATE_CACHE(re, &s->gb);
340  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
341  TEX_VLC_BITS, 2, 0);
342 
343  if (level == 127) {
344  break;
345  } else if (level != 0) {
346  i += run;
347  if (i > MAX_INDEX)
348  break;
349  j = scantable[i];
350  level = (level * qscale * quant_matrix[j]) >> 4;
351  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
352  SHOW_SBITS(re, &s->gb, 1);
353  LAST_SKIP_BITS(re, &s->gb, 1);
354  } else {
355  /* escape */
356  run = SHOW_UBITS(re, &s->gb, 6) + 1;
357  SKIP_BITS(re, &s->gb, 6);
358  level = SHOW_SBITS(re, &s->gb, 12);
359  LAST_SKIP_BITS(re, &s->gb, 12);
360  i += run;
361  if (i > MAX_INDEX)
362  break;
363  j = scantable[i];
364  if (level < 0) {
365  level = (-level * qscale * quant_matrix[j]) >> 4;
366  level = -level;
367  } else {
368  level = (level * qscale * quant_matrix[j]) >> 4;
369  }
370  }
371 
372  mismatch ^= level;
373  block[j] = level;
374  }
375  CLOSE_READER(re, &s->gb);
376  }
377  block[63] ^= mismatch & 1;
378 
379  check_scantable_index(s, i);
380 
381  s->block_last_index[n] = i;
382  return 0;
383 }
384 
385 /******************************************/
386 /* decoding */
387 
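/* Dual-prime differential motion vector component: one bit selects zero,
 * otherwise a second bit selects +1 or -1. */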
388 static inline int get_dmv(MpegEncContext *s)
389 {
390  if (get_bits1(&s->gb))
391  return 1 - (get_bits1(&s->gb) << 1);
392  else
393  return 0;
394 }
395 
396 /* motion type (for MPEG-2) */
397 #define MT_FIELD 1
398 #define MT_FRAME 2
399 #define MT_16X8 2
400 #define MT_DMV 3
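/* Motion type code 2 means frame-based prediction in frame pictures and 16x8
 * prediction in field pictures, hence MT_FRAME == MT_16X8. */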
401 
402 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
403 {
404  int i, j, k, cbp, val, mb_type, motion_type;
405  const int mb_block_count = 4 + (1 << s->chroma_format);
406  int ret;
407 
408  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
409 
410  av_assert2(s->mb_skipped == 0);
411 
412  if (s->mb_skip_run-- != 0) {
413  if (s->pict_type == AV_PICTURE_TYPE_P) {
414  s->mb_skipped = 1;
415  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
416  MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
417  } else {
418  int mb_type;
419 
420  if (s->mb_x)
421  mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
422  else
423  // FIXME not sure if this is allowed in MPEG at all
424  mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
425  if (IS_INTRA(mb_type)) {
426  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
427  return AVERROR_INVALIDDATA;
428  }
429  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
430  mb_type | MB_TYPE_SKIP;
431 
432  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
433  s->mb_skipped = 1;
434  }
435 
436  return 0;
437  }
438 
439  switch (s->pict_type) {
440  default:
441  case AV_PICTURE_TYPE_I:
442  if (get_bits1(&s->gb) == 0) {
443  if (get_bits1(&s->gb) == 0) {
444  av_log(s->avctx, AV_LOG_ERROR,
445  "Invalid mb type in I-frame at %d %d\n",
446  s->mb_x, s->mb_y);
447  return AVERROR_INVALIDDATA;
448  }
449  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
450  } else {
451  mb_type = MB_TYPE_INTRA;
452  }
453  break;
454  case AV_PICTURE_TYPE_P:
455  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
456  if (mb_type < 0) {
457  av_log(s->avctx, AV_LOG_ERROR,
458  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
459  return AVERROR_INVALIDDATA;
460  }
461  break;
462  case AV_PICTURE_TYPE_B:
463  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
464  if (mb_type < 0) {
465  av_log(s->avctx, AV_LOG_ERROR,
466  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
467  return AVERROR_INVALIDDATA;
468  }
469  break;
470  }
471  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
472 // motion_type = 0; /* avoid warning */
473  if (IS_INTRA(mb_type)) {
474  s->bdsp.clear_blocks(s->block[0]);
475 
476  if (!s->chroma_y_shift)
477  s->bdsp.clear_blocks(s->block[6]);
478 
479  /* compute DCT type */
480  // FIXME: add an interlaced_dct coded var?
481  if (s->picture_structure == PICT_FRAME &&
482  !s->frame_pred_frame_dct)
483  s->interlaced_dct = get_bits1(&s->gb);
484 
485  if (IS_QUANT(mb_type))
486  s->qscale = mpeg_get_qscale(s);
487 
488  if (s->concealment_motion_vectors) {
489  /* just parse them */
490  if (s->picture_structure != PICT_FRAME)
491  skip_bits1(&s->gb); /* field select */
492 
493  s->mv[0][0][0] =
494  s->last_mv[0][0][0] =
495  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
496  s->last_mv[0][0][0]);
497  s->mv[0][0][1] =
498  s->last_mv[0][0][1] =
499  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
500  s->last_mv[0][0][1]);
501 
502  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
503  } else {
504  /* reset mv prediction */
505  memset(s->last_mv, 0, sizeof(s->last_mv));
506  }
507  s->mb_intra = 1;
508 
509  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
510  for (i = 0; i < mb_block_count; i++)
511  if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
512  return ret;
513  } else {
514  for (i = 0; i < 6; i++) {
515  ret = ff_mpeg1_decode_block_intra(&s->gb,
516  s->intra_matrix,
517  s->intra_scantable.permutated,
518  s->last_dc, s->block[i],
519  i, s->qscale);
520  if (ret < 0) {
521  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
522  s->mb_x, s->mb_y);
523  return ret;
524  }
525 
526  s->block_last_index[i] = ret;
527  }
528  }
529  } else {
530  if (mb_type & MB_TYPE_ZERO_MV) {
531  av_assert2(mb_type & MB_TYPE_CBP);
532 
533  s->mv_dir = MV_DIR_FORWARD;
534  if (s->picture_structure == PICT_FRAME) {
535  if (s->picture_structure == PICT_FRAME
536  && !s->frame_pred_frame_dct)
537  s->interlaced_dct = get_bits1(&s->gb);
538  s->mv_type = MV_TYPE_16X16;
539  } else {
540  s->mv_type = MV_TYPE_FIELD;
541  mb_type |= MB_TYPE_INTERLACED;
542  s->field_select[0][0] = s->picture_structure - 1;
543  }
544 
545  if (IS_QUANT(mb_type))
546  s->qscale = mpeg_get_qscale(s);
547 
548  s->last_mv[0][0][0] = 0;
549  s->last_mv[0][0][1] = 0;
550  s->last_mv[0][1][0] = 0;
551  s->last_mv[0][1][1] = 0;
552  s->mv[0][0][0] = 0;
553  s->mv[0][0][1] = 0;
554  } else {
555  av_assert2(mb_type & MB_TYPE_BIDIR_MV);
556  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
557  /* get additional motion vector type */
558  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
559  motion_type = MT_FRAME;
560  } else {
561  motion_type = get_bits(&s->gb, 2);
562  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
563  s->interlaced_dct = get_bits1(&s->gb);
564  }
565 
566  if (IS_QUANT(mb_type))
567  s->qscale = mpeg_get_qscale(s);
568 
569  /* motion vectors */
570  s->mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
571  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
572  switch (motion_type) {
573  case MT_FRAME: /* or MT_16X8 */
574  if (s->picture_structure == PICT_FRAME) {
575  mb_type |= MB_TYPE_16x16;
576  s->mv_type = MV_TYPE_16X16;
577  for (i = 0; i < 2; i++) {
578  if (HAS_MV(mb_type, i)) {
579  /* MT_FRAME */
580  s->mv[i][0][0] =
581  s->last_mv[i][0][0] =
582  s->last_mv[i][1][0] =
583  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
584  s->last_mv[i][0][0]);
585  s->mv[i][0][1] =
586  s->last_mv[i][0][1] =
587  s->last_mv[i][1][1] =
588  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
589  s->last_mv[i][0][1]);
590  /* full_pel: only for MPEG-1 */
591  if (s->full_pel[i]) {
592  s->mv[i][0][0] *= 2;
593  s->mv[i][0][1] *= 2;
594  }
595  }
596  }
597  } else {
598  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
599  s->mv_type = MV_TYPE_16X8;
600  for (i = 0; i < 2; i++) {
601  if (HAS_MV(mb_type, i)) {
602  /* MT_16X8 */
603  for (j = 0; j < 2; j++) {
604  s->field_select[i][j] = get_bits1(&s->gb);
605  for (k = 0; k < 2; k++) {
606  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
607  s->last_mv[i][j][k]);
608  s->last_mv[i][j][k] = val;
609  s->mv[i][j][k] = val;
610  }
611  }
612  }
613  }
614  }
615  break;
616  case MT_FIELD:
617  s->mv_type = MV_TYPE_FIELD;
618  if (s->picture_structure == PICT_FRAME) {
619  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
620  for (i = 0; i < 2; i++) {
621  if (HAS_MV(mb_type, i)) {
622  for (j = 0; j < 2; j++) {
623  s->field_select[i][j] = get_bits1(&s->gb);
624  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
625  s->last_mv[i][j][0]);
626  s->last_mv[i][j][0] = val;
627  s->mv[i][j][0] = val;
628  ff_tlog(s->avctx, "fmx=%d\n", val);
629  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
630  s->last_mv[i][j][1] >> 1);
631  s->last_mv[i][j][1] = 2 * val;
632  s->mv[i][j][1] = val;
633  ff_tlog(s->avctx, "fmy=%d\n", val);
634  }
635  }
636  }
637  } else {
638  av_assert0(!s->progressive_sequence);
639  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
640  for (i = 0; i < 2; i++) {
641  if (HAS_MV(mb_type, i)) {
642  s->field_select[i][0] = get_bits1(&s->gb);
643  for (k = 0; k < 2; k++) {
644  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
645  s->last_mv[i][0][k]);
646  s->last_mv[i][0][k] = val;
647  s->last_mv[i][1][k] = val;
648  s->mv[i][0][k] = val;
649  }
650  }
651  }
652  }
653  break;
654  case MT_DMV:
655  if (s->progressive_sequence){
656  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
657  return AVERROR_INVALIDDATA;
658  }
659  s->mv_type = MV_TYPE_DMV;
660  for (i = 0; i < 2; i++) {
661  if (HAS_MV(mb_type, i)) {
662  int dmx, dmy, mx, my, m;
663  const int my_shift = s->picture_structure == PICT_FRAME;
664 
665  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
666  s->last_mv[i][0][0]);
667  s->last_mv[i][0][0] = mx;
668  s->last_mv[i][1][0] = mx;
669  dmx = get_dmv(s);
670  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
671  s->last_mv[i][0][1] >> my_shift);
672  dmy = get_dmv(s);
673 
674 
675  s->last_mv[i][0][1] = my * (1 << my_shift);
676  s->last_mv[i][1][1] = my * (1 << my_shift);
677 
678  s->mv[i][0][0] = mx;
679  s->mv[i][0][1] = my;
680  s->mv[i][1][0] = mx; // not used
681  s->mv[i][1][1] = my; // not used
682 
683  if (s->picture_structure == PICT_FRAME) {
684  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
685 
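 /* Dual prime: derive the opposite-parity predictions by scaling the vector
  * according to the field distance (m = 1 or 3), adding the differential
  * dmx/dmy, and applying a +/-1 vertical correction for the parity change. */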
686  // m = 1 + 2 * s->top_field_first;
687  m = s->top_field_first ? 1 : 3;
688 
689  /* top -> top pred */
690  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
691  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
692  m = 4 - m;
693  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
694  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
695  } else {
696  mb_type |= MB_TYPE_16x16;
697 
698  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
699  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
700  if (s->picture_structure == PICT_TOP_FIELD)
701  s->mv[i][2][1]--;
702  else
703  s->mv[i][2][1]++;
704  }
705  }
706  }
707  break;
708  default:
709  av_log(s->avctx, AV_LOG_ERROR,
710  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
711  return AVERROR_INVALIDDATA;
712  }
713  }
714 
715  s->mb_intra = 0;
716  if (HAS_CBP(mb_type)) {
717  s->bdsp.clear_blocks(s->block[0]);
718 
719  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
720  if (mb_block_count > 6) {
721  cbp *= 1 << mb_block_count - 6;
722  cbp |= get_bits(&s->gb, mb_block_count - 6);
723  s->bdsp.clear_blocks(s->block[6]);
724  }
725  if (cbp <= 0) {
726  av_log(s->avctx, AV_LOG_ERROR,
727  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
728  return AVERROR_INVALIDDATA;
729  }
730 
731  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
732  cbp <<= 12 - mb_block_count;
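 /* Left-align the CBP so that bit 11 always corresponds to the first block,
  * whatever the 4:2:0/4:2:2/4:4:4 block count. */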
733 
734  for (i = 0; i < mb_block_count; i++) {
735  if (cbp & (1 << 11)) {
736  if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
737  return ret;
738  } else {
739  s->block_last_index[i] = -1;
740  }
741  cbp += cbp;
742  }
743  } else {
744  for (i = 0; i < 6; i++) {
745  if (cbp & 32) {
746  if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
747  return ret;
748  } else {
749  s->block_last_index[i] = -1;
750  }
751  cbp += cbp;
752  }
753  }
754  } else {
755  for (i = 0; i < 12; i++)
756  s->block_last_index[i] = -1;
757  }
758  }
759 
760  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
761 
762  return 0;
763 }
764 
765 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
766 {
767  Mpeg1Context *s = avctx->priv_data;
768  MpegEncContext *s2 = &s->mpeg_enc_ctx;
769  int ret;
770 
771  s2->out_format = FMT_MPEG1;
772 
773  if ( avctx->codec_tag != AV_RL32("VCR2")
774  && avctx->codec_tag != AV_RL32("BW10"))
775  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
776  ret = ff_mpv_decode_init(s2, avctx);
777  if (ret < 0)
778  return ret;
779 
780  ff_mpeg12_init_vlcs();
781 
782  s2->chroma_format = 1;
783  s->repeat_field = 0;
784  avctx->color_range = AVCOL_RANGE_MPEG;
785  return 0;
786 }
787 
788 #if HAVE_THREADS
789 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
790  const AVCodecContext *avctx_from)
791 {
792  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
793  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
794  int err;
795 
796  if (avctx == avctx_from || !s1->context_initialized)
797  return 0;
798 
799  err = ff_mpeg_update_thread_context(avctx, avctx_from);
800  if (err)
801  return err;
802 
803  if (!s->context_initialized)
804  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
805 
806  return 0;
807 }
808 #endif
809 
810 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
811 #if CONFIG_MPEG1_NVDEC_HWACCEL
812  AV_PIX_FMT_CUDA,
813 #endif
814 #if CONFIG_MPEG1_VDPAU_HWACCEL
815  AV_PIX_FMT_VDPAU,
816 #endif
817  AV_PIX_FMT_YUV420P,
818  AV_PIX_FMT_NONE
819 };
820 
821 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
822 #if CONFIG_MPEG2_NVDEC_HWACCEL
823  AV_PIX_FMT_CUDA,
824 #endif
825 #if CONFIG_MPEG2_VDPAU_HWACCEL
826  AV_PIX_FMT_VDPAU,
827 #endif
828 #if CONFIG_MPEG2_DXVA2_HWACCEL
829  AV_PIX_FMT_DXVA2_VLD,
830 #endif
831 #if CONFIG_MPEG2_D3D11VA_HWACCEL
832  AV_PIX_FMT_D3D11VA_VLD,
833  AV_PIX_FMT_D3D11,
834 #endif
835 #if CONFIG_MPEG2_D3D12VA_HWACCEL
836  AV_PIX_FMT_D3D12,
837 #endif
838 #if CONFIG_MPEG2_VAAPI_HWACCEL
839  AV_PIX_FMT_VAAPI,
840 #endif
841 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
842  AV_PIX_FMT_VIDEOTOOLBOX,
843 #endif
844  AV_PIX_FMT_YUV420P,
845  AV_PIX_FMT_NONE
846 };
847 
848 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
849  AV_PIX_FMT_YUV422P,
850  AV_PIX_FMT_NONE
851 };
852 
853 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
854  AV_PIX_FMT_YUV444P,
855  AV_PIX_FMT_NONE
856 };
857 
858 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
859 {
860  Mpeg1Context *s1 = avctx->priv_data;
861  MpegEncContext *s = &s1->mpeg_enc_ctx;
862  const enum AVPixelFormat *pix_fmts;
863 
864  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
865  return AV_PIX_FMT_GRAY8;
866 
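 /* chroma_format as coded in the sequence extension: 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4. */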
867  if (s->chroma_format < 2)
868  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
869  mpeg1_hwaccel_pixfmt_list_420 :
870  mpeg2_hwaccel_pixfmt_list_420;
871  else if (s->chroma_format == 2)
872  pix_fmts = mpeg12_pixfmt_list_422;
873  else
874  pix_fmts = mpeg12_pixfmt_list_444;
875 
876  return ff_get_format(avctx, pix_fmts);
877 }
878 
879 /* Call this function when we know all parameters.
880  * It may be called in different places for MPEG-1 and MPEG-2. */
881 static int mpeg_decode_postinit(AVCodecContext *avctx)
882 {
883  Mpeg1Context *s1 = avctx->priv_data;
884  MpegEncContext *s = &s1->mpeg_enc_ctx;
885  int ret;
886 
887  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
888  // MPEG-1 aspect
889  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
890  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
891  } else { // MPEG-2
892  // MPEG-2 aspect
893  if (s1->aspect_ratio_info > 1) {
894  AVRational dar =
895  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
896  (AVRational) { s1->pan_scan.width,
897  s1->pan_scan.height }),
898  (AVRational) { s->width, s->height });
899 
900  /* We ignore the spec here and guess a bit as reality does not
901  * match the spec, see for example res_change_ffmpeg_aspect.ts
902  * and sequence-display-aspect.mpg.
903  * issue1613, 621, 562 */
904  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
905  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
906  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
907  s->avctx->sample_aspect_ratio =
908  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
909  (AVRational) { s->width, s->height });
910  } else {
911  s->avctx->sample_aspect_ratio =
912  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
913  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
914 // issue1613 4/3 16/9 -> 16/9
915 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
916 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
917 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
918  ff_dlog(avctx, "aspect A %d/%d\n",
919  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
920  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
921  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
922  s->avctx->sample_aspect_ratio.den);
923  }
924  } else {
925  s->avctx->sample_aspect_ratio =
926  ff_mpeg2_aspect[s1->aspect_ratio_info];
927  }
928  } // MPEG-2
929 
930  if (av_image_check_sar(s->width, s->height,
931  avctx->sample_aspect_ratio) < 0) {
932  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
933  avctx->sample_aspect_ratio.num,
934  avctx->sample_aspect_ratio.den);
935  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
936  }
937 
938  if (!s->context_initialized ||
939  avctx->coded_width != s->width ||
940  avctx->coded_height != s->height ||
941  s1->save_width != s->width ||
942  s1->save_height != s->height ||
943  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
944  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
945  0) {
946  if (s->context_initialized)
947  ff_mpv_common_end(s);
948 
949  ret = ff_set_dimensions(avctx, s->width, s->height);
950  if (ret < 0)
951  return ret;
952 
953  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
954  (s->bit_rate != 0x3FFFF*400)) {
955  avctx->rc_max_rate = s->bit_rate;
956  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
957  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
958  avctx->bit_rate = s->bit_rate;
959  }
960  s1->save_aspect = s->avctx->sample_aspect_ratio;
961  s1->save_width = s->width;
962  s1->save_height = s->height;
963  s1->save_progressive_seq = s->progressive_sequence;
964 
965  /* low_delay may be forced, in this case we will have B-frames
966  * that behave like P-frames. */
967  avctx->has_b_frames = !s->low_delay;
968 
969  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
970  // MPEG-1 fps
971  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
972 #if FF_API_TICKS_PER_FRAME
973 FF_DISABLE_DEPRECATION_WARNINGS
974  avctx->ticks_per_frame = 1;
975 FF_ENABLE_DEPRECATION_WARNINGS
976 #endif
977 
978  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
979  } else { // MPEG-2
980  // MPEG-2 fps
981  av_reduce(&s->avctx->framerate.num,
982  &s->avctx->framerate.den,
983  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
984  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
985  1 << 30);
986 #if FF_API_TICKS_PER_FRAME
987 FF_DISABLE_DEPRECATION_WARNINGS
988  avctx->ticks_per_frame = 2;
989 FF_ENABLE_DEPRECATION_WARNINGS
990 #endif
991 
992  switch (s->chroma_format) {
993  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
994  case 2:
995  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
996  default: av_assert0(0);
997  }
998  } // MPEG-2
999 
1000  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1001 
1002  if ((ret = ff_mpv_common_init(s)) < 0)
1003  return ret;
1004  if (!s->avctx->lowres)
1006  }
1007  return 0;
1008 }
1009 
1010 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1011  int buf_size)
1012 {
1013  Mpeg1Context *s1 = avctx->priv_data;
1014  MpegEncContext *s = &s1->mpeg_enc_ctx;
1015  int ref, f_code, vbv_delay, ret;
1016 
1017  ret = init_get_bits8(&s->gb, buf, buf_size);
1018  if (ret < 0)
1019  return ret;
1020 
1021  ref = get_bits(&s->gb, 10); /* temporal ref */
1022  s->pict_type = get_bits(&s->gb, 3);
1023  if (s->pict_type == 0 || s->pict_type > 3)
1024  return AVERROR_INVALIDDATA;
1025 
1026  vbv_delay = get_bits(&s->gb, 16);
1027  s->vbv_delay = vbv_delay;
1028  if (s->pict_type == AV_PICTURE_TYPE_P ||
1029  s->pict_type == AV_PICTURE_TYPE_B) {
1030  s->full_pel[0] = get_bits1(&s->gb);
1031  f_code = get_bits(&s->gb, 3);
1032  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1033  return AVERROR_INVALIDDATA;
1034  f_code += !f_code;
1035  s->mpeg_f_code[0][0] = f_code;
1036  s->mpeg_f_code[0][1] = f_code;
1037  }
1038  if (s->pict_type == AV_PICTURE_TYPE_B) {
1039  s->full_pel[1] = get_bits1(&s->gb);
1040  f_code = get_bits(&s->gb, 3);
1041  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1042  return AVERROR_INVALIDDATA;
1043  f_code += !f_code;
1044  s->mpeg_f_code[1][0] = f_code;
1045  s->mpeg_f_code[1][1] = f_code;
1046  }
1047 
1048  if (avctx->debug & FF_DEBUG_PICT_INFO)
1049  av_log(avctx, AV_LOG_DEBUG,
1050  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1051 
1052  s->y_dc_scale = 8;
1053  s->c_dc_scale = 8;
1054  return 0;
1055 }
1056 
1057 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1058 {
1059  MpegEncContext *s = &s1->mpeg_enc_ctx;
1060  int horiz_size_ext, vert_size_ext;
1061  int bit_rate_ext;
1062 
1063  skip_bits(&s->gb, 1); /* profile and level esc*/
1064  s->avctx->profile = get_bits(&s->gb, 3);
1065  s->avctx->level = get_bits(&s->gb, 4);
1066  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1067  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1068 
1069  if (!s->chroma_format) {
1070  s->chroma_format = 1;
1071  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1072  }
1073 
1074  horiz_size_ext = get_bits(&s->gb, 2);
1075  vert_size_ext = get_bits(&s->gb, 2);
1076  s->width |= (horiz_size_ext << 12);
1077  s->height |= (vert_size_ext << 12);
1078  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1079  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1080  check_marker(s->avctx, &s->gb, "after bit rate extension");
1081  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1082 
1083  s->low_delay = get_bits1(&s->gb);
1084  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1085  s->low_delay = 1;
1086 
1087  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1088  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1089 
1090  ff_dlog(s->avctx, "sequence extension\n");
1091  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1092 
1093  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1094  av_log(s->avctx, AV_LOG_DEBUG,
1095  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1096  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1097  s->avctx->rc_buffer_size, s->bit_rate);
1098 }
1099 
1100 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1101 {
1102  MpegEncContext *s = &s1->mpeg_enc_ctx;
1103  int color_description, w, h;
1104 
1105  skip_bits(&s->gb, 3); /* video format */
1106  color_description = get_bits1(&s->gb);
1107  if (color_description) {
1108  s->avctx->color_primaries = get_bits(&s->gb, 8);
1109  s->avctx->color_trc = get_bits(&s->gb, 8);
1110  s->avctx->colorspace = get_bits(&s->gb, 8);
1111  }
1112  w = get_bits(&s->gb, 14);
1113  skip_bits(&s->gb, 1); // marker
1114  h = get_bits(&s->gb, 14);
1115  // remaining 3 bits are zero padding
1116 
1117  s1->pan_scan.width = 16 * w;
1118  s1->pan_scan.height = 16 * h;
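 /* AVPanScan expects width/height in 1/16-of-a-pixel units, hence the scaling
  * of the 14-bit display sizes. */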
1119 
1120  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1121  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1122 }
1123 
1124 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1125 {
1126  MpegEncContext *s = &s1->mpeg_enc_ctx;
1127  int i, nofco;
1128 
1129  nofco = 1;
1130  if (s->progressive_sequence) {
1131  if (s->repeat_first_field) {
1132  nofco++;
1133  if (s->top_field_first)
1134  nofco++;
1135  }
1136  } else {
1137  if (s->picture_structure == PICT_FRAME) {
1138  nofco++;
1139  if (s->repeat_first_field)
1140  nofco++;
1141  }
1142  }
1143  for (i = 0; i < nofco; i++) {
1144  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1145  skip_bits(&s->gb, 1); // marker
1146  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1147  skip_bits(&s->gb, 1); // marker
1148  }
1149 
1150  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1151  av_log(s->avctx, AV_LOG_DEBUG,
1152  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1153  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1154  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1155  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1156 }
1157 
1158 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1159  uint16_t matrix1[64], int intra)
1160 {
1161  int i;
1162 
1163  for (i = 0; i < 64; i++) {
1164  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1165  int v = get_bits(&s->gb, 8);
1166  if (v == 0) {
1167  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1168  return AVERROR_INVALIDDATA;
1169  }
1170  if (intra && i == 0 && v != 8) {
1171  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1172  v = 8; // needed by pink.mpg / issue1046
1173  }
1174  matrix0[j] = v;
1175  if (matrix1)
1176  matrix1[j] = v;
1177  }
1178  return 0;
1179 }
1180 
1181 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1182 {
1183  ff_dlog(s->avctx, "matrix extension\n");
1184 
1185  if (get_bits1(&s->gb))
1186  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1187  if (get_bits1(&s->gb))
1188  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1189  if (get_bits1(&s->gb))
1190  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1191  if (get_bits1(&s->gb))
1192  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1193 }
1194 
1195 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1196 {
1197  MpegEncContext *s = &s1->mpeg_enc_ctx;
1198 
1199  s->full_pel[0] = s->full_pel[1] = 0;
1200  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1201  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1202  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1203  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1204  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1205  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1206  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1207  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
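 /* An f_code of 0 is forbidden; force it to 1 so that later shifts by
  * f_code - 1 remain valid on damaged streams. */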
1208  if (!s->pict_type && s->context_initialized) {
1209  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1210  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1211  return AVERROR_INVALIDDATA;
1212  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1213  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1214  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1215  s->pict_type = AV_PICTURE_TYPE_I;
1216  else
1217  s->pict_type = AV_PICTURE_TYPE_P;
1218  } else
1219  s->pict_type = AV_PICTURE_TYPE_B;
1220  }
1221 
1222  s->intra_dc_precision = get_bits(&s->gb, 2);
1223  s->picture_structure = get_bits(&s->gb, 2);
1224  s->top_field_first = get_bits1(&s->gb);
1225  s->frame_pred_frame_dct = get_bits1(&s->gb);
1226  s->concealment_motion_vectors = get_bits1(&s->gb);
1227  s->q_scale_type = get_bits1(&s->gb);
1228  s->intra_vlc_format = get_bits1(&s->gb);
1229  s->alternate_scan = get_bits1(&s->gb);
1230  s->repeat_first_field = get_bits1(&s->gb);
1231  s->chroma_420_type = get_bits1(&s->gb);
1232  s->progressive_frame = get_bits1(&s->gb);
1233 
1234  // We only initialize intra_scantable, as both scantables always coincide
1235  // and all code therefore only uses the intra one.
1236  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,
1237  s->alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct);
1238 
1239  /* composite display not parsed */
1240  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1241  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1242  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1243  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1244  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1245  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1246  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1247  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1248  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1249 
1250  return 0;
1251 }
1252 
1253 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
1254 {
1255  MpegEncContext *s = &s1->mpeg_enc_ctx;
1256  AVCodecContext *avctx = s->avctx;
1257  int second_field = 0;
1258  int ret;
1259 
1260  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
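 /* Sanity check: even a picture consisting only of skipped macroblocks needs
  * roughly 11 bits per 33 macroblocks, so smaller buffers cannot hold a full frame. */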
1261  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1262  return AVERROR_INVALIDDATA;
1263  }
1264 
1265  /* start frame decoding */
1266  if (s->first_field || s->picture_structure == PICT_FRAME) {
1267  AVFrameSideData *pan_scan;
1268 
1269  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1270  return ret;
1271 
1272  if (s->picture_structure != PICT_FRAME) {
1273  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1274  (s->picture_structure == PICT_TOP_FIELD);
1275 
1276  for (int i = 0; i < 3; i++) {
1277  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1278  s->cur_pic.data[i] = FF_PTR_ADD(s->cur_pic.data[i],
1279  s->cur_pic.linesize[i]);
1280  }
1281  s->cur_pic.linesize[i] *= 2;
1282  }
1283  }
1284 
1286 
1287  /* first check if we must repeat the frame */
1288  s->cur_pic.ptr->f->repeat_pict = 0;
1289  if (s->repeat_first_field) {
1290  if (s->progressive_sequence) {
1291  if (s->top_field_first)
1292  s->cur_pic.ptr->f->repeat_pict = 4;
1293  else
1294  s->cur_pic.ptr->f->repeat_pict = 2;
1295  } else if (s->progressive_frame) {
1296  s->cur_pic.ptr->f->repeat_pict = 1;
1297  }
1298  }
1299 
1300  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1301  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1302  &pan_scan);
1303  if (ret < 0)
1304  return ret;
1305  if (pan_scan)
1306  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1307 
1308  if (s1->a53_buf_ref) {
1309  ret = ff_frame_new_side_data_from_buf(
1310  s->avctx, s->cur_pic.ptr->f, AV_FRAME_DATA_A53_CC,
1311  &s1->a53_buf_ref);
1312  if (ret < 0)
1313  return ret;
1314  }
1315 
1316  if (s1->has_stereo3d) {
1317  AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic.ptr->f);
1318  if (!stereo)
1319  return AVERROR(ENOMEM);
1320 
1321  stereo->type = s1->stereo3d_type;
1322  s1->has_stereo3d = 0;
1323  }
1324 
1325  if (s1->has_afd) {
1326  AVFrameSideData *sd;
1327  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1328  AV_FRAME_DATA_AFD, 1, &sd);
1329  if (ret < 0)
1330  return ret;
1331  if (sd)
1332  *sd->data = s1->afd;
1333  s1->has_afd = 0;
1334  }
1335 
1336  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1337  ff_thread_finish_setup(avctx);
1338  } else { // second field
1339  second_field = 1;
1340  if (!s->cur_pic.ptr) {
1341  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1342  return AVERROR_INVALIDDATA;
1343  }
1344 
1345  if (s->avctx->hwaccel) {
1346  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1347  av_log(avctx, AV_LOG_ERROR,
1348  "hardware accelerator failed to decode first field\n");
1349  return ret;
1350  }
1351  }
1353  if (ret < 0)
1354  return ret;
1355 
1356  for (int i = 0; i < 3; i++) {
1357  s->cur_pic.data[i] = s->cur_pic.ptr->f->data[i];
1358  if (s->picture_structure == PICT_BOTTOM_FIELD)
1359  s->cur_pic.data[i] +=
1360  s->cur_pic.ptr->f->linesize[i];
1361  }
1362  }
1363 
1364  if (avctx->hwaccel) {
1365  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1366  return ret;
1367  } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
1368  // Exchange UV
1369  FFSWAP(uint8_t*, s->cur_pic.data[1], s->cur_pic.data[2]);
1370  FFSWAP(ptrdiff_t, s->cur_pic.linesize[1], s->cur_pic.linesize[2]);
1371  if (!second_field) {
1372  FFSWAP(uint8_t*, s->next_pic.data[1], s->next_pic.data[2]);
1373  FFSWAP(ptrdiff_t, s->next_pic.linesize[1], s->next_pic.linesize[2]);
1374  FFSWAP(uint8_t*, s->last_pic.data[1], s->last_pic.data[2]);
1375  FFSWAP(ptrdiff_t, s->last_pic.linesize[1], s->last_pic.linesize[2]);
1376  }
1377  }
1378 
1379  return 0;
1380 }
1381 
1382 #define DECODE_SLICE_ERROR -1
1383 #define DECODE_SLICE_OK 0
1384 
1385 /**
1386  * Decode a slice.
1387  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1388  * @return DECODE_SLICE_ERROR if the slice is damaged,
1389  * DECODE_SLICE_OK if this slice is OK
1390  */
1391 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1392  const uint8_t **buf, int buf_size)
1393 {
1394  AVCodecContext *avctx = s->avctx;
1395  const int lowres = s->avctx->lowres;
1396  const int field_pic = s->picture_structure != PICT_FRAME;
1397  int ret;
1398 
1399  s->resync_mb_x =
1400  s->resync_mb_y = -1;
1401 
1402  av_assert0(mb_y < s->mb_height);
1403 
1404  ret = init_get_bits8(&s->gb, *buf, buf_size);
1405  if (ret < 0)
1406  return ret;
1407 
1408  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1409  skip_bits(&s->gb, 3);
1410 
1411  ff_mpeg1_clean_buffers(s);
1412  s->interlaced_dct = 0;
1413 
1414  s->qscale = mpeg_get_qscale(s);
1415 
1416  if (s->qscale == 0) {
1417  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1418  return AVERROR_INVALIDDATA;
1419  }
1420 
1421  /* extra slice info */
1422  if (skip_1stop_8data_bits(&s->gb) < 0)
1423  return AVERROR_INVALIDDATA;
1424 
1425  s->mb_x = 0;
1426 
1427  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1428  skip_bits1(&s->gb);
1429  } else {
1430  while (get_bits_left(&s->gb) > 0) {
1431  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1432  MBINCR_VLC_BITS, 2);
1433  if (code < 0) {
1434  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1435  return AVERROR_INVALIDDATA;
1436  }
1437  if (code >= 33) {
1438  if (code == 33)
1439  s->mb_x += 33;
1440  /* otherwise, stuffing, nothing to do */
1441  } else {
1442  s->mb_x += code;
1443  break;
1444  }
1445  }
1446  }
1447 
1448  if (s->mb_x >= (unsigned) s->mb_width) {
1449  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1450  return AVERROR_INVALIDDATA;
1451  }
1452 
1453  if (avctx->hwaccel) {
1454  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1455  int start_code = -1;
1456  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1457  if (buf_end < *buf + buf_size)
1458  buf_end -= 4;
1459  s->mb_y = mb_y;
1460  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1461  return DECODE_SLICE_ERROR;
1462  *buf = buf_end;
1463  return DECODE_SLICE_OK;
1464  }
1465 
1466  s->resync_mb_x = s->mb_x;
1467  s->resync_mb_y = s->mb_y = mb_y;
1468  s->mb_skip_run = 0;
1469  ff_init_block_index(s);
1470 
1471  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1472  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1473  av_log(s->avctx, AV_LOG_DEBUG,
1474  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1475  s->qscale,
1476  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1477  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1478  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1479  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1480  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1481  s->progressive_sequence ? "ps" : "",
1482  s->progressive_frame ? "pf" : "",
1483  s->alternate_scan ? "alt" : "",
1484  s->top_field_first ? "top" : "",
1485  s->intra_dc_precision, s->picture_structure,
1486  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1487  s->q_scale_type, s->intra_vlc_format,
1488  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1489  }
1490  }
1491 
1492  for (;;) {
1493  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1494  return ret;
1495 
1496  // Note motion_val is normally NULL unless we want to extract the MVs.
1497  if (s->cur_pic.motion_val[0]) {
1498  const int wrap = s->b8_stride;
1499  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1500  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1501  int motion_x, motion_y, dir, i;
1502 
1503  for (i = 0; i < 2; i++) {
1504  for (dir = 0; dir < 2; dir++) {
1505  if (s->mb_intra ||
1506  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1507  motion_x = motion_y = 0;
1508  } else if (s->mv_type == MV_TYPE_16X16 ||
1509  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1510  motion_x = s->mv[dir][0][0];
1511  motion_y = s->mv[dir][0][1];
1512  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1513  motion_x = s->mv[dir][i][0];
1514  motion_y = s->mv[dir][i][1];
1515  }
1516 
1517  s->cur_pic.motion_val[dir][xy][0] = motion_x;
1518  s->cur_pic.motion_val[dir][xy][1] = motion_y;
1519  s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
1520  s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
1521  s->cur_pic.ref_index [dir][b8_xy] =
1522  s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1523  av_assert2(s->field_select[dir][i] == 0 ||
1524  s->field_select[dir][i] == 1);
1525  }
1526  xy += wrap;
1527  b8_xy += 2;
1528  }
1529  }
1530 
1531  s->dest[0] += 16 >> lowres;
1532  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1533  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1534 
1535  ff_mpv_reconstruct_mb(s, s->block);
1536 
1537  if (++s->mb_x >= s->mb_width) {
1538  const int mb_size = 16 >> s->avctx->lowres;
1539  int left;
1540 
1541  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1542  ff_mpv_report_decode_progress(s);
1543 
1544  s->mb_x = 0;
1545  s->mb_y += 1 << field_pic;
1546 
1547  if (s->mb_y >= s->mb_height) {
1548  int left = get_bits_left(&s->gb);
1549  int is_d10 = s->chroma_format == 2 &&
1550  s->pict_type == AV_PICTURE_TYPE_I &&
1551  avctx->profile == 0 && avctx->level == 5 &&
1552  s->intra_dc_precision == 2 &&
1553  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1554  s->progressive_frame == 0
1555  /* vbv_delay == 0xBBB || 0xE10 */;
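 /* D-10 (Sony IMX) streams pad pictures to a constant size, so trailing
  * stuffing here is expected and must not be flagged as an error below. */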
1556 
1557  if (left >= 32 && !is_d10) {
1558  GetBitContext gb = s->gb;
1559  align_get_bits(&gb);
1560  if (show_bits(&gb, 24) == 0x060E2B) {
1561  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1562  is_d10 = 1;
1563  }
1564  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1565  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1566  goto eos;
1567  }
1568  }
1569 
1570  if (left < 0 ||
1571  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1572  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1573  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1574  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1575  return AVERROR_INVALIDDATA;
1576  } else
1577  goto eos;
1578  }
1579  // There are some files out there which are missing the last slice
1580  // in cases where the slice is completely outside the visible
1581  // area, we detect this here instead of running into the end expecting
1582  // more data
1583  left = get_bits_left(&s->gb);
1584  if (s->mb_y >= ((s->height + 15) >> 4) &&
1585  !s->progressive_sequence &&
1586  left <= 25 &&
1587  left >= 0 &&
1588  s->mb_skip_run == -1 &&
1589  (!left || show_bits(&s->gb, left) == 0))
1590  goto eos;
1591 
1592  ff_init_block_index(s);
1593  }
1594 
1595  /* skip mb handling */
1596  if (s->mb_skip_run == -1) {
1597  /* read increment again */
1598  s->mb_skip_run = 0;
1599  for (;;) {
1600  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1601  MBINCR_VLC_BITS, 2);
1602  if (code < 0) {
1603  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1604  return AVERROR_INVALIDDATA;
1605  }
1606  if (code >= 33) {
1607  if (code == 33) {
1608  s->mb_skip_run += 33;
1609  } else if (code == 35) {
1610  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1611  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1612  return AVERROR_INVALIDDATA;
1613  }
1614  goto eos; /* end of slice */
1615  }
1616  /* otherwise, stuffing, nothing to do */
1617  } else {
1618  s->mb_skip_run += code;
1619  break;
1620  }
1621  }
1622  if (s->mb_skip_run) {
1623  int i;
1624  if (s->pict_type == AV_PICTURE_TYPE_I) {
1625  av_log(s->avctx, AV_LOG_ERROR,
1626  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1627  return AVERROR_INVALIDDATA;
1628  }
1629 
1630  /* skip mb */
1631  s->mb_intra = 0;
1632  for (i = 0; i < 12; i++)
1633  s->block_last_index[i] = -1;
1634  if (s->picture_structure == PICT_FRAME)
1635  s->mv_type = MV_TYPE_16X16;
1636  else
1637  s->mv_type = MV_TYPE_FIELD;
1638  if (s->pict_type == AV_PICTURE_TYPE_P) {
1639  /* if P type, zero motion vector is implied */
1640  s->mv_dir = MV_DIR_FORWARD;
1641  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1642  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1643  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1644  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1645  } else {
1646  /* if B type, reuse previous vectors and directions */
1647  s->mv[0][0][0] = s->last_mv[0][0][0];
1648  s->mv[0][0][1] = s->last_mv[0][0][1];
1649  s->mv[1][0][0] = s->last_mv[1][0][0];
1650  s->mv[1][0][1] = s->last_mv[1][0][1];
1651  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1652  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1653  }
1654  }
1655  }
1656  }
1657 eos: // end of slice
1658  if (get_bits_left(&s->gb) < 0) {
1659  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1660  return AVERROR_INVALIDDATA;
1661  }
1662  *buf += (get_bits_count(&s->gb) - 1) / 8;
1663  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1664  return 0;
1665 }
1666 
1667 static int slice_decode_thread(AVCodecContext *c, void *arg)
1668 {
1669  MpegEncContext *s = *(void **) arg;
1670  const uint8_t *buf = s->gb.buffer;
1671  int mb_y = s->start_mb_y;
1672  const int field_pic = s->picture_structure != PICT_FRAME;
1673 
1674  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1675 
1676  for (;;) {
1677  uint32_t start_code;
1678  int ret;
1679 
1680  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1681  emms_c();
1682  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1683  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1684  s->start_mb_y, s->end_mb_y, s->er.error_count);
1685  if (ret < 0) {
1686  if (c->err_recognition & AV_EF_EXPLODE)
1687  return ret;
1688  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1689  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1690  s->mb_x, s->mb_y,
1691  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1692  } else {
1693  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1694  s->mb_x - 1, s->mb_y,
1695  ER_AC_END | ER_DC_END | ER_MV_END);
1696  }
1697 
1698  if (s->mb_y == s->end_mb_y)
1699  return 0;
1700 
1701  start_code = -1;
1702  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1703  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1704  return AVERROR_INVALIDDATA;
1705  mb_y = start_code - SLICE_MIN_START_CODE;
1706  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1707  mb_y += (*buf&0xE0)<<2;
1708  mb_y <<= field_pic;
1709  if (s->picture_structure == PICT_BOTTOM_FIELD)
1710  mb_y++;
1711  if (mb_y >= s->end_mb_y)
1712  return AVERROR_INVALIDDATA;
1713  }
1714 }
1715 
1716 /**
1717  * Handle slice ends.
1718  * @return 1 if it seems to be the last slice
1719  */
1720 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
1721 {
1722  Mpeg1Context *s1 = avctx->priv_data;
1723  MpegEncContext *s = &s1->mpeg_enc_ctx;
1724 
1725  if (!s->context_initialized || !s->cur_pic.ptr)
1726  return 0;
1727 
1728  if (s->avctx->hwaccel) {
1729  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1730  if (ret < 0) {
1731  av_log(avctx, AV_LOG_ERROR,
1732  "hardware accelerator failed to decode picture\n");
1733  return ret;
1734  }
1735  }
1736 
1737  /* end of slice reached */
1738  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1739  /* end of image */
1740 
1741  ff_er_frame_end(&s->er, NULL);
1742 
1743  ff_mpv_frame_end(s);
1744 
1745  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1746  int ret = av_frame_ref(pict, s->cur_pic.ptr->f);
1747  if (ret < 0)
1748  return ret;
1749  ff_print_debug_info(s, s->cur_pic.ptr, pict);
1750  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1751  *got_output = 1;
1752  } else {
1753  /* latency of 1 frame for I- and P-frames */
1754  if (s->last_pic.ptr && !s->last_pic.ptr->dummy) {
1755  int ret = av_frame_ref(pict, s->last_pic.ptr->f);
1756  if (ret < 0)
1757  return ret;
1758  ff_print_debug_info(s, s->last_pic.ptr, pict);
1759  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1760  *got_output = 1;
1761  }
1762  }
1763 
1764  return 1;
1765  } else {
1766  return 0;
1767  }
1768 }
1769 
1770 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1771  const uint8_t *buf, int buf_size)
1772 {
1773  Mpeg1Context *s1 = avctx->priv_data;
1774  MpegEncContext *s = &s1->mpeg_enc_ctx;
1775  int width, height;
1776  int i, v, j;
1777 
1778  int ret = init_get_bits8(&s->gb, buf, buf_size);
1779  if (ret < 0)
1780  return ret;
1781 
1782  width = get_bits(&s->gb, 12);
1783  height = get_bits(&s->gb, 12);
1784  if (width == 0 || height == 0) {
1785  av_log(avctx, AV_LOG_WARNING,
1786  "Invalid horizontal or vertical size value.\n");
1787  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1788  return AVERROR_INVALIDDATA;
1789  }
1790  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1791  if (s1->aspect_ratio_info == 0) {
1792  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1793  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1794  return AVERROR_INVALIDDATA;
1795  }
1796  s1->frame_rate_index = get_bits(&s->gb, 4);
1797  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1798  av_log(avctx, AV_LOG_WARNING,
1799  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1800  s1->frame_rate_index = 1;
1801  }
1802  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1803  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1804  return AVERROR_INVALIDDATA;
1805  }
1806 
1807  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1808  skip_bits(&s->gb, 1);
1809 
1810  /* get matrix */
1811  if (get_bits1(&s->gb)) {
1812  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1813  } else {
1814  for (i = 0; i < 64; i++) {
1815  j = s->idsp.idct_permutation[i];
1816  v = ff_mpeg1_default_intra_matrix[i];
1817  s->intra_matrix[j] = v;
1818  s->chroma_intra_matrix[j] = v;
1819  }
1820  }
1821  if (get_bits1(&s->gb)) {
1822  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1823  } else {
1824  for (i = 0; i < 64; i++) {
1825  int j = s->idsp.idct_permutation[i];
1826  v = ff_mpeg1_default_non_intra_matrix[i];
1827  s->inter_matrix[j] = v;
1828  s->chroma_inter_matrix[j] = v;
1829  }
1830  }
1831 
1832  if (show_bits(&s->gb, 23) != 0) {
1833  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1834  return AVERROR_INVALIDDATA;
1835  }
1836 
1837  s->width = width;
1838  s->height = height;
1839 
1840  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1841  s->progressive_sequence = 1;
1842  s->progressive_frame = 1;
1843  s->picture_structure = PICT_FRAME;
1844  s->first_field = 0;
1845  s->frame_pred_frame_dct = 1;
1846  s->chroma_format = 1;
1847  s->codec_id =
1848  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1849  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1850  s->low_delay = 1;
1851 
1852  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1853  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1854  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1855 
1856  return 0;
1857 }
1858 
1859 static int vcr2_init_sequence(AVCodecContext *avctx)
1860 {
1861  Mpeg1Context *s1 = avctx->priv_data;
1862  MpegEncContext *s = &s1->mpeg_enc_ctx;
1863  int i, v, ret;
1864 
1865  /* start new MPEG-1 context decoding */
1866  if (s->context_initialized)
1867  ff_mpv_common_end(s);
1868 
1869  s->width = avctx->coded_width;
1870  s->height = avctx->coded_height;
1871  avctx->has_b_frames = 0; // true?
1872  s->low_delay = 1;
1873 
1874  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1875 
1876  if ((ret = ff_mpv_common_init(s)) < 0)
1877  return ret;
1878  if (!s->avctx->lowres)
1880 
1881  for (i = 0; i < 64; i++) {
1882  int j = s->idsp.idct_permutation[i];
1883  v = ff_mpeg1_default_intra_matrix[i];
1884  s->intra_matrix[j] = v;
1885  s->chroma_intra_matrix[j] = v;
1886 
1887  v = ff_mpeg1_default_non_intra_matrix[i];
1888  s->inter_matrix[j] = v;
1889  s->chroma_inter_matrix[j] = v;
1890  }
1891 
1892  s->progressive_sequence = 1;
1893  s->progressive_frame = 1;
1894  s->picture_structure = PICT_FRAME;
1895  s->first_field = 0;
1896  s->frame_pred_frame_dct = 1;
1897  s->chroma_format = 1;
1898  if (s->codec_tag == AV_RL32("BW10")) {
1899  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1900  } else {
1901  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1902  }
1903  s1->save_width = s->width;
1904  s1->save_height = s->height;
1905  s1->save_progressive_seq = s->progressive_sequence;
1906  return 0;
1907 }
1908 
1909 static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format,
1910  const char *label)
1911 {
1912  Mpeg1Context *s1 = avctx->priv_data;
1913 
1914  av_assert2(format != CC_FORMAT_AUTO);
1915 
1916  if (!s1->cc_format) {
1917  s1->cc_format = format;
1918 
1919  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1920  }
1921 
1922  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1923 }
1924 
1925 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1926  const uint8_t *p, int buf_size)
1927 {
1928  Mpeg1Context *s1 = avctx->priv_data;
1929 
1930  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1931  buf_size >= 6 &&
1932  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1933  p[4] == 3 && (p[5] & 0x40)) {
1934  /* extract A53 Part 4 CC data */
1935  int cc_count = p[5] & 0x1f;
1936  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1937  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1938  const uint64_t new_size = (old_size + cc_count
1939  * UINT64_C(3));
1940  int ret;
1941 
1942  if (new_size > 3*A53_MAX_CC_COUNT)
1943  return AVERROR(EINVAL);
1944 
1945  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1946  if (ret >= 0)
1947  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1948 
1949  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1950  }
1951  return 1;
1952  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1953  buf_size >= 2 &&
1954  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1955  /* extract SCTE-20 CC data */
1956  GetBitContext gb;
1957  int cc_count = 0;
1958  int i, ret;
1959 
1960  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1961  if (ret < 0)
1962  return ret;
1963  cc_count = get_bits(&gb, 5);
1964  if (cc_count > 0) {
1965  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1966  const uint64_t new_size = (old_size + cc_count
1967  * UINT64_C(3));
1968  if (new_size > 3*A53_MAX_CC_COUNT)
1969  return AVERROR(EINVAL);
1970 
1971  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1972  if (ret >= 0) {
1973  uint8_t field, cc1, cc2;
1974  uint8_t *cap = s1->a53_buf_ref->data;
1975 
1976  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
1977  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1978  skip_bits(&gb, 2); // priority
1979  field = get_bits(&gb, 2);
1980  skip_bits(&gb, 5); // line_offset
1981  cc1 = get_bits(&gb, 8);
1982  cc2 = get_bits(&gb, 8);
1983  skip_bits(&gb, 1); // marker
1984 
1985  if (!field) { // forbidden
1986  cap[0] = cap[1] = cap[2] = 0x00;
1987  } else {
1988  field = (field == 2 ? 1 : 0);
1989  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1990  cap[0] = 0x04 | field;
1991  cap[1] = ff_reverse[cc1];
1992  cap[2] = ff_reverse[cc2];
1993  }
1994  cap += 3;
1995  }
1996  }
1997 
1998  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
1999  }
2000  return 1;
2001  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
2002  buf_size >= 11 &&
2003  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2004  /* extract DVD CC data
2005  *
2006  * uint32_t user_data_start_code 0x000001B2 (big endian)
2007  * uint16_t user_identifier 0x4343 "CC"
2008  * uint8_t user_data_type_code 0x01
2009  * uint8_t caption_block_size 0xF8
2010  * uint8_t
2011  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2012  * bit 6 caption_filler 0
2013  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2014  * bit 0 caption_extra_field_added 1=one additional caption word
2015  *
2016  * struct caption_field_block {
2017  * uint8_t
2018  * bit 7:1 caption_filler 0x7F (all 1s)
2019  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2020  * uint8_t caption_first_byte
2021  * uint8_t caption_second_byte
2022  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2023  *
2024  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2025  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2026  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2027  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2028  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2029  int cc_count = 0;
2030  int i, ret;
2031  // There is a caption count field in the data, but it is often
2032  // incorrect. So count the number of captions present.
2033  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2034  cc_count++;
2035  // Transform the DVD format into A53 Part 4 format
2036  if (cc_count > 0) {
2037  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2038  const uint64_t new_size = (old_size + cc_count
2039  * UINT64_C(6));
2040  if (new_size > 3*A53_MAX_CC_COUNT)
2041  return AVERROR(EINVAL);
2042 
2043  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2044  if (ret >= 0) {
2045  uint8_t field1 = !!(p[4] & 0x80);
2046  uint8_t *cap = s1->a53_buf_ref->data;
2047  p += 5;
2048  for (i = 0; i < cc_count; i++) {
2049  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2050  cap[1] = p[1];
2051  cap[2] = p[2];
2052  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2053  cap[4] = p[4];
2054  cap[5] = p[5];
2055  cap += 6;
2056  p += 6;
2057  }
2058  }
2059 
2060  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2061  }
2062  return 1;
2063  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVB_0502) &&
2064  buf_size >= 12 &&
2065  p[0] == 0x05 && p[1] == 0x02) {
2066  /* extract DVB 0502 CC data */
2067  const uint8_t cc_header = 0xf8 | 0x04 /* valid */ | 0x00 /* line 21 field 1 */;
2068  uint8_t cc_data[4] = {0};
2069  int cc_count = 0;
2070  uint8_t dvb_cc_type = p[7];
2071  p += 8;
2072  buf_size -= 8;
2073 
2074  if (dvb_cc_type == 0x05 && buf_size >= 7) {
2075  dvb_cc_type = p[6];
2076  p += 7;
2077  buf_size -= 7;
2078  }
2079 
2080  if (dvb_cc_type == 0x02 && buf_size >= 4) { /* 2-byte caption, can be repeated */
2081  cc_count = 1;
2082  cc_data[0] = p[1];
2083  cc_data[1] = p[2];
2084  dvb_cc_type = p[3];
2085 
2086  /* Only repeat characters when the next type flag
2087  * is 0x04 and the characters are repeatable (i.e., less than
2088  * 32 with the parity stripped).
2089  */
2090  if (dvb_cc_type == 0x04 && (cc_data[0] & 0x7f) < 32) {
2091  cc_count = 2;
2092  cc_data[2] = cc_data[0];
2093  cc_data[3] = cc_data[1];
2094  }
2095  } else if (dvb_cc_type == 0x04 && buf_size >= 5) { /* 4-byte caption, not repeated */
2096  cc_count = 2;
2097  cc_data[0] = p[1];
2098  cc_data[1] = p[2];
2099  cc_data[2] = p[3];
2100  cc_data[3] = p[4];
2101  }
2102 
2103  if (cc_count > 0) {
2104  int ret;
2105  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2106  const uint64_t new_size = (old_size + cc_count * UINT64_C(3));
2107  if (new_size > 3 * A53_MAX_CC_COUNT)
2108  return AVERROR(EINVAL);
2109 
2110  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2111  if (ret >= 0) {
2112  s1->a53_buf_ref->data[0] = cc_header;
2113  s1->a53_buf_ref->data[1] = cc_data[0];
2114  s1->a53_buf_ref->data[2] = cc_data[1];
2115  if (cc_count == 2) {
2116  s1->a53_buf_ref->data[3] = cc_header;
2117  s1->a53_buf_ref->data[4] = cc_data[2];
2118  s1->a53_buf_ref->data[5] = cc_data[3];
2119  }
2120  }
2121 
2122  mpeg_set_cc_format(avctx, CC_FORMAT_DVB_0502, "DVB 0502");
2123  }
2124  return 1;
2125  }
2126  return 0;
2127 }
2128 
2128 
2129 static void mpeg_decode_user_data(AVCodecContext *avctx,
2130  const uint8_t *p, int buf_size)
2131 {
2132  Mpeg1Context *s = avctx->priv_data;
2133  const uint8_t *buf_end = p + buf_size;
2134  Mpeg1Context *s1 = avctx->priv_data;
2135 
2136 #if 0
2137  int i;
2138  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2139  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2140  }
2141  av_log(avctx, AV_LOG_ERROR, "\n");
2142 #endif
2143 
2144  if (buf_size > 29){
2145  int i;
2146  for(i=0; i<20; i++)
2147  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2148  s->tmpgexs= 1;
2149  }
2150  }
2151  /* we parse the DTG active format information */
2152  if (buf_end - p >= 5 &&
2153  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2154  int flags = p[4];
2155  p += 5;
2156  if (flags & 0x80) {
2157  /* skip event id */
2158  p += 2;
2159  }
2160  if (flags & 0x40) {
2161  if (buf_end - p < 1)
2162  return;
2163  s1->has_afd = 1;
2164  s1->afd = p[0] & 0x0f;
2165  }
2166  } else if (buf_end - p >= 6 &&
2167  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2168  p[4] == 0x03) { // S3D_video_format_length
2169  // the 0x7F mask ignores the reserved_bit value
2170  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2171 
2172  if (S3D_video_format_type == 0x03 ||
2173  S3D_video_format_type == 0x04 ||
2174  S3D_video_format_type == 0x08 ||
2175  S3D_video_format_type == 0x23) {
2176 
2177  s1->has_stereo3d = 1;
2178 
2179  switch (S3D_video_format_type) {
2180  case 0x03:
2181  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE;
2182  break;
2183  case 0x04:
2184  s1->stereo3d_type = AV_STEREO3D_TOPBOTTOM;
2185  break;
2186  case 0x08:
2187  s1->stereo3d_type = AV_STEREO3D_2D;
2188  break;
2189  case 0x23:
2190  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2191  break;
2192  }
2193  }
2194  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2195  return;
2196  }
2197 }
2198 
2199 static int mpeg_decode_gop(AVCodecContext *avctx,
2200  const uint8_t *buf, int buf_size)
2201 {
2202  Mpeg1Context *s1 = avctx->priv_data;
2203  MpegEncContext *s = &s1->mpeg_enc_ctx;
2204  int broken_link;
2205  int64_t tc;
2206 
2207  int ret = init_get_bits8(&s->gb, buf, buf_size);
2208  if (ret < 0)
2209  return ret;
2210 
2211  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2212 
2213  s1->closed_gop = get_bits1(&s->gb);
2214  /* broken_link indicates that, after editing, the reference
2215  * frames of the first B-frames following the GOP's I-frame
2216  * are missing (open GOP) */
2217  broken_link = get_bits1(&s->gb);
2218 
2219  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2220  char tcbuf[AV_TIMECODE_STR_SIZE];
2221  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2222  av_log(s->avctx, AV_LOG_DEBUG,
2223  "GOP (%s) closed_gop=%d broken_link=%d\n",
2224  tcbuf, s1->closed_gop, broken_link);
2225  }
2226 
2227  return 0;
2228 }
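The 25 bits stored in timecode_frame_start above are the GOP header time_code of ISO/IEC 13818-2 (drop_frame_flag, hours, minutes, a marker bit, seconds, pictures), which av_timecode_make_mpeg_tc_string() later formats. The following stand-alone sketch of that bit layout is illustrative only and not part of this file (print_gop_timecode is a hypothetical helper name).

#include <stdint.h>
#include <stdio.h>

/* Unpack the 25-bit MPEG GOP time_code:
 * drop(1) hh(5) mm(6) marker(1) ss(6) pictures(6). */
static void print_gop_timecode(uint32_t tc25)
{
    int drop = (tc25 >> 24) & 0x01;
    int hh   = (tc25 >> 19) & 0x1f;
    int mm   = (tc25 >> 13) & 0x3f;
    /* bit 12 is the marker_bit and carries no timecode information */
    int ss   = (tc25 >>  6) & 0x3f;
    int ff   =  tc25        & 0x3f;
    printf("%02d:%02d:%02d%c%02d\n", hh, mm, ss, drop ? ';' : ':', ff);
}

int main(void)
{
    /* 01:02:03:04, non-drop, marker bit set */
    uint32_t tc = (1u << 19) | (2u << 13) | (1u << 12) | (3u << 6) | 4u;
    print_gop_timecode(tc); /* prints: 01:02:03:04 */
    return 0;
}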
2229 
2230 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2231  int *got_output, const uint8_t *buf, int buf_size)
2232 {
2233  Mpeg1Context *s = avctx->priv_data;
2234  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2235  const uint8_t *buf_ptr = buf;
2236  const uint8_t *buf_end = buf + buf_size;
2237  int ret, input_size;
2238  int last_code = 0, skip_frame = 0;
2239  int picture_start_code_seen = 0;
2240 
2241  for (;;) {
2242  /* find next start code */
2243  uint32_t start_code = -1;
2244  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2245  if (start_code > 0x1ff) {
2246  if (!skip_frame) {
2247  if (HAVE_THREADS &&
2248  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2249  !avctx->hwaccel) {
2250  int i;
2251  av_assert0(avctx->thread_count > 1);
2252 
2253  avctx->execute(avctx, slice_decode_thread,
2254  &s2->thread_context[0], NULL,
2255  s->slice_count, sizeof(void *));
2256  for (i = 0; i < s->slice_count; i++)
2257  s2->er.error_count += s2->thread_context[i]->er.error_count;
2258  }
2259 
2260  ret = slice_end(avctx, picture, got_output);
2261  if (ret < 0)
2262  return ret;
2263  }
2264  s2->pict_type = 0;
2265 
2266  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2267  return AVERROR_INVALIDDATA;
2268 
2269  return FFMAX(0, buf_ptr - buf);
2270  }
2271 
2272  input_size = buf_end - buf_ptr;
2273 
2274  if (avctx->debug & FF_DEBUG_STARTCODE)
2275  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2276  start_code, buf_ptr - buf, input_size);
2277 
2278  /* prepare data for next start code */
2279  switch (start_code) {
2280  case SEQ_START_CODE:
2281  if (last_code == 0) {
2282  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2283  if (buf != avctx->extradata)
2284  s->sync = 1;
2285  } else {
2286  av_log(avctx, AV_LOG_ERROR,
2287  "ignoring SEQ_START_CODE after %X\n", last_code);
2288  if (avctx->err_recognition & AV_EF_EXPLODE)
2289  return AVERROR_INVALIDDATA;
2290  }
2291  break;
2292 
2293  case PICTURE_START_CODE:
2294  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2295  /* If it's a frame picture, there can't be more than one picture header.
2296  Yet, it does happen and we need to handle it. */
2297  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2298  break;
2299  }
2300  picture_start_code_seen = 1;
2301 
2302  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2303  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2304  break;
2305  }
2306 
2307  if (s2->width <= 0 || s2->height <= 0) {
2308  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2309  s2->width, s2->height);
2310  return AVERROR_INVALIDDATA;
2311  }
2312 
2313  if (s->tmpgexs){
2314  s2->intra_dc_precision= 3;
2315  s2->intra_matrix[0]= 1;
2316  }
2317  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2318  !avctx->hwaccel && s->slice_count) {
2319  int i;
2320 
2321  avctx->execute(avctx, slice_decode_thread,
2322  s2->thread_context, NULL,
2323  s->slice_count, sizeof(void *));
2324  for (i = 0; i < s->slice_count; i++)
2325  s2->er.error_count += s2->thread_context[i]->er.error_count;
2326  s->slice_count = 0;
2327  }
2328  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2329  ret = mpeg_decode_postinit(avctx);
2330  if (ret < 0) {
2331  av_log(avctx, AV_LOG_ERROR,
2332  "mpeg_decode_postinit() failure\n");
2333  return ret;
2334  }
2335 
2336  /* We have a complete image: we try to decompress it. */
2337  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2338  s2->pict_type = 0;
2339  s->first_slice = 1;
2340  last_code = PICTURE_START_CODE;
2341  } else {
2342  av_log(avctx, AV_LOG_ERROR,
2343  "ignoring pic after %X\n", last_code);
2344  if (avctx->err_recognition & AV_EF_EXPLODE)
2345  return AVERROR_INVALIDDATA;
2346  }
2347  break;
2348  case EXT_START_CODE:
2349  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2350  if (ret < 0)
2351  return ret;
2352 
2353  switch (get_bits(&s2->gb, 4)) {
2354  case 0x1:
2355  if (last_code == 0) {
2356  mpeg_decode_sequence_extension(s);
2357  } else {
2358  av_log(avctx, AV_LOG_ERROR,
2359  "ignoring seq ext after %X\n", last_code);
2360  if (avctx->err_recognition & AV_EF_EXPLODE)
2361  return AVERROR_INVALIDDATA;
2362  }
2363  break;
2364  case 0x2:
2365  mpeg_decode_sequence_display_extension(s);
2366  break;
2367  case 0x3:
2368  mpeg_decode_quant_matrix_extension(s2);
2369  break;
2370  case 0x7:
2371  mpeg_decode_picture_display_extension(s);
2372  break;
2373  case 0x8:
2374  if (last_code == PICTURE_START_CODE) {
2375  ret = mpeg_decode_picture_coding_extension(s);
2376  if (ret < 0)
2377  return ret;
2378  } else {
2379  av_log(avctx, AV_LOG_ERROR,
2380  "ignoring pic cod ext after %X\n", last_code);
2381  if (avctx->err_recognition & AV_EF_EXPLODE)
2382  return AVERROR_INVALIDDATA;
2383  }
2384  break;
2385  }
2386  break;
2387  case USER_START_CODE:
2388  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2389  break;
2390  case GOP_START_CODE:
2391  if (last_code == 0) {
2392  s2->first_field = 0;
2393  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2394  if (ret < 0)
2395  return ret;
2396  s->sync = 1;
2397  } else {
2398  av_log(avctx, AV_LOG_ERROR,
2399  "ignoring GOP_START_CODE after %X\n", last_code);
2400  if (avctx->err_recognition & AV_EF_EXPLODE)
2401  return AVERROR_INVALIDDATA;
2402  }
2403  break;
2404  default:
2405  if (start_code >= SLICE_MIN_START_CODE &&
2406  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2407  if (s2->progressive_sequence && !s2->progressive_frame) {
2408  s2->progressive_frame = 1;
2409  av_log(s2->avctx, AV_LOG_ERROR,
2410  "interlaced frame in progressive sequence, ignoring\n");
2411  }
2412 
2413  if (s2->picture_structure == 0 ||
2414  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2415  av_log(s2->avctx, AV_LOG_ERROR,
2416  "picture_structure %d invalid, ignoring\n",
2417  s2->picture_structure);
2418  s2->picture_structure = PICT_FRAME;
2419  }
2420 
2421  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2422  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2423 
2424  if (s2->picture_structure == PICT_FRAME) {
2425  s2->first_field = 0;
2426  s2->v_edge_pos = 16 * s2->mb_height;
2427  } else {
2428  s2->first_field ^= 1;
2429  s2->v_edge_pos = 8 * s2->mb_height;
2430  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2431  }
2432  }
2433  if (start_code >= SLICE_MIN_START_CODE &&
2434  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2435  const int field_pic = s2->picture_structure != PICT_FRAME;
2436  int mb_y = start_code - SLICE_MIN_START_CODE;
2437  last_code = SLICE_MIN_START_CODE;
2438  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2439  mb_y += (*buf_ptr&0xE0)<<2;
2440 
2441  mb_y <<= field_pic;
2442  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2443  mb_y++;
2444 
2445  if (buf_end - buf_ptr < 2) {
2446  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2447  return AVERROR_INVALIDDATA;
2448  }
2449 
2450  if (mb_y >= s2->mb_height) {
2451  av_log(s2->avctx, AV_LOG_ERROR,
2452  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2453  return AVERROR_INVALIDDATA;
2454  }
2455 
2456  if (!s2->last_pic.ptr) {
2457  /* Skip B-frames if we do not have reference frames and
2458  * GOP is not closed. */
2459  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2460  if (!s->closed_gop) {
2461  skip_frame = 1;
2462  av_log(s2->avctx, AV_LOG_DEBUG,
2463  "Skipping B slice due to open GOP\n");
2464  break;
2465  }
2466  }
2467  }
2468  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2469  s->sync = 1;
2470  if (!s2->next_pic.ptr) {
2471  /* Skip P-frames if we do not have a reference frame or
2472  * we have an invalid header. */
2473  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2474  skip_frame = 1;
2475  av_log(s2->avctx, AV_LOG_DEBUG,
2476  "Skipping P slice due to !sync\n");
2477  break;
2478  }
2479  }
2480  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2481  s2->pict_type == AV_PICTURE_TYPE_B) ||
2482  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2483  s2->pict_type != AV_PICTURE_TYPE_I) ||
2484  avctx->skip_frame >= AVDISCARD_ALL) {
2485  skip_frame = 1;
2486  break;
2487  }
2488 
2489  if (!s2->context_initialized)
2490  break;
2491 
2492  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2493  if (mb_y < avctx->skip_top ||
2494  mb_y >= s2->mb_height - avctx->skip_bottom)
2495  break;
2496  }
2497 
2498  if (!s2->pict_type) {
2499  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2500  if (avctx->err_recognition & AV_EF_EXPLODE)
2501  return AVERROR_INVALIDDATA;
2502  break;
2503  }
2504 
2505  if (s->first_slice) {
2506  skip_frame = 0;
2507  s->first_slice = 0;
2508  if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
2509  return ret;
2510  }
2511  if (!s2->cur_pic.ptr) {
2512  av_log(avctx, AV_LOG_ERROR,
2513  "current_picture not initialized\n");
2514  return AVERROR_INVALIDDATA;
2515  }
2516 
2517  if (HAVE_THREADS &&
2518  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2519  !avctx->hwaccel) {
2520  int threshold = (s2->mb_height * s->slice_count +
2521  s2->slice_context_count / 2) /
2522  s2->slice_context_count;
2523  av_assert0(avctx->thread_count > 1);
2524  if (threshold <= mb_y) {
2525  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2526 
2527  thread_context->start_mb_y = mb_y;
2528  thread_context->end_mb_y = s2->mb_height;
2529  if (s->slice_count) {
2530  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2531  ret = ff_update_duplicate_context(thread_context, s2);
2532  if (ret < 0)
2533  return ret;
2534  }
2535  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2536  if (ret < 0)
2537  return ret;
2538  s->slice_count++;
2539  }
2540  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2541  } else {
2542  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2543  emms_c();
2544 
2545  if (ret < 0) {
2546  if (avctx->err_recognition & AV_EF_EXPLODE)
2547  return ret;
2548  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2549  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2550  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2551  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2552  } else {
2553  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2554  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2555  ER_AC_END | ER_DC_END | ER_MV_END);
2556  }
2557  }
2558  }
2559  break;
2560  }
2561  }
2562 }
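decode_chunks() walks the packet one start code at a time via avpriv_find_start_code() and dispatches on the code value (sequence, picture, extension, GOP, user data, slices). The stateless sketch below is illustrative only and not part of this file (find_start_code_naive is a hypothetical name); the real helper additionally carries its match state across calls so codes split between buffers are still found.

#include <stddef.h>
#include <stdint.h>

/* Return a pointer just past the next byte-aligned 0x000001xx start code,
 * storing the full 32-bit code in *code, or return end if none is found. */
static const uint8_t *find_start_code_naive(const uint8_t *p, const uint8_t *end,
                                            uint32_t *code)
{
    *code = 0xffffffff;
    for (; p + 3 < end; p++) {
        if (p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01) {
            *code = 0x00000100 | p[3];
            return p + 4;
        }
    }
    return end;
}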
2563 
2564 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2565  int *got_output, AVPacket *avpkt)
2566 {
2567  const uint8_t *buf = avpkt->data;
2568  int ret;
2569  int buf_size = avpkt->size;
2570  Mpeg1Context *s = avctx->priv_data;
2571  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2572 
2573  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2574  /* special case for last picture */
2575  if (s2->low_delay == 0 && s2->next_pic.ptr) {
2576  int ret = av_frame_ref(picture, s2->next_pic.ptr->f);
2577  if (ret < 0)
2578  return ret;
2579 
2580  ff_mpv_unref_picture(&s2->next_pic);
2581 
2582  *got_output = 1;
2583  }
2584  return buf_size;
2585  }
2586 
2587  if (!s2->context_initialized &&
2588  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2589  vcr2_init_sequence(avctx);
2590 
2591  s->slice_count = 0;
2592 
2593  if (avctx->extradata && !s->extradata_decoded) {
2594  ret = decode_chunks(avctx, picture, got_output,
2595  avctx->extradata, avctx->extradata_size);
2596  if (*got_output) {
2597  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2598  av_frame_unref(picture);
2599  *got_output = 0;
2600  }
2601  s->extradata_decoded = 1;
2602  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2603  ff_mpv_unref_picture(&s2->cur_pic);
2604  return ret;
2605  }
2606  }
2607 
2608  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2609  if (ret<0 || *got_output) {
2610  ff_mpv_unref_picture(&s2->cur_pic);
2611 
2612  if (s->timecode_frame_start != -1 && *got_output) {
2613  char tcbuf[AV_TIMECODE_STR_SIZE];
2614  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2615  AV_FRAME_DATA_GOP_TIMECODE,
2616  sizeof(int64_t));
2617  if (!tcside)
2618  return AVERROR(ENOMEM);
2619  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2620 
2621  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2622  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2623 
2624  s->timecode_frame_start = -1;
2625  }
2626  }
2627 
2628  return ret;
2629 }
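When a GOP header was seen, mpeg_decode_frame() attaches the raw 25-bit time_code to the output frame both as AV_FRAME_DATA_GOP_TIMECODE side data and as a "timecode" metadata entry. A minimal sketch of how a caller could read it back is shown below; it is illustrative only and not part of this file (report_gop_timecode is a hypothetical name), using only public libavutil API.

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>
#include <libavutil/timecode.h>

static void report_gop_timecode(const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_GOP_TIMECODE);
    const AVDictionaryEntry *e =
        av_dict_get(frame->metadata, "timecode", NULL, 0);

    if (sd && sd->size >= sizeof(int64_t)) {
        char buf[AV_TIMECODE_STR_SIZE];
        int64_t tc = *(const int64_t *)sd->data;
        printf("GOP timecode (side data): %s\n",
               av_timecode_make_mpeg_tc_string(buf, tc));
    }
    if (e)
        printf("GOP timecode (metadata): %s\n", e->value);
}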
2630 
2631 static void flush(AVCodecContext *avctx)
2632 {
2633  Mpeg1Context *s = avctx->priv_data;
2634 
2635  s->sync = 0;
2636  s->closed_gop = 0;
2637 
2638  av_buffer_unref(&s->a53_buf_ref);
2639  ff_mpeg_flush(avctx);
2640 }
2641 
2642 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2643 {
2644  Mpeg1Context *s = avctx->priv_data;
2645 
2646  av_buffer_unref(&s->a53_buf_ref);
2647  return ff_mpv_decode_close(avctx);
2648 }
2649 
2650 const FFCodec ff_mpeg1video_decoder = {
2651  .p.name = "mpeg1video",
2652  CODEC_LONG_NAME("MPEG-1 video"),
2653  .p.type = AVMEDIA_TYPE_VIDEO,
2654  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2655  .priv_data_size = sizeof(Mpeg1Context),
2656  .init = mpeg_decode_init,
2657  .close = mpeg_decode_end,
2658  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2659  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2661  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2662  .flush = flush,
2663  .p.max_lowres = 3,
2664  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2665  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2666 #if CONFIG_MPEG1_NVDEC_HWACCEL
2667  HWACCEL_NVDEC(mpeg1),
2668 #endif
2669 #if CONFIG_MPEG1_VDPAU_HWACCEL
2670  HWACCEL_VDPAU(mpeg1),
2671 #endif
2672 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2673  HWACCEL_VIDEOTOOLBOX(mpeg1),
2674 #endif
2675  NULL
2676  },
2677 };
2678 
2679 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2680 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2681 
2682 static const AVOption mpeg2video_options[] = {
2683  { "cc_format", "extract a specific Closed Captions format",
2684  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2685  CC_FORMAT_AUTO, CC_FORMAT_DVD, M2V_PARAM, .unit = "cc_format" },
2686 
2687  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2688  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2689  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2690  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2691  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2692  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2693  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2694  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2695  { "dvb_0502", "pick DVB 0502 CC substream", 0, AV_OPT_TYPE_CONST,
2696  { .i64 = CC_FORMAT_DVB_0502 }, .flags = M2V_PARAM, .unit = "cc_format" },
2697  { NULL }
2698 };
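The cc_format private option above lets callers pin caption extraction to one substream instead of the default first-seen behaviour. The sketch below shows one way to set it through the options dictionary when opening the decoder; it is illustrative only and not part of this file (open_mpeg2_with_cc_format is a hypothetical name, and error handling is omitted).

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static AVCodecContext *open_mpeg2_with_cc_format(const char *cc_format)
{
    const AVCodec *dec  = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    AVDictionary *opts  = NULL;

    /* one of the named constants above, e.g. "auto", "a53", "scte20", "dvd" */
    av_dict_set(&opts, "cc_format", cc_format, 0);
    avcodec_open2(ctx, dec, &opts);
    av_dict_free(&opts);
    return ctx;
}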
2699 
2700 static const AVClass mpeg2video_class = {
2701  .class_name = "MPEG-2 video",
2702  .item_name = av_default_item_name,
2703  .option = mpeg2video_options,
2704  .version = LIBAVUTIL_VERSION_INT,
2705  .category = AV_CLASS_CATEGORY_DECODER,
2706 };
2707 
2708 const FFCodec ff_mpeg2video_decoder = {
2709  .p.name = "mpeg2video",
2710  CODEC_LONG_NAME("MPEG-2 video"),
2711  .p.type = AVMEDIA_TYPE_VIDEO,
2712  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2713  .p.priv_class = &mpeg2video_class,
2714  .priv_data_size = sizeof(Mpeg1Context),
2715  .init = mpeg_decode_init,
2716  .close = mpeg_decode_end,
2717  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2718  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2720  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2721  .flush = flush,
2722  .p.max_lowres = 3,
2723  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
2724  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2725 #if CONFIG_MPEG2_DXVA2_HWACCEL
2726  HWACCEL_DXVA2(mpeg2),
2727 #endif
2728 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2729  HWACCEL_D3D11VA(mpeg2),
2730 #endif
2731 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2732  HWACCEL_D3D11VA2(mpeg2),
2733 #endif
2734 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2735  HWACCEL_D3D12VA(mpeg2),
2736 #endif
2737 #if CONFIG_MPEG2_NVDEC_HWACCEL
2738  HWACCEL_NVDEC(mpeg2),
2739 #endif
2740 #if CONFIG_MPEG2_VAAPI_HWACCEL
2741  HWACCEL_VAAPI(mpeg2),
2742 #endif
2743 #if CONFIG_MPEG2_VDPAU_HWACCEL
2744  HWACCEL_VDPAU(mpeg2),
2745 #endif
2746 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2747  HWACCEL_VIDEOTOOLBOX(mpeg2),
2748 #endif
2749  NULL
2750  },
2751 };
2752 
2753 //legacy decoder
2754 const FFCodec ff_mpegvideo_decoder = {
2755  .p.name = "mpegvideo",
2756  CODEC_LONG_NAME("MPEG-1 video"),
2757  .p.type = AVMEDIA_TYPE_VIDEO,
2758  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2759  .priv_data_size = sizeof(Mpeg1Context),
2760  .init = mpeg_decode_init,
2761  .close = mpeg_decode_end,
2762  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2763  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2765  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2766  .flush = flush,
2767  .p.max_lowres = 3,
2768 };
2769 
2770 typedef struct IPUContext {
2771  MpegEncContext m;
2772 
2773  int flags;
2774  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2775 } IPUContext;
2776 
2777 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2778  int *got_frame, AVPacket *avpkt)
2779 {
2780  IPUContext *s = avctx->priv_data;
2781  MpegEncContext *m = &s->m;
2782  GetBitContext *gb = &m->gb;
2783  int ret;
2784 
2785  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2786  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
2787  return AVERROR_INVALIDDATA;
2788 
2789  ret = ff_get_buffer(avctx, frame, 0);
2790  if (ret < 0)
2791  return ret;
2792 
2793  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2794  if (ret < 0)
2795  return ret;
2796 
2797  s->flags = get_bits(gb, 8);
2798  m->intra_dc_precision = s->flags & 3;
2799  m->q_scale_type = !!(s->flags & 0x40);
2800  m->intra_vlc_format = !!(s->flags & 0x20);
2801  m->alternate_scan = !!(s->flags & 0x10);
2802 
2804  s->flags & 0x10 ? ff_alternate_vertical_scan : ff_zigzag_direct);
2805 
2806  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2807  m->qscale = 1;
2808 
2809  for (int y = 0; y < avctx->height; y += 16) {
2810  int intraquant;
2811 
2812  for (int x = 0; x < avctx->width; x += 16) {
2813  if (x || y) {
2814  if (!get_bits1(gb))
2815  return AVERROR_INVALIDDATA;
2816  }
2817  if (get_bits1(gb)) {
2818  intraquant = 0;
2819  } else {
2820  if (!get_bits1(gb))
2821  return AVERROR_INVALIDDATA;
2822  intraquant = 1;
2823  }
2824 
2825  if (s->flags & 4)
2826  skip_bits1(gb);
2827 
2828  if (intraquant)
2829  m->qscale = mpeg_get_qscale(m);
2830 
2831  memset(s->block, 0, sizeof(s->block));
2832 
2833  for (int n = 0; n < 6; n++) {
2834  if (s->flags & 0x80) {
2835  ret = ff_mpeg1_decode_block_intra(gb,
2836  m->intra_matrix,
2837  m->intra_scantable.permutated,
2838  m->last_dc, s->block[n],
2839  n, m->qscale);
2840  } else {
2841  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2842  }
2843 
2844  if (ret < 0)
2845  return ret;
2846  }
2847 
2848  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2849  frame->linesize[0], s->block[0]);
2850  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2851  frame->linesize[0], s->block[1]);
2852  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2853  frame->linesize[0], s->block[2]);
2854  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2855  frame->linesize[0], s->block[3]);
2856  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2857  frame->linesize[1], s->block[4]);
2858  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2859  frame->linesize[2], s->block[5]);
2860  }
2861  }
2862 
2863  align_get_bits(gb);
2864  if (get_bits_left(gb) != 32)
2865  return AVERROR_INVALIDDATA;
2866 
2867  *got_frame = 1;
2868 
2869  return avpkt->size;
2870 }
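The size check at the top of ipu_decode_frame() (line 2786) rejects packets that cannot even hold minimal intra macroblocks: it budgets 2 + 3*4 + 2*2 + 2*6 = 30 bits per 16x16 macroblock for the MB header, the luma and chroma DC VLCs and the per-block AC/EOB codes. A worked instance of that bound is sketched below; it is illustrative only and not part of this file.

#include <stdio.h>

int main(void)
{
    int width = 720, height = 576;   /* sample PAL-sized frame */
    long long mbs      = ((width + 15) / 16) * (long long)((height + 15) / 16);
    long long min_bits = mbs * (2LL + 3 * 4 + 2 * 2 + 2 * 6);

    /* 45 * 36 = 1620 macroblocks -> 48600 bits, i.e. 6075 bytes minimum */
    printf("%lld MBs -> at least %lld bits (%lld bytes)\n",
           mbs, min_bits, (min_bits + 7) / 8);
    return 0;
}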
2871 
2872 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2873 {
2874  IPUContext *s = avctx->priv_data;
2875  MpegEncContext *m = &s->m;
2876 
2877  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2878  m->avctx = avctx;
2879 
2880  ff_idctdsp_init(&m->idsp, avctx);
2881  ff_mpeg12_init_vlcs();
2882 
2883  for (int i = 0; i < 64; i++) {
2884  int j = m->idsp.idct_permutation[i];
2885  int v = ff_mpeg1_default_intra_matrix[i];
2886  m->intra_matrix[j] = v;
2887  m->chroma_intra_matrix[j] = v;
2888  }
2889 
2890  return 0;
2891 }
2892 
2893 const FFCodec ff_ipu_decoder = {
2894  .p.name = "ipu",
2895  CODEC_LONG_NAME("IPU Video"),
2896  .p.type = AVMEDIA_TYPE_VIDEO,
2897  .p.id = AV_CODEC_ID_IPU,
2898  .priv_data_size = sizeof(IPUContext),
2899  .init = ipu_decode_init,
2900  FF_CODEC_DECODE_CB(ipu_decode_frame),
2901  .p.capabilities = AV_CODEC_CAP_DR1,
2902 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1859
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:687
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1445
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:82
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:442
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2679
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:148
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:79
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
AVPanScan::position
int16_t position[3][2]
position of the top left corner in 1/16 pel for up to 3 fields/frames
Definition: defs.h:263
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1925
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:266
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1277
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2564
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:435
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
MpegEncContext::top_field_first
int top_field_first
Definition: mpegvideo.h:450
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:811
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:125
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:397
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:267
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:242
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1438
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
int64_t
long long int64_t
Definition: coverity.c:34
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2872
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:459
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:221
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:463
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2754
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:539
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:402
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:90
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:303
AVOption
AVOption.
Definition: opt.h:429
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
CC_FORMAT_DVB_0502
@ CC_FORMAT_DVB_0502
Definition: mpeg12dec.c:70
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
ff_mpv_framesize_disable
static void ff_mpv_framesize_disable(ScratchpadContext *sc)
Disable allocating the ScratchpadContext's buffers in future calls to ff_mpv_framesize_alloc().
Definition: mpegpicture.h:143
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:820
reverse.h
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:92
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:881
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:96
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1415
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:269
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:100
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:653
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:151
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:85
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:87
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:574
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:400
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:910
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:124
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:145
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1720
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2230
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1838
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1181
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1601
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
MpegEncContext::picture_structure
int picture_structure
Definition: mpegvideo.h:446
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
AVPanScan::width
int width
width and height in 1/16 pel
Definition: defs.h:255
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1667
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:48
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:91
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:853
MpegEncContext::width
int width
Definition: mpegvideo.h:96
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:647
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1770
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:88
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:449
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2137
IPUContext
Definition: mpeg12dec.c:2770
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:810
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:774
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2708
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:88
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2777
HAS_MV
#define HAS_MV(a, dir)
Definition: mpegutils.h:92
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:821
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1010
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2631
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:86
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:538
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:737
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
Mpeg1Context::stereo3d_type
enum AVStereo3DType stereo3d_type
Definition: mpeg12dec.c:77
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:75
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:140
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.h:28
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:49
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:84
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1100
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:76
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:848
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1310
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:177
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:125
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:390
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:86
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:410
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:204
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:709
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:89
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:165
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:738
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:740
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1124
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2680
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:49
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:52
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:150
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
MpegEncContext::slice_context_count
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1802
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:86
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:222
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:301
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:81
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:65
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:515
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1697
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2682
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
dc
Intra DC Prediction: block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:310
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
height
#define height
Definition: dsp.h:85
RL_VLC_ELEM
Definition: vlc.h:56
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:400
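To make the reference-counting semantics concrete, here is a minimal, hedged sketch of typical av_frame_ref() usage; the name ref_frame_example and the 352x288 YUV420P dimensions are illustrative only and are not taken from mpeg12dec.c.
    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    /* Illustrative sketch: make dst share (reference-count) src's buffers. */
    static int ref_frame_example(void)
    {
        AVFrame *src = av_frame_alloc();
        AVFrame *dst = av_frame_alloc();
        int ret = AVERROR(ENOMEM);
        if (!src || !dst)
            goto end;
        src->format = AV_PIX_FMT_YUV420P;
        src->width  = 352;
        src->height = 288;
        if ((ret = av_frame_get_buffer(src, 0)) < 0)
            goto end;
        ret = av_frame_ref(dst, src);   /* dst now references src's data */
    end:
        av_frame_free(&src);            /* data lives until both refs are gone */
        av_frame_free(&dst);
        return ret;
    }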
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:398
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2773
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:299
mpeg_field_start
static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1253
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:128
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2650
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2099
AV_RB32
Read a 32-bit big-endian value from a byte pointer.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:55
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1613
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:441
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1866
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MpegEncContext::mbskip_table
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encoding & decoding (contains skip table of next P-frame)
Definition: mpegvideo.h:191
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:119
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:344
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:46
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:449
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:78
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:765
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:196
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2700
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1612
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:116
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
emms.h
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:293
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:403
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:465
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Example code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1814
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:537
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
internal.h
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1909
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:86
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:188
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1422
MpegEncContext::thread_context
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
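A short usage sketch of av_d2q(), showing how a floating-point rate can be turned back into an exact rational; only the public libavutil/rational.h API is assumed, and the main() wrapper is illustrative.
    #include <stdio.h>
    #include <libavutil/rational.h>
    int main(void)
    {
        /* Best rational approximation with numerator/denominator <= 100000. */
        AVRational r = av_d2q(30000.0 / 1001.0, 100000);
        printf("%d/%d\n", r.num, r.den);   /* expected: 30000/1001 */
        return 0;
    }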
MB_TYPE_MV_2_MV_DIR
#define MB_TYPE_MV_2_MV_DIR(a)
Definition: mpegutils.h:94
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:134
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2771
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:159
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:454
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:716
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:124
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:535
AVCodecContext::height
int height
Definition: avcodec.h:632
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:700
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
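For completeness, a tiny hedged sketch of av_cmp_q(); cmp_rates_example is an illustrative name, not an FFmpeg function.
    #include <libavutil/rational.h>
    /* Returns 1 here, since 30000/1001 (~29.97) is greater than 25/1. */
    static int cmp_rates_example(void)
    {
        AVRational ntsc = { 30000, 1001 };
        AVRational pal  = { 25, 1 };
        return av_cmp_q(ntsc, pal);   /* 0 if equal, 1 if a > b, -1 if a < b */
    }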
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2199
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:80
frame
Return, or at least make progress towards producing a frame.
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
ff_mpv_decode_init
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:46
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
Setup code that later frame threads depend on must run before the decode process starts; call ff_thread_finish_setup() afterwards.
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
left
Motion Vector Prediction: the median of the scaled left, top and top-right vectors is used as the motion vector prediction.
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:858
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AV_RL32
Read a 32-bit little-endian value from a byte pointer.
Definition: bytestream.h:92
mpeg12data.h
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:77
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:700
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1620
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:168
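A minimal sketch of av_timecode_make_mpeg_tc_string() usage, assuming only the public libavutil/timecode.h API; the zero timecode value is a placeholder, not data from a real GOP header.
    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/timecode.h>
    int main(void)
    {
        char buf[AV_TIMECODE_STR_SIZE];
        /* MPEG GOP time_code layout: drop(1) hh(5) mm(6) marker(1) ss(6) ff(6). */
        uint32_t tc25bit = 0;   /* placeholder: decodes to 00:00:00:00 */
        printf("%s\n", av_timecode_make_mpeg_tc_string(buf, tc25bit));
        return 0;
    }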
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:448
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1631
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:739
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1658
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:707
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:80
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:131
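sign_extend() reinterprets the low `bits` bits of a value as a signed two's-complement number. Below is a hedged standalone sketch of the usual shift-up/arithmetic-shift-down trick; it is named sign_extend_sketch to make clear it is not the mathops.h source itself.
    /* Illustrative sketch, not the FFmpeg implementation. */
    static inline int sign_extend_sketch(int val, unsigned bits)
    {
        const unsigned shift = 8 * sizeof(int) - bits;
        union { unsigned u; int s; } v = { (unsigned) val << shift };
        return v.s >> shift;   /* arithmetic shift propagates the sign bit */
    }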
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:395
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:83
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:590
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
MpegEncContext::resync_mb_x
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:350
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1816
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
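A small hedged sketch of av_buffer_realloc() on a reference-counted buffer; grow_buffer_example and the sizes are illustrative only.
    #include <string.h>
    #include <libavutil/buffer.h>
    #include <libavutil/error.h>
    static int grow_buffer_example(void)
    {
        AVBufferRef *buf = av_buffer_alloc(64);
        int ret;
        if (!buf)
            return AVERROR(ENOMEM);
        memset(buf->data, 0, buf->size);
        /* Reallocates (copying the old contents) when the buffer must grow. */
        ret = av_buffer_realloc(&buf, 4096);
        av_buffer_unref(&buf);
        return ret;
    }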
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:146
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1414
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:647
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:388
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2642
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2893
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
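A hedged sketch of attaching stereo 3D side data to a frame; tag_stereo3d_example is an illustrative name and the top-bottom packing is just an example value.
    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>
    static int tag_stereo3d_example(AVFrame *frame)
    {
        AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
        if (!stereo)
            return AVERROR(ENOMEM);
        stereo->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically */
        return 0;
    }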
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:468
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:452
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:74
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:261
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:58
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
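A minimal sketch of av_dict_set(); the key/value pair is illustrative, chosen to mirror the kind of timecode string exported into AVFrame::metadata.
    #include <libavutil/dict.h>
    static int set_metadata_example(AVDictionary **meta)
    {
        /* Both key and value are copied by default; the caller frees the
         * dictionary later with av_dict_free(meta). */
        return av_dict_set(meta, "timecode", "00:00:00:00", 0);
    }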
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:72
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1057
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AVStereo3DType
AVStereo3DType
List of possible 3D Types.
Definition: stereo3d.h:48
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:87
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:98
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2774
AVPanScan::height
int height
Definition: defs.h:256
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:147
MpegEncContext::resync_mb_y
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:351
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2129
h
h
Definition: vp9dsp_template.c:2070
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
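A hedged sketch of how av_image_check_sar() is typically used to sanitize a sample aspect ratio; check_sar_example is an illustrative name, not an FFmpeg function.
    #include <libavutil/imgutils.h>
    #include <libavutil/log.h>
    #include <libavutil/rational.h>
    static void check_sar_example(void *log_ctx, unsigned w, unsigned h,
                                  AVRational *sar)
    {
        if (av_image_check_sar(w, h, *sar) < 0) {
            av_log(log_ctx, AV_LOG_WARNING,
                   "ignoring invalid SAR: %d/%d\n", sar->num, sar->den);
            *sar = (AVRational){ 0, 1 };   /* treat as unknown */
        }
    }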
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:94
width
#define width
Definition: dsp.h:85
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:217
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:455
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1383
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1382
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1158
MpegEncContext::codec_id
enum AVCodecID codec_id
Definition: mpegvideo.h:108
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:656
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:50
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:30
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:81
Mpeg1Context
Definition: mpeg12dec.c:73
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:300
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1195
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:93
ff_mpv_decode_close
int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:163
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:218
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:65
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1391
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:458
MpegEncContext::codec_tag
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:115