#include "config_components.h"

#define UNCHECKED_BITSTREAM_READER 1
#define A53_MAX_CC_COUNT 2000
#define MB_TYPE_ZERO_MV 0x20000000
#define MAX_INDEX (64 - 1)
#define check_scantable_index(ctx, x)                                     \
    do {                                                                  \
        if ((x) > MAX_INDEX) {                                            \
            av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
                   ctx->mb_x, ctx->mb_y);                                 \
            return AVERROR_INVALIDDATA;                                   \
        }                                                                 \
    } while (0)
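/* Usage sketch (added for illustration, not part of the original listing):
 * inside the block decoders below, 'i' walks the coefficient scan order and
 * must never leave the 8x8 block, so every advance is guarded:
 *
 *     i += run;
 *     check_scantable_index(s, i);    // bails out with AVERROR_INVALIDDATA
 *     j = scantable[i];
 */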
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable     = s->intra_scantable.permutated;
    const uint16_t *quant_matrix = s->inter_matrix;
    const int qscale             = s->qscale;

        level = (3 * qscale * quant_matrix[0]) >> 5;
            level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
        } else if (level == 0) {
                level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
                level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;

    s->block_last_index[n] = i;
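/* Worked example (added for illustration, not from the original file): the
 * MPEG-1 inter dequantizer above computes ((2*level + 1) * qscale * W[j]) >> 5
 * and then forces the result odd to limit encoder/decoder IDCT mismatch.
 * With level = 3, qscale = 8 and W[j] = 16:
 *
 *     ((3 * 2 + 1) * 8 * 16) >> 5 = 28,  then (28 - 1) | 1 = 27
 */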
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable = s->intra_scantable.permutated;
    const int qscale         = s->qscale;

        level = (3 * qscale) >> 1;
        } else if (level == 0) {

    s->block_last_index[n] = i;
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable = s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale = s->qscale;

        quant_matrix = s->inter_matrix;
        quant_matrix = s->chroma_inter_matrix;
        level = (3 * qscale * quant_matrix[0]) >> 5;
            level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
                level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
                level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;

    block[63] ^= (mismatch & 1);
    s->block_last_index[n] = i;
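/* Note added for clarity (a sketch of the technique, assuming the usual MPEG-2
 * semantics): instead of MPEG-1's per-coefficient oddification, MPEG-2 uses a
 * single mismatch-control bit.  The decoder accumulates the parity of every
 * dequantized coefficient and finally toggles the LSB of block[63] so the sum
 * of all 64 coefficients ends up odd:
 *
 *     mismatch = 1;
 *     mismatch ^= level;           // per decoded coefficient
 *     block[63] ^= mismatch & 1;   // as in the fragment above
 */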
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable = s->intra_scantable.permutated;
    const int qscale         = s->qscale;

        level = (3 * qscale) >> 1;

    s->block_last_index[n] = i;
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable = s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale = s->qscale;

        quant_matrix = s->intra_matrix;
        quant_matrix = s->chroma_intra_matrix;
    component = (n & 1) + 1;

    dc = s->last_dc[component];
    s->last_dc[component] = dc;
    block[0] = dc * (1 << (3 - s->intra_dc_precision));

    mismatch = block[0] ^ 1;

    if (s->intra_vlc_format)
        } else if (level != 0) {
            level = (level * qscale * quant_matrix[j]) >> 4;
                level = (-level * qscale * quant_matrix[j]) >> 4;
                level = (level * qscale * quant_matrix[j]) >> 4;

    block[63] ^= mismatch & 1;
    s->block_last_index[n] = i;
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
{
    uint8_t *const scantable = s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale = s->qscale;

        quant_matrix = s->intra_matrix;
        quant_matrix = s->chroma_intra_matrix;
    component = (n & 1) + 1;

    dc = s->last_dc[component];
    s->last_dc[component] = dc;
    block[0] = dc * (1 << (3 - s->intra_dc_precision));

    if (s->intra_vlc_format)
        if (level >= 64 || i > 63) {
        } else if (level != 0) {
            level = (level * qscale * quant_matrix[j]) >> 4;
                level = (-level * qscale * quant_matrix[j]) >> 4;
                level = (level * qscale * quant_matrix[j]) >> 4;

    s->block_last_index[n] = i;
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
{
    int i, j, k, cbp, val, mb_type, motion_type;
    const int mb_block_count = 4 + (1 << s->chroma_format);

    ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);

    if (s->mb_skip_run-- != 0) {
        s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
        mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
        mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
        s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
        if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)

    switch (s->pict_type) {
            "Invalid mb type in I-frame at %d %d\n",
            "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
            "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);

    ff_tlog(s->avctx, "mb_type=%x\n", mb_type);

        s->bdsp.clear_blocks(s->block[0]);
        if (!s->chroma_y_shift)
            s->bdsp.clear_blocks(s->block[6]);
            !s->frame_pred_frame_dct)

        if (s->concealment_motion_vectors) {
            s->mv[0][0][0]      =
            s->last_mv[0][0][0] =
            s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
                                                     s->last_mv[0][0][0]);
            s->mv[0][0][1]      =
            s->last_mv[0][0][1] =
            s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
                                                     s->last_mv[0][0][1]);

            check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
        } else {
            memset(s->last_mv, 0, sizeof(s->last_mv));
        }

            for (i = 0; i < 6; i++)
            for (i = 0; i < mb_block_count; i++)
        for (i = 0; i < 6; i++) {
                                       s->intra_scantable.permutated,
                                       s->last_dc, *s->pblocks[i],
            s->block_last_index[i] = ret;

            && !s->frame_pred_frame_dct)

            s->field_select[0][0] = s->picture_structure - 1;

            s->last_mv[0][0][0] = 0;
            s->last_mv[0][0][1] = 0;
            s->last_mv[0][1][0] = 0;
            s->last_mv[0][1][1] = 0;

        if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {

        s->mv_dir = (mb_type >> 13) & 3;
        ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
        switch (motion_type) {
                for (i = 0; i < 2; i++) {
                    s->mv[i][0][0]      =
                    s->last_mv[i][0][0] =
                    s->last_mv[i][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                             s->last_mv[i][0][0]);
                    s->mv[i][0][1]      =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                             s->last_mv[i][0][1]);
                    if (s->full_pel[i]) {

                for (i = 0; i < 2; i++) {
                    for (j = 0; j < 2; j++) {
                        for (k = 0; k < 2; k++) {
                            val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                     s->last_mv[i][j][k]);
                            s->last_mv[i][j][k] = val;
                            s->mv[i][j][k]      = val;

                for (i = 0; i < 2; i++) {
                    for (j = 0; j < 2; j++) {
                        val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                 s->last_mv[i][j][0]);
                        s->last_mv[i][j][0] = val;
                        s->mv[i][j][0]      = val;
                        val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                 s->last_mv[i][j][1] >> 1);
                        s->last_mv[i][j][1] = 2 * val;
                        s->mv[i][j][1]      = val;

                for (i = 0; i < 2; i++) {
                    for (k = 0; k < 2; k++) {
                        val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                 s->last_mv[i][0][k]);
                        s->last_mv[i][0][k] = val;
                        s->last_mv[i][1][k] = val;
                        s->mv[i][0][k]      = val;

                if (s->progressive_sequence){
                for (i = 0; i < 2; i++) {
                    int dmx, dmy, mx, my, m;
                    const int my_shift = s->picture_structure == PICT_FRAME;

                    mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                            s->last_mv[i][0][0]);
                    s->last_mv[i][0][0] = mx;
                    s->last_mv[i][1][0] = mx;
                    my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                            s->last_mv[i][0][1] >> my_shift);
                    s->last_mv[i][0][1] = my * (1 << my_shift);
                    s->last_mv[i][1][1] = my * (1 << my_shift);

                        m = s->top_field_first ? 1 : 3;
                        s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                        s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
                        s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                        s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;

                        s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
                        s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;

                       "00 motion_type at %d %d\n", s->mb_x, s->mb_y);

            s->bdsp.clear_blocks(s->block[0]);
            if (mb_block_count > 6) {
                cbp *= 1 << mb_block_count - 6;
                cbp |= get_bits(&s->gb, mb_block_count - 6);
                s->bdsp.clear_blocks(s->block[6]);
            }
                   "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);

                for (i = 0; i < 6; i++) {
                        s->block_last_index[i] = -1;
                cbp <<= 12 - mb_block_count;
                for (i = 0; i < mb_block_count; i++) {
                    if (cbp & (1 << 11)) {
                        s->block_last_index[i] = -1;
                for (i = 0; i < 6; i++) {
                        s->block_last_index[i] = -1;
                for (i = 0; i < 6; i++) {
                        s->block_last_index[i] = -1;
            for (i = 0; i < 12; i++)
                s->block_last_index[i] = -1;

    s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
    s2->chroma_format         = 1;
    s->mpeg_enc_ctx_allocated = 0;
    s->repeat_field           = 0;
static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
                                             const AVCodecContext *avctx_from)
{
    if (avctx == avctx_from               ||
        !ctx_from->mpeg_enc_ctx_allocated ||
        !s1->context_initialized)
        return 0;

    if (!ctx->mpeg_enc_ctx_allocated)
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
                                 const uint8_t *new_perm)
{
    uint16_t temp_matrix[64];
    int i;

    memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));

    for (i = 0; i < 64; i++)
        matrix[new_perm[i]] = temp_matrix[old_perm[i]];
}
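/* Illustration (added, not from the original file): quant_matrix_rebuild()
 * re-indexes a quantization matrix when the IDCT permutation changes, i.e.
 * the value that used to live at old_perm[i] moves to new_perm[i].
 * Toy 4-entry example:
 *
 *     old_perm = {0,1,2,3}, new_perm = {0,2,1,3}
 *     matrix   = {8,16,19,22}  ->  {8,19,16,22}
 */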
#if CONFIG_MPEG1_NVDEC_HWACCEL
#if CONFIG_MPEG1_VDPAU_HWACCEL
#if CONFIG_MPEG2_NVDEC_HWACCEL
#if CONFIG_MPEG2_VDPAU_HWACCEL
#if CONFIG_MPEG2_DXVA2_HWACCEL
#if CONFIG_MPEG2_D3D11VA_HWACCEL
#if CONFIG_MPEG2_VAAPI_HWACCEL
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
    if (s->chroma_format < 2)
    else if (s->chroma_format == 2)
static int mpeg_decode_postinit(AVCodecContext *avctx)
{
    uint8_t old_permutation[64];

    if (s1->aspect_ratio_info > 1) {
                                       s1->pan_scan.height }),
        if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
            s->avctx->sample_aspect_ratio =
            s->avctx->sample_aspect_ratio =
                av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
                         (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
            ff_dlog(avctx, "aspect A %d/%d\n",
            ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
                    s->avctx->sample_aspect_ratio.den);
    } else {
        s->avctx->sample_aspect_ratio =
    }

    if ((s1->mpeg_enc_ctx_allocated == 0)                         ||
        s1->save_width  != s->width                               ||
        s1->save_height != s->height                              ||
        av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio)  ||
        (s1->save_progressive_seq != s->progressive_sequence &&
         FFALIGN(s->height, 16) != FFALIGN(s->height, 32))        ||

        if (s1->mpeg_enc_ctx_allocated) {
#if FF_API_FLAG_TRUNCATED
            s->parse_context.buffer = 0;
            s->parse_context = pc;
#endif
            s1->mpeg_enc_ctx_allocated = 0;
        }

            (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {

        s1->save_aspect          = s->avctx->sample_aspect_ratio;
        s1->save_width           = s->width;
        s1->save_height          = s->height;
        s1->save_progressive_seq = s->progressive_sequence;

                  &s->avctx->framerate.den,

        switch (s->chroma_format) {

        memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));

        s1->mpeg_enc_ctx_allocated = 1;
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
                                int buf_size)
{
    int ref, f_code, vbv_delay, ret;

    if (s->pict_type == 0 || s->pict_type > 3)

    s->vbv_delay = vbv_delay;

    s->mpeg_f_code[0][0] = f_code;
    s->mpeg_f_code[0][1] = f_code;

    s->mpeg_f_code[1][0] = f_code;
    s->mpeg_f_code[1][1] = f_code;

    s->current_picture.f->pict_type = s->pict_type;

           "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
{
    int horiz_size_ext, vert_size_ext;

    if (!s->chroma_format) {
        s->chroma_format = 1;
    }

    s->width  |= (horiz_size_ext << 12);
    s->height |= (vert_size_ext  << 12);

    s->bit_rate += (bit_rate_ext << 18) * 400LL;
    s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;

    ff_dlog(s->avctx, "sequence extension\n");

    if (s->bit_rate != 0x3FFFF*400)

        av_log(s->avctx, AV_LOG_DEBUG,
               "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
               s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
               s1->rc_buffer_size, s->bit_rate);
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
{
    int color_description, w, h;

    if (color_description) {
        s->avctx->color_primaries = get_bits(&s->gb, 8);
    }

    s1->pan_scan.width  = 16 * w;
    s1->pan_scan.height = 16 * h;
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
{
    if (s->progressive_sequence) {
        if (s->repeat_first_field) {
            if (s->top_field_first)
        if (s->repeat_first_field)

    for (i = 0; i < nofco; i++) {

            "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
            s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
            s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
            s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
                       uint16_t matrix1[64], int intra)
{
    for (i = 0; i < 64; i++) {
        if (intra && i == 0 && v != 8) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);

static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
{
    ff_dlog(s->avctx, "matrix extension\n");
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
{
    s->full_pel[0]        = s->full_pel[1] = 0;
    s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
    s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
    s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
    s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];

    if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
        if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
            if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)

    s->current_picture.f->pict_type = s->pict_type;

    s->concealment_motion_vectors = get_bits1(&s->gb);

    if (s->alternate_scan) {

    ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
    ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
    ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
    ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
    ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
    ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
    ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
    ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
    ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
{
    if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
        return AVERROR_INVALIDDATA;

    if (s->first_field || s->picture_structure == PICT_FRAME) {

        s->current_picture_ptr->f->repeat_pict = 0;
        if (s->repeat_first_field) {
            if (s->progressive_sequence) {
                if (s->top_field_first)
                    s->current_picture_ptr->f->repeat_pict = 4;
                else
                    s->current_picture_ptr->f->repeat_pict = 2;
            } else if (s->progressive_frame) {
                s->current_picture_ptr->f->repeat_pict = 1;
            }
        }

        pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
                                          AV_FRAME_DATA_PANSCAN,
                                          sizeof(s1->pan_scan));
        memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));

        if (s1->a53_buf_ref) {
        if (s1->has_stereo3d) {
            *stereo = s1->stereo3d;
            s1->has_stereo3d = 0;

        if (!s->current_picture_ptr) {

        if (s->avctx->hwaccel) {
            if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
                       "hardware accelerator failed to decode first field\n");

        for (i = 0; i < 4; i++) {
            s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
                s->current_picture.f->data[i] +=
                    s->current_picture_ptr->f->linesize[i];
#define DECODE_SLICE_ERROR -1
#define DECODE_SLICE_OK     0
static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
                             const uint8_t **buf, int buf_size)
{
    const int lowres    = s->avctx->lowres;
    const int field_pic = s->picture_structure != PICT_FRAME;

    s->resync_mb_y = -1;

    s->interlaced_dct = 0;

    if (s->qscale == 0) {

    if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {

    if (s->mb_x >= (unsigned)s->mb_width) {

        const uint8_t *buf_end, *buf_start = *buf - 4;
        if (buf_end < *buf + buf_size)

    s->resync_mb_x = s->mb_x;
    s->resync_mb_y = s->mb_y = mb_y;

    if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field ||
                                         s->picture_structure == PICT_FRAME)) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
                   s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
                   s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
                   s->progressive_sequence ? "ps"  : "",
                   s->progressive_frame    ? "pf"  : "",
                   s->alternate_scan       ? "alt" : "",
                   s->top_field_first      ? "top" : "",
                   s->intra_dc_precision, s->picture_structure,
                   s->frame_pred_frame_dct, s->concealment_motion_vectors,
                   s->q_scale_type, s->intra_vlc_format,
                   s->repeat_first_field, s->chroma_420_type ? "420" : "");
    }

        if (s->current_picture.motion_val[0] && !s->encoding) {
            const int wrap = s->b8_stride;
            int xy         = s->mb_x * 2 + s->mb_y * 2 * wrap;
            int b8_xy      = 4 * (s->mb_x + s->mb_y * s->mb_stride);
            int motion_x, motion_y, dir, i;

            for (i = 0; i < 2; i++) {
                for (dir = 0; dir < 2; dir++) {
                        motion_x = motion_y = 0;
                        motion_x = s->mv[dir][0][0];
                        motion_y = s->mv[dir][0][1];
                        motion_x = s->mv[dir][i][0];
                        motion_y = s->mv[dir][i][1];

                    s->current_picture.motion_val[dir][xy][0]     = motion_x;
                    s->current_picture.motion_val[dir][xy][1]     = motion_y;
                    s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
                    s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
                    s->current_picture.ref_index [dir][b8_xy]     =
                    s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
                               s->field_select[dir][i] == 1);

        s->dest[1] += (16 >> lowres) >> s->chroma_x_shift;
        s->dest[2] += (16 >> lowres) >> s->chroma_x_shift;

        if (++s->mb_x >= s->mb_width) {
            const int mb_size = 16 >> s->avctx->lowres;

            s->mb_y += 1 << field_pic;

            if (s->mb_y >= s->mb_height) {
                int is_d10 = s->chroma_format == 2 &&
                             s->intra_dc_precision == 2 &&
                             s->q_scale_type == 1 && s->alternate_scan == 0 &&
                             s->progressive_frame == 0

                if (left >= 32 && !is_d10) {

                if (s->mb_y >= ((s->height + 15) >> 4) &&
                    !s->progressive_sequence &&
                    s->mb_skip_run == -1 &&

            if (s->mb_skip_run == -1) {
                        s->mb_skip_run += 33;
                    } else if (code == 35) {
                        if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
                        s->mb_skip_run += code;

            if (s->mb_skip_run) {
                           "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);

                for (i = 0; i < 12; i++)
                    s->block_last_index[i] = -1;

                    s->mv[0][0][0]      = s->mv[0][0][1]      = 0;
                    s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
                    s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
                    s->field_select[0][0] = (s->picture_structure - 1) & 1;

                    s->mv[0][0][0] = s->last_mv[0][0][0];
                    s->mv[0][0][1] = s->last_mv[0][0][1];
                    s->mv[1][0][0] = s->last_mv[1][0][0];
                    s->mv[1][0][1] = s->last_mv[1][0][1];
                    s->field_select[0][0] = (s->picture_structure - 1) & 1;
                    s->field_select[1][0] = (s->picture_structure - 1) & 1;

    ff_dlog(s, "Slice start:%d %d end:%d %d\n",
            s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
static int slice_decode_thread(AVCodecContext *c, void *arg)
{
    const uint8_t *buf  = s->gb.buffer;
    int mb_y            = s->start_mb_y;
    const int field_pic = s->picture_structure != PICT_FRAME;

    s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;

        ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
                ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
                s->start_mb_y, s->end_mb_y, s->er.error_count);

            if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
                               s->mb_x - 1, s->mb_y,

        if (s->mb_y == s->end_mb_y)

        mb_y += (*buf&0xE0)<<2;

        if (mb_y >= s->end_mb_y)
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
{
    if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)

    if (s->avctx->hwaccel) {
        int ret = s->avctx->hwaccel->end_frame(s->avctx);
               "hardware accelerator failed to decode picture\n");
    }

    if (!s->first_field && !s1->first_slice) {

        if (s->last_picture_ptr) {
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf,
                                 int buf_size)
{
               "Invalid horizontal or vertical size value.\n");

    if (s1->aspect_ratio_info == 0) {

    if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
               "frame_rate_index %d is invalid\n", s1->frame_rate_index);
        s1->frame_rate_index = 1;
    }

    s1->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;

        for (i = 0; i < 64; i++) {
            j = s->idsp.idct_permutation[i];
            s->intra_matrix[j]        = v;
            s->chroma_intra_matrix[j] = v;
        }
        for (i = 0; i < 64; i++) {
            int j = s->idsp.idct_permutation[i];
            s->inter_matrix[j]        = v;
            s->chroma_inter_matrix[j] = v;
        }

    s->progressive_sequence = 1;
    s->progressive_frame    = 1;
    s->frame_pred_frame_dct = 1;
    s->chroma_format        = 1;

        av_log(s->avctx, AV_LOG_DEBUG,
               "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
               s1->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);

static int vcr2_init_sequence(AVCodecContext *avctx)
{
    if (s1->mpeg_enc_ctx_allocated) {
        s1->mpeg_enc_ctx_allocated = 0;
    }

    s1->mpeg_enc_ctx_allocated = 1;

    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        s->intra_matrix[j]        = v;
        s->chroma_intra_matrix[j] = v;
        s->inter_matrix[j]        = v;
        s->chroma_inter_matrix[j] = v;
    }

    s->progressive_sequence = 1;
    s->progressive_frame    = 1;
    s->frame_pred_frame_dct = 1;
    s->chroma_format        = 1;
    if (s->codec_tag == AV_RL32("BW10")) {

    s1->save_width           = s->width;
    s1->save_height          = s->height;
    s1->save_progressive_seq = s->progressive_sequence;
static int mpeg_decode_a53_cc(AVCodecContext *avctx,
                              const uint8_t *p, int buf_size)
{
    if (buf_size >= 6 &&
        p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
        p[4] == 3 && (p[5] & 0x40)) {
        int cc_count = p[5] & 0x1f;
        if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
            int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
            const uint64_t new_size = (old_size + cc_count

            memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
        }
    } else if (buf_size >= 2 &&
               p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
        int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
        const uint64_t new_size = (old_size + cc_count

            uint8_t field, cc1, cc2;
            uint8_t *cap = s1->a53_buf_ref->data;

            memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);

                    cap[0] = cap[1] = cap[2] = 0x00;
                    cap[0] = 0x04 | field;
    } else if (buf_size >= 11 &&
               p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
        for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)

        int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
        const uint64_t new_size = (old_size + cc_count

            uint8_t field1 = !!(p[4] & 0x80);
            uint8_t *cap = s1->a53_buf_ref->data;

            for (i = 0; i < cc_count; i++) {
                cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
                cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
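/* Layout sketch (reconstructed from the checks above, added for clarity):
 * the ATSC A/53 "GA94" user data handled in the first branch looks like
 *
 *     p[0..3]  'G','A','9','4'              user_identifier
 *     p[4]     3                            user_data_type_code (cc_data)
 *     p[5]     flags; low 5 bits = cc_count
 *     p[7..]   cc_count * 3 bytes of cc_data, appended to s1->a53_buf_ref
 */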
static void mpeg_decode_user_data(AVCodecContext *avctx,
                                  const uint8_t *p, int buf_size)
{
    const uint8_t *buf_end = p + buf_size;

    for (i = 0; !(!p[i-2] && !p[i-1] && p[i] == 1) && i < buf_size; i++) {
        if (!memcmp(p+i, "\0TMPGEXS\0", 9)){

    if (buf_end - p >= 5 &&
        p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
        if (buf_end - p < 1)
        s1->afd = p[0] & 0x0f;
    } else if (buf_end - p >= 6 &&
               p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
        const uint8_t S3D_video_format_type = p[5] & 0x7F;

        if (S3D_video_format_type == 0x03 ||
            S3D_video_format_type == 0x04 ||
            S3D_video_format_type == 0x08 ||
            S3D_video_format_type == 0x23) {
            s1->has_stereo3d = 1;

            switch (S3D_video_format_type) {
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf,
                            int buf_size)
{
        av_log(s->avctx, AV_LOG_DEBUG,
               "GOP (%s) closed_gop=%d broken_link=%d\n",
               tcbuf, s1->closed_gop, broken_link);
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                         int *got_output, const uint8_t *buf, int buf_size)
{
    const uint8_t *buf_ptr = buf;
    const uint8_t *buf_end = buf + buf_size;
    int ret, input_size;
    int last_code = 0, skip_frame = 0;
    int picture_start_code_seen = 0;

            avctx->execute(avctx, slice_decode_thread,
                           &s2->thread_context[0], NULL,
                           s->slice_count, sizeof(void *));
            for (i = 0; i < s->slice_count; i++)
                s2->er.error_count += s2->thread_context[i]->er.error_count;

#if FF_API_FLAG_TRUNCATED
            return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
#else
            return FFMAX(0, buf_ptr - buf);
#endif

        input_size = buf_end - buf_ptr;

            if (last_code == 0) {
                       "ignoring SEQ_START_CODE after %X\n", last_code);

            if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
            picture_start_code_seen = 1;

            if (s2->width <= 0 || s2->height <= 0) {
                       s2->width, s2->height);

                s2->intra_dc_precision = 3;
                s2->intra_matrix[0]    = 1;

                !avctx->hwaccel && s->slice_count) {
                avctx->execute(avctx, slice_decode_thread,
                               s2->thread_context, NULL,
                               s->slice_count, sizeof(void *));
                for (i = 0; i < s->slice_count; i++)
                    s2->er.error_count += s2->thread_context[i]->er.error_count;

                       "mpeg_decode_postinit() failure\n");

                       "ignoring pic after %X\n", last_code);

                if (last_code == 0) {
                           "ignoring seq ext after %X\n", last_code);

                       "ignoring pic cod ext after %X\n", last_code);

            if (last_code == 0) {
                s2->first_field = 0;
                       "ignoring GOP_START_CODE after %X\n", last_code);

                if (s2->progressive_sequence && !s2->progressive_frame) {
                    s2->progressive_frame = 1;
                           "interlaced frame in progressive sequence, ignoring\n");
                }

                if (s2->picture_structure == 0 ||
                    (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
                           "picture_structure %d invalid, ignoring\n",
                           s2->picture_structure);

                if (s2->progressive_sequence && !s2->frame_pred_frame_dct)

                    s2->first_field = 0;
                    s2->v_edge_pos  = 16 * s2->mb_height;

                    s2->first_field ^= 1;
                    s2->v_edge_pos   = 8 * s2->mb_height;
                    memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);

                const int field_pic = s2->picture_structure != PICT_FRAME;
                mb_y += (*buf_ptr&0xE0)<<2;

                if (buf_end - buf_ptr < 2) {

                if (mb_y >= s2->mb_height) {
                           "slice below image (%d >= %d)\n", mb_y, s2->mb_height);

                if (!s2->last_picture_ptr) {
                    if (!s->closed_gop) {
                               "Skipping B slice due to open GOP\n");
                if (!s2->next_picture_ptr) {
                           "Skipping P slice due to !sync\n");

                if (!s->mpeg_enc_ctx_allocated)

                if (mb_y < avctx->skip_top ||

                if (!s2->pict_type) {

                if (s->first_slice) {

                if (!s2->current_picture_ptr) {
                           "current_picture not initialized\n");

                    int threshold = (s2->mb_height * s->slice_count +
                                     s2->slice_context_count / 2) /
                                    s2->slice_context_count;
                    if (threshold <= mb_y) {
                        if (s->slice_count) {
                            s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;

                    if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
                                        s2->resync_mb_y, s2->mb_x, s2->mb_y,
                                        s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
                             int *got_output, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    if (s2->low_delay == 0 && s2->next_picture_ptr) {
        s2->next_picture_ptr = NULL;
    }

#if FF_API_FLAG_TRUNCATED
        if (ff_combine_frame(&s2->parse_context, next,
                             (const uint8_t **) &buf, &buf_size) < 0)
#endif

    if (s->mpeg_enc_ctx_allocated == 0 && (s2->codec_tag == AV_RL32("VCR2")

    if (avctx->extradata && !s->extradata_decoded) {
        s->extradata_decoded = 1;
        s2->current_picture_ptr = NULL;
    }

    if (ret < 0 || *got_output) {
        s2->current_picture_ptr = NULL;

        if (s->timecode_frame_start != -1 && *got_output) {
            memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));

            s->timecode_frame_start = -1;
        }
    }

static av_cold int mpeg_decode_end(AVCodecContext *avctx)
{
    if (s->mpeg_enc_ctx_allocated)
const FFCodec ff_mpeg1video_decoder = {
    .p.name = "mpeg1video",
#if FF_API_FLAG_TRUNCATED
                      AV_CODEC_CAP_TRUNCATED |
#endif
#if CONFIG_MPEG1_NVDEC_HWACCEL
#if CONFIG_MPEG1_VDPAU_HWACCEL
#if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL

const FFCodec ff_mpeg2video_decoder = {
    .p.name = "mpeg2video",
#if FF_API_FLAG_TRUNCATED
                      AV_CODEC_CAP_TRUNCATED |
#endif
#if CONFIG_MPEG2_DXVA2_HWACCEL
#if CONFIG_MPEG2_D3D11VA_HWACCEL
#if CONFIG_MPEG2_D3D11VA2_HWACCEL
#if CONFIG_MPEG2_NVDEC_HWACCEL
#if CONFIG_MPEG2_VAAPI_HWACCEL
#if CONFIG_MPEG2_VDPAU_HWACCEL
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL

const FFCodec ff_mpegvideo_decoder = {
    .p.name = "mpegvideo",
#if FF_API_FLAG_TRUNCATED
                      AV_CODEC_CAP_TRUNCATED |
#endif
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    if (s->flags & 0x10) {

        for (int y = 0; y < avctx->height; y += 16) {
            for (int x = 0; x < avctx->width; x += 16) {

                memset(s->block, 0, sizeof(s->block));

                for (int n = 0; n < 6; n++) {
                    if (s->flags & 0x80) {

                              frame->linesize[0], s->block[0]);
                              frame->linesize[0], s->block[1]);
                              frame->linesize[0], s->block[2]);
                              frame->linesize[0], s->block[3]);
                              frame->linesize[1], s->block[4]);
                              frame->linesize[2], s->block[5]);

    frame->key_frame = 1;

static av_cold int ipu_decode_init(AVCodecContext *avctx)
{
    for (int i = 0; i < 64; i++) {
    for (int i = 0; i < 64; i++) {
static int vcr2_init_sequence(AVCodecContext *avctx)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define MV_TYPE_16X16
1 vector for the whole mb
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_TIMECODE_STR_SIZE
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
AVPixelFormat
Pixel format.
AVBufferRef * a53_buf_ref
const AVRational ff_mpeg2_aspect[16]
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
static int get_bits_left(GetBitContext *gb)
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
#define FF_MPV_QSCALE_TYPE_MPEG2
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
#define check_scantable_index(ctx, x)
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
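A hedged usage sketch (mirroring the mpeg_decode_postinit() fragment above): the MPEG-2 display aspect ratio is turned into a sample aspect ratio by dividing out the display size:
    s->avctx->sample_aspect_ratio =
        av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
                 (AVRational) { s1->pan_scan.width, s1->pan_scan.height });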
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define SLICE_MAX_START_CODE
static int get_bits_count(const GetBitContext *s)
static av_cold int ipu_decode_init(AVCodecContext *avctx)
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
This structure describes decoded (raw) audio or video data.
av_cold void ff_mpeg12_common_init(MpegEncContext *s)
void ff_mpv_report_decode_progress(MpegEncContext *s)
#define HWACCEL_DXVA2(codec)
const FFCodec ff_mpegvideo_decoder
static av_cold int ipu_decode_end(AVCodecContext *avctx)
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
#define HWACCEL_D3D11VA2(codec)
const uint8_t ff_reverse[256]
int last_dc[3]
last DC values for MPEG-1
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
unsigned int ff_toupper4(unsigned int x)
#define PICT_BOTTOM_FIELD
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_init_block_index(MpegEncContext *s)
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
#define UPDATE_CACHE(name, gb)
static int mpeg_decode_postinit(AVCodecContext *avctx)
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define FF_DEBUG_PICT_INFO
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
#define GET_CACHE(name, gb)
static void skip_bits(GetBitContext *s, int n)
ScanTable intra_scantable
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
enum AVDiscard skip_frame
Skip decoding for selected frames.
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
#define USES_LIST(a, list)
static int slice_decode_thread(AVCodecContext *c, void *arg)
int flags
AV_CODEC_FLAG_*.
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
static double val(void *priv, double ch)
#define HWACCEL_VDPAU(codec)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
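Minimal example (illustrative only): av_reduce() brings a rational to lowest terms within a bound, e.g. reducing a doubled NTSC frame rate:
    int num, den;
    av_reduce(&num, &den, 60000, 2002, 1 << 30);   /* num/den == 30000/1001 */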
#define AV_EF_BITSTREAM
detect bitstream specification deviations
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
void ff_mpv_common_end(MpegEncContext *s)
const FFCodec ff_mpeg2video_decoder
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
unsigned frame_rate_index
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
static void flush(AVCodecContext *avctx)
#define CLOSE_READER(name, gb)
int has_b_frames
Size of the frame reordering buffer in the decoder.
#define FF_CODEC_DECODE_CB(func)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
const float ff_mpeg1_aspect[16]
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
int mpeg_enc_ctx_allocated
#define SHOW_SBITS(name, gb, num)
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
unsigned aspect_ratio_info
static enum AVPixelFormat pix_fmts[]
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
static int get_sbits(GetBitContext *s, int n)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
#define SKIP_BITS(name, gb, num)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int64_t rc_max_rate
maximum bitrate
This structure describes the bitrate properties of an encoded bitstream.
@ AVDISCARD_ALL
discard all
#define MB_PTYPE_VLC_BITS
#define PTRDIFF_SPECIFIER
enum AVColorRange color_range
MPEG vs JPEG YUV range.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
#define SLICE_MIN_START_CODE
av_cold void ff_mpv_idct_init(MpegEncContext *s)
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Rational number (pair of numerator and denominator).
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
int64_t bit_rate
the average bitrate
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
uint16_t inter_matrix[64]
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
#define LAST_SKIP_BITS(name, gb, num)
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
#define MB_BTYPE_VLC_BITS
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_EXPLODE
abort decoding on minor error detection
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
Find the end of the current frame in the bitstream.
@ AVDISCARD_NONKEY
discard all frames except keyframes
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
int flags2
AV_CODEC_FLAG2_*.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
void ff_mpeg1_clean_buffers(MpegEncContext *s)
const FFCodec ff_mpeg1video_decoder
AV_RB32
Read a 32-bit big-endian value from memory.
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
#define PICTURE_START_CODE
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
const uint16_t ff_mpeg1_default_intra_matrix[256]
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
#define MB_TYPE_INTERLACED
#define OPEN_READER(name, gb)
void ff_mpeg_flush(AVCodecContext *avctx)
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define HWACCEL_D3D11VA(codec)
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
#define MV_TYPE_FIELD
2 vectors, one per field
static void skip_bits1(GetBitContext *s)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
#define HWACCEL_NVDEC(codec)
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream to a complete frame.
#define FF_THREAD_FRAME
Decode more than one frame at once.
const AVProfile ff_mpeg2_video_profiles[]
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
#define DECLARE_ALIGNED(n, t, v)
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
int block_last_index[12]
last non zero coefficient in block
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
uint16_t chroma_inter_matrix[64]
#define i(width, name, range_min, range_max)
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
unsigned properties
Properties of the stream that gets decoded.
const uint8_t ff_alternate_vertical_scan[64]
static const uint32_t btype2mb_type[11]
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
av_cold void ff_mpeg12_init_vlcs(void)
#define FF_DEBUG_STARTCODE
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
static const uint32_t ptype2mb_type[7]
const char * name
Name of the codec implementation.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define HWACCEL_VIDEOTOOLBOX(codec)
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
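Usage sketch (as in the reinitialization check above): av_cmp_q() returns 0 when the two rationals are equal, so a non-zero result signals an aspect-ratio change:
    if (av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio))
        /* aspect ratio changed, the context must be reinitialized */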
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
const uint8_t ff_zigzag_direct[64]
const AVRational ff_mpeg12_frame_rate_tab[]
static const float pred[4]
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
static const uint8_t * align_get_bits(GetBitContext *s)
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
AV_RL32
Read a 32-bit little-endian value from memory (used here to build fourcc tags such as AV_RL32("VCR2")).
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
static int skip_1stop_8data_bits(GetBitContext *gb)
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
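Usage sketch (matching the mpeg_decode_gop() fragment above; the 25-bit value is named tc purely for illustration):
    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_mpeg_tc_string(tcbuf, tc);
    av_log(s->avctx, AV_LOG_DEBUG, "GOP (%s) closed_gop=%d broken_link=%d\n",
           tcbuf, s1->closed_gop, broken_link);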
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
#define SHOW_UBITS(name, gb, num)
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
AVDictionary * metadata
metadata.
static av_const int sign_extend(int val, unsigned bits)
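Quick illustration (added, not from the file): sign_extend() reinterprets the low 'bits' bits of val as two's complement, e.g.
    sign_extend(0x1F, 5) == -1
    sign_extend(0x0F, 5) == 15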
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
void ff_mpv_frame_end(MpegEncContext *s)
static int ref[MAX_W *MAX_W]
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
static int shift(int a, int b)
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int get_dmv(MpegEncContext *s)
@ AV_PICTURE_TYPE_P
Predicted.
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
A reference to a data buffer.
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
uint8_t idct_permutation[64]
IDCT input permutation.
const FFCodec ff_ipu_decoder
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Structure to hold side data for an AVFrame.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
MpegEncContext mpeg_enc_ctx
This structure stores compressed data.
void ff_er_frame_end(ERContext *s)
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
static int mpeg_get_qscale(MpegEncContext *s)
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
#define HWACCEL_VAAPI(codec)
int width
picture width / height.
#define flags(name, subs,...)
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
AVRational frame_rate_ext
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
int64_t timecode_frame_start
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
@ AVDISCARD_NONREF
discard all non reference
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
#define DECODE_SLICE_ERROR
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
static int decode_dc(GetBitContext *gb, int component)
RL_VLC_ELEM * rl_vlc[32]
decoding only
uint16_t chroma_intra_matrix[64]
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.