Go to the documentation of this file.
34 #include "config_components.h"
81 #define QUANT_BIAS_SHIFT 8
83 #define QMAT_SHIFT_MMX 16
109 uint16_t (*
qmat16)[2][64],
110 const uint16_t *quant_matrix,
111 int bias,
int qmin,
int qmax,
int intra)
122 else qscale2 =
qscale << 1;
129 for (
i = 0;
i < 64;
i++) {
130 const int j =
s->idsp.idct_permutation[
i];
131 int64_t den = (int64_t) qscale2 * quant_matrix[j];
141 for (
i = 0;
i < 64;
i++) {
142 const int j =
s->idsp.idct_permutation[
i];
143 int64_t den =
ff_aanscales[
i] * (int64_t) qscale2 * quant_matrix[j];
153 for (
i = 0;
i < 64;
i++) {
154 const int j =
s->idsp.idct_permutation[
i];
155 int64_t den = (int64_t) qscale2 * quant_matrix[j];
175 for (
i = intra;
i < 64;
i++) {
187 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
194 if (
s->q_scale_type == 1 && 0) {
196 int bestdiff=INT_MAX;
204 if (
diff < bestdiff) {
213 s->qscale =
av_clip(
s->qscale,
s->avctx->qmin,
s->vbv_ignore_qmax ? 31 :
s->avctx->qmax);
226 for (
i = 0;
i < 64;
i++) {
238 int8_t *
const qscale_table =
s->current_picture.qscale_table;
241 for (
i = 0;
i <
s->mb_num;
i++) {
242 unsigned int lam =
s->lambda_table[
s->mb_index2xy[
i]];
244 qscale_table[
s->mb_index2xy[
i]] =
av_clip(qp,
s->avctx->qmin,
252 #define COPY(a) dst->a= src->a
268 for (
int i = -16;
i < 16;
i++)
287 s->input_picture_number = 0;
288 s->picture_in_gop_number = 0;
297 if (CONFIG_H263_ENCODER)
299 if (!
s->dct_quantize)
303 s->fast_dct_quantize =
s->dct_quantize;
304 if (
s->avctx->trellis)
316 int mb_array_size, mv_table_size;
344 "keyframe interval too large!, reducing it from %d to %d\n",
356 "max b frames must be 0 or positive for mpegvideo based encoders\n");
367 s->rtp_mode = !!
s->rtp_payload_size;
371 if (
s->intra_dc_precision < 0) {
372 s->intra_dc_precision += 8;
373 }
else if (
s->intra_dc_precision >= 8)
374 s->intra_dc_precision -= 8;
376 if (
s->intra_dc_precision < 0) {
378 "intra dc precision must be positive, note some applications use"
379 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
389 if (
s->gop_size <= 1) {
443 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
460 "impossible bitrate constraints, this will fail\n");
470 if (!
s->fixed_qscale &&
476 if (nbt <= INT_MAX) {
489 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
490 "specified vbv buffer is too large for the given bitrate!\n");
502 "OBMC is only supported with simple mb decision\n");
517 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
570 if (
s->scenechange_threshold < 1000000000 &&
573 "closed gop with scene change detection are not supported yet, "
574 "set threshold to 1000000000\n");
582 "low delay forcing is only available for mpeg2, "
583 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
586 if (
s->max_b_frames != 0) {
588 "B-frames cannot be used with low delay\n");
593 if (
s->q_scale_type == 1) {
596 "non linear quant only supports qmax <= 28 currently\n");
609 "notice: b_frame_strategy only affects the first pass\n");
610 s->b_frame_strategy = 0;
624 s->inter_quant_bias = 0;
626 s->intra_quant_bias = 0;
641 "timebase %d/%d not supported by MPEG 4 standard, "
642 "the maximum admitted value for the timebase denominator "
650 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
657 avctx->
delay =
s->low_delay ? 0 : (
s->max_b_frames + 1);
661 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
675 if (!CONFIG_SPEEDHQ_ENCODER)
683 if (!CONFIG_H261_ENCODER)
694 if (!CONFIG_H263_ENCODER)
697 s->width,
s->height) == 8) {
699 "The specified picture size of %dx%d is not valid for "
700 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
701 "352x288, 704x576, and 1408x1152. "
702 "Try H.263+.\n",
s->width,
s->height);
714 s->modified_quant =
s->h263_aic;
716 s->unrestricted_mv =
s->obmc ||
s->loop_filter ||
s->umvplus;
726 s->unrestricted_mv = 1;
740 s->modified_quant = 1;
744 s->unrestricted_mv = 0;
749 s->unrestricted_mv = 1;
750 s->low_delay =
s->max_b_frames ? 0 : 1;
751 avctx->
delay =
s->low_delay ? 0 : (
s->max_b_frames + 1);
756 s->unrestricted_mv = 1;
757 s->msmpeg4_version = 2;
764 s->unrestricted_mv = 1;
765 s->msmpeg4_version = 3;
766 s->flipflop_rounding = 1;
773 s->unrestricted_mv = 1;
774 s->msmpeg4_version = 4;
775 s->flipflop_rounding = 1;
782 s->unrestricted_mv = 1;
783 s->msmpeg4_version = 5;
784 s->flipflop_rounding = 1;
796 s->progressive_frame =
824 mv_table_size = (
s->mb_height + 2) *
s->mb_stride + 1;
832 s->p_mv_table =
s->p_mv_table_base +
s->mb_stride + 1;
833 s->b_forw_mv_table =
s->b_forw_mv_table_base +
s->mb_stride + 1;
834 s->b_back_mv_table =
s->b_back_mv_table_base +
s->mb_stride + 1;
835 s->b_bidir_forw_mv_table =
s->b_bidir_forw_mv_table_base +
s->mb_stride + 1;
836 s->b_bidir_back_mv_table =
s->b_bidir_back_mv_table_base +
s->mb_stride + 1;
837 s->b_direct_mv_table =
s->b_direct_mv_table_base +
s->mb_stride + 1;
840 mb_array_size =
s->mb_stride *
s->mb_height;
850 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
855 if (!(tmp1 =
ALLOCZ_ARRAYS(
s->b_field_mv_table_base, 8, mv_table_size)) ||
856 !(tmp2 =
ALLOCZ_ARRAYS(
s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
860 s->p_field_select_table[1] =
s->p_field_select_table[0] + 2 * mv_table_size;
861 tmp1 +=
s->mb_stride + 1;
863 for (
int i = 0;
i < 2;
i++) {
864 for (
int j = 0; j < 2; j++) {
865 for (
int k = 0; k < 2; k++) {
866 s->b_field_mv_table[
i][j][k] = tmp1;
867 tmp1 += mv_table_size;
869 s->b_field_select_table[
i][j] = tmp2;
870 tmp2 += 2 * mv_table_size;
875 if (
s->noise_reduction) {
883 s->dct_unquantize_intra =
s->dct_unquantize_mpeg2_intra;
884 s->dct_unquantize_inter =
s->dct_unquantize_mpeg2_inter;
886 s->dct_unquantize_intra =
s->dct_unquantize_h263_intra;
887 s->dct_unquantize_inter =
s->dct_unquantize_h263_inter;
889 s->dct_unquantize_intra =
s->dct_unquantize_mpeg1_intra;
890 s->dct_unquantize_inter =
s->dct_unquantize_mpeg1_inter;
893 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) &&
s->modified_quant)
896 if (
s->slice_context_count > 1) {
900 s->h263_slice_structured = 1;
903 s->quant_precision = 5;
906 ff_set_cmp(&
s->mecc,
s->mecc.frame_skip_cmp,
s->frame_skip_cmp);
908 if (CONFIG_H263_ENCODER &&
s->out_format ==
FMT_H263) {
910 if (CONFIG_MSMPEG4ENC &&
s->msmpeg4_version)
915 for (
i = 0;
i < 64;
i++) {
916 int j =
s->idsp.idct_permutation[
i];
929 s->chroma_intra_matrix[j] =
953 if (
s->b_frame_strategy == 2) {
954 for (
i = 0;
i <
s->max_b_frames + 2;
i++) {
956 if (!
s->tmp_frames[
i])
960 s->tmp_frames[
i]->width =
s->width >>
s->brd_scale;
961 s->tmp_frames[
i]->height =
s->height >>
s->brd_scale;
999 av_freep(&
s->b_bidir_forw_mv_table_base);
1000 av_freep(&
s->b_bidir_back_mv_table_base);
1003 av_freep(&
s->b_field_select_table[0][0]);
1012 if(
s->q_chroma_intra_matrix !=
s->q_intra_matrix )
av_freep(&
s->q_chroma_intra_matrix);
1013 if(
s->q_chroma_intra_matrix16 !=
s->q_intra_matrix16)
av_freep(&
s->q_chroma_intra_matrix16);
1014 s->q_chroma_intra_matrix=
NULL;
1015 s->q_chroma_intra_matrix16=
NULL;
1030 #define IS_ENCODER 1
1038 for (
int i = 0;
i < 6;
i++) {
1039 for (
int j = 0; j < 64; j++) {
1041 block[
i][
s->idsp.idct_permutation[j]]);
1055 for (y = 0; y < 16; y++) {
1056 for (x = 0; x < 16; x++) {
1071 h =
s->height & ~15;
1073 for (y = 0; y <
h; y += 16) {
1074 for (x = 0; x <
w; x += 16) {
1081 acc += sae + 500 < sad;
1090 s->chroma_x_shift,
s->chroma_y_shift,
s->out_format,
1091 s->mb_stride,
s->mb_width,
s->mb_height,
s->b8_stride,
1092 &
s->linesize, &
s->uvlinesize);
1099 int i, display_picture_number = 0,
ret;
1100 int encoding_delay =
s->max_b_frames ?
s->max_b_frames
1101 : (
s->low_delay ? 0 : 1);
1102 int flush_offset = 1;
1107 display_picture_number =
s->input_picture_number++;
1111 int64_t last =
s->user_specified_pts;
1115 "Invalid pts (%"PRId64
") <= last (%"PRId64
")\n",
1120 if (!
s->low_delay && display_picture_number == 1)
1121 s->dts_delta =
pts - last;
1123 s->user_specified_pts =
pts;
1126 s->user_specified_pts =
1127 pts =
s->user_specified_pts + 1;
1129 "Warning: AVFrame.pts=? trying to guess (%"PRId64
")\n",
1132 pts = display_picture_number;
1136 if (!pic_arg->
buf[0] ||
1138 pic_arg->
linesize[1] !=
s->uvlinesize ||
1141 if ((
s->width & 15) || (
s->height & 15))
1149 pic_arg->
linesize[1],
s->linesize,
s->uvlinesize);
1155 pic = &
s->picture[
i];
1172 int h_chroma_shift, v_chroma_shift;
1177 for (
i = 0;
i < 3;
i++) {
1179 int dst_stride =
i ?
s->uvlinesize :
s->linesize;
1180 int h_shift =
i ? h_chroma_shift : 0;
1181 int v_shift =
i ? v_chroma_shift : 0;
1182 int w =
s->width >> h_shift;
1183 int h =
s->height >> v_shift;
1184 const uint8_t *
src = pic_arg->
data[
i];
1185 uint8_t *dst = pic->
f->
data[
i];
1189 && !
s->progressive_sequence
1190 &&
FFALIGN(
s->height, 32) -
s->height > 16)
1193 if (!
s->avctx->rc_buffer_size)
1196 if (src_stride == dst_stride)
1197 memcpy(dst,
src, src_stride *
h);
1200 uint8_t *dst2 = dst;
1202 memcpy(dst2,
src,
w);
1207 if ((
s->width & 15) || (
s->height & (vpad-1))) {
1208 s->mpvencdsp.draw_edges(dst, dst_stride,
1227 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1228 if (
s->input_picture[flush_offset])
1231 if (flush_offset <= 1)
1234 encoding_delay = encoding_delay - flush_offset + 1;
1239 s->input_picture[
i - flush_offset] =
s->input_picture[
i];
1241 s->input_picture[encoding_delay] = pic;
1250 int64_t score64 = 0;
1252 for (plane = 0; plane < 3; plane++) {
1254 const int bw = plane ? 1 : 2;
1255 for (y = 0; y <
s->mb_height * bw; y++) {
1256 for (x = 0; x <
s->mb_width * bw; x++) {
1257 int off = p->
shared ? 0 : 16;
1258 const uint8_t *dptr = p->
f->
data[plane] + 8 * (x + y *
stride) + off;
1259 const uint8_t *rptr =
ref->f->data[plane] + 8 * (x + y *
stride);
1260 int v =
s->mecc.frame_skip_cmp[1](
s, dptr, rptr,
stride, 8);
1262 switch (
FFABS(
s->frame_skip_exp)) {
1263 case 0: score =
FFMAX(score, v);
break;
1264 case 1: score +=
FFABS(v);
break;
1265 case 2: score64 += v * (int64_t)v;
break;
1266 case 3: score64 +=
FFABS(v * (int64_t)v * v);
break;
1267 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v);
break;
1276 if (
s->frame_skip_exp < 0)
1277 score64 = pow(score64 / (
double)(
s->mb_width *
s->mb_height),
1278 -1.0/
s->frame_skip_exp);
1282 if (score64 < ((
s->frame_skip_factor * (int64_t)
s->lambda) >> 8))
1311 const int scale =
s->brd_scale;
1315 int64_t best_rd = INT64_MAX;
1316 int best_b_count = -1;
1331 b_lambda = p_lambda;
1335 for (
i = 0;
i <
s->max_b_frames + 2;
i++) {
1336 const Picture *pre_input_ptr =
i ?
s->input_picture[
i - 1] :
1337 s->next_picture_ptr;
1339 if (pre_input_ptr) {
1340 const uint8_t *
data[4];
1343 if (!pre_input_ptr->
shared &&
i) {
1349 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[0],
1350 s->tmp_frames[
i]->linesize[0],
1354 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[1],
1355 s->tmp_frames[
i]->linesize[1],
1359 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[2],
1360 s->tmp_frames[
i]->linesize[2],
1367 for (j = 0; j <
s->max_b_frames + 1; j++) {
1371 if (!
s->input_picture[j])
1384 c->mb_decision =
s->avctx->mb_decision;
1385 c->me_cmp =
s->avctx->me_cmp;
1386 c->mb_cmp =
s->avctx->mb_cmp;
1387 c->me_sub_cmp =
s->avctx->me_sub_cmp;
1389 c->time_base =
s->avctx->time_base;
1390 c->max_b_frames =
s->max_b_frames;
1408 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1409 int is_p =
i % (j + 1) == j ||
i ==
s->max_b_frames;
1411 s->tmp_frames[
i + 1]->pict_type = is_p ?
1413 s->tmp_frames[
i + 1]->quality = is_p ? p_lambda : b_lambda;
1432 rd +=
c->error[0] +
c->error[1] +
c->error[2];
1450 return best_b_count;
1458 s->reordered_input_picture[
i - 1] =
s->reordered_input_picture[
i];
1462 if (!
s->reordered_input_picture[0] &&
s->input_picture[0]) {
1463 if (
s->frame_skip_threshold ||
s->frame_skip_factor) {
1464 if (
s->picture_in_gop_number <
s->gop_size &&
1465 s->next_picture_ptr &&
1477 !
s->next_picture_ptr ||
s->intra_only) {
1478 s->reordered_input_picture[0] =
s->input_picture[0];
1480 s->reordered_input_picture[0]->coded_picture_number =
1481 s->coded_picture_number++;
1486 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1487 int pict_num =
s->input_picture[0]->display_picture_number +
i;
1489 if (pict_num >=
s->rc_context.num_entries)
1491 if (!
s->input_picture[
i]) {
1496 s->input_picture[
i]->f->pict_type =
1497 s->rc_context.entry[pict_num].new_pict_type;
1501 if (
s->b_frame_strategy == 0) {
1502 b_frames =
s->max_b_frames;
1503 while (b_frames && !
s->input_picture[b_frames])
1505 }
else if (
s->b_frame_strategy == 1) {
1506 for (
i = 1;
i <
s->max_b_frames + 1;
i++) {
1507 if (
s->input_picture[
i] &&
1508 s->input_picture[
i]->b_frame_score == 0) {
1509 s->input_picture[
i]->b_frame_score =
1511 s->input_picture[
i ]->f->data[0],
1512 s->input_picture[
i - 1]->f->data[0],
1516 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1517 if (!
s->input_picture[
i] ||
1518 s->input_picture[
i]->b_frame_score - 1 >
1519 s->mb_num /
s->b_sensitivity)
1523 b_frames =
FFMAX(0,
i - 1);
1526 for (
i = 0;
i < b_frames + 1;
i++) {
1527 s->input_picture[
i]->b_frame_score = 0;
1529 }
else if (
s->b_frame_strategy == 2) {
1537 for (
i = b_frames - 1;
i >= 0;
i--) {
1538 int type =
s->input_picture[
i]->f->pict_type;
1543 b_frames ==
s->max_b_frames) {
1545 "warning, too many B-frames in a row\n");
1548 if (
s->picture_in_gop_number + b_frames >=
s->gop_size) {
1550 s->gop_size >
s->picture_in_gop_number) {
1551 b_frames =
s->gop_size -
s->picture_in_gop_number - 1;
1563 s->reordered_input_picture[0] =
s->input_picture[b_frames];
1566 s->reordered_input_picture[0]->coded_picture_number =
1567 s->coded_picture_number++;
1568 for (
i = 0;
i < b_frames;
i++) {
1569 s->reordered_input_picture[
i + 1] =
s->input_picture[
i];
1570 s->reordered_input_picture[
i + 1]->f->pict_type =
1572 s->reordered_input_picture[
i + 1]->coded_picture_number =
1573 s->coded_picture_number++;
1580 if (
s->reordered_input_picture[0]) {
1581 s->reordered_input_picture[0]->reference =
1582 s->reordered_input_picture[0]->f->pict_type !=
1586 s->reordered_input_picture[0]->f)))
1589 if (
s->reordered_input_picture[0]->shared ||
s->avctx->rc_buffer_size) {
1597 pic = &
s->picture[
i];
1599 pic->
reference =
s->reordered_input_picture[0]->reference;
1612 s->reordered_input_picture[0]->shared = 0;
1614 s->current_picture_ptr = pic;
1617 s->current_picture_ptr =
s->reordered_input_picture[0];
1618 for (
i = 0;
i < 4;
i++) {
1619 if (
s->new_picture->data[
i])
1623 s->picture_number =
s->current_picture_ptr->display_picture_number;
1631 if (
s->unrestricted_mv &&
1632 s->current_picture.reference &&
1635 int hshift =
desc->log2_chroma_w;
1636 int vshift =
desc->log2_chroma_h;
1637 s->mpvencdsp.draw_edges(
s->current_picture.f->data[0],
1638 s->current_picture.f->linesize[0],
1639 s->h_edge_pos,
s->v_edge_pos,
1642 s->mpvencdsp.draw_edges(
s->current_picture.f->data[1],
1643 s->current_picture.f->linesize[1],
1644 s->h_edge_pos >> hshift,
1645 s->v_edge_pos >> vshift,
1649 s->mpvencdsp.draw_edges(
s->current_picture.f->data[2],
1650 s->current_picture.f->linesize[2],
1651 s->h_edge_pos >> hshift,
1652 s->v_edge_pos >> vshift,
1660 s->last_pict_type =
s->pict_type;
1661 s->last_lambda_for [
s->pict_type] =
s->current_picture_ptr->f->quality;
1663 s->last_non_b_pict_type =
s->pict_type;
1670 for (intra = 0; intra < 2; intra++) {
1671 if (
s->dct_count[intra] > (1 << 16)) {
1672 for (
i = 0;
i < 64;
i++) {
1673 s->dct_error_sum[intra][
i] >>= 1;
1675 s->dct_count[intra] >>= 1;
1678 for (
i = 0;
i < 64;
i++) {
1679 s->dct_offset[intra][
i] = (
s->noise_reduction *
1680 s->dct_count[intra] +
1681 s->dct_error_sum[intra][
i] / 2) /
1682 (
s->dct_error_sum[intra][
i] + 1);
1693 s->last_picture_ptr !=
s->next_picture_ptr &&
1694 s->last_picture_ptr->f->buf[0]) {
1698 s->current_picture_ptr->f->pict_type =
s->pict_type;
1703 s->current_picture_ptr)) < 0)
1707 s->last_picture_ptr =
s->next_picture_ptr;
1708 s->next_picture_ptr =
s->current_picture_ptr;
1711 if (
s->last_picture_ptr) {
1713 if (
s->last_picture_ptr->f->buf[0] &&
1715 s->last_picture_ptr)) < 0)
1718 if (
s->next_picture_ptr) {
1720 if (
s->next_picture_ptr->f->buf[0] &&
1722 s->next_picture_ptr)) < 0)
1728 for (
i = 0;
i < 4;
i++) {
1730 s->current_picture.f->data[
i] +=
1731 s->current_picture.f->linesize[
i];
1733 s->current_picture.f->linesize[
i] *= 2;
1734 s->last_picture.f->linesize[
i] *= 2;
1735 s->next_picture.f->linesize[
i] *= 2;
1739 if (
s->dct_error_sum) {
1748 const AVFrame *pic_arg,
int *got_packet)
1751 int i, stuffing_count,
ret;
1752 int context_count =
s->slice_context_count;
1754 s->vbv_ignore_qmax = 0;
1756 s->picture_in_gop_number++;
1766 if (
s->new_picture->data[0]) {
1767 int growing_buffer = context_count == 1 && !
s->data_partitioning;
1768 size_t pkt_size = 10000 +
s->mb_width *
s->mb_height *
1781 s->mb_width*
s->mb_height*12);
1782 s->prev_mb_info =
s->last_mb_info =
s->mb_info_size = 0;
1785 for (
i = 0;
i < context_count;
i++) {
1786 int start_y =
s->thread_context[
i]->start_mb_y;
1788 int h =
s->mb_height;
1789 uint8_t *start =
pkt->
data + (size_t)(((int64_t)
pkt->
size) * start_y /
h);
1790 uint8_t *end =
pkt->
data + (size_t)(((int64_t)
pkt->
size) * end_y /
h);
1795 s->pict_type =
s->new_picture->pict_type;
1802 if (growing_buffer) {
1812 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
s->out_format ==
FMT_MJPEG)
1822 s->lambda <
s->lmax) {
1823 s->next_lambda =
FFMAX(
s->lambda + min_step,
s->lambda *
1824 (
s->qscale + 1) /
s->qscale);
1825 if (
s->adaptive_quant) {
1827 for (
i = 0;
i <
s->mb_height *
s->mb_stride;
i++)
1828 s->lambda_table[
i] =
1829 FFMAX(
s->lambda_table[
i] + min_step,
1830 s->lambda_table[
i] * (
s->qscale + 1) /
1836 if (
s->flipflop_rounding ||
1839 s->no_rounding ^= 1;
1842 s->time_base =
s->last_time_base;
1843 s->last_non_b_time =
s->time -
s->pp_time;
1845 for (
i = 0;
i < context_count;
i++) {
1849 s->vbv_ignore_qmax = 1;
1860 for (
i = 0;
i < 4;
i++) {
1870 s->misc_bits +
s->i_tex_bits +
1876 s->stuffing_bits = 8*stuffing_count;
1877 if (stuffing_count) {
1883 switch (
s->codec_id) {
1886 while (stuffing_count--) {
1893 stuffing_count -= 4;
1894 while (stuffing_count--) {
1900 s->stuffing_bits = 0;
1918 int minbits =
s->frame_bits - 8 *
1919 (
s->vbv_delay_pos - 1);
1920 double bits =
s->rc_context.buffer_index + minbits - inbits;
1921 uint8_t *
const vbv_delay_ptr =
s->pb.buf +
s->vbv_delay_pos;
1925 "Internal error, negative bits\n");
1937 vbv_delay_ptr[0] &= 0xF8;
1940 vbv_delay_ptr[2] &= 0x07;
1949 (uint8_t*)props, props_size);
1955 s->total_bits +=
s->frame_bits;
1957 pkt->
pts =
s->current_picture.f->pts;
1960 if (!
s->current_picture.coded_picture_number)
1975 if (
s->current_picture.f->key_frame)
1985 if (!
s->picture[
i].reference)
1997 int n,
int threshold)
1999 static const char tab[64] = {
2000 3, 2, 2, 1, 1, 1, 1, 1,
2001 1, 1, 1, 1, 1, 1, 1, 1,
2002 1, 1, 1, 1, 1, 1, 1, 1,
2003 0, 0, 0, 0, 0, 0, 0, 0,
2004 0, 0, 0, 0, 0, 0, 0, 0,
2005 0, 0, 0, 0, 0, 0, 0, 0,
2006 0, 0, 0, 0, 0, 0, 0, 0,
2007 0, 0, 0, 0, 0, 0, 0, 0
2012 int16_t *
block =
s->block[n];
2013 const int last_index =
s->block_last_index[n];
2016 if (threshold < 0) {
2018 threshold = -threshold;
2023 if (last_index <= skip_dc - 1)
2026 for (
i = 0;
i <= last_index;
i++) {
2027 const int j =
s->intra_scantable.permutated[
i];
2030 if (skip_dc &&
i == 0)
2034 }
else if (
level > 1) {
2040 if (score >= threshold)
2042 for (
i = skip_dc;
i <= last_index;
i++) {
2043 const int j =
s->intra_scantable.permutated[
i];
2047 s->block_last_index[n] = 0;
2049 s->block_last_index[n] = -1;
2056 const int maxlevel =
s->max_qcoeff;
2057 const int minlevel =
s->min_qcoeff;
2065 for (;
i <= last_index;
i++) {
2066 const int j =
s->intra_scantable.permutated[
i];
2069 if (
level > maxlevel) {
2072 }
else if (
level < minlevel) {
2082 "warning, clipping %d dct coefficients to %d..%d\n",
2090 for (y = 0; y < 8; y++) {
2091 for (x = 0; x < 8; x++) {
2097 for (y2 =
FFMAX(y - 1, 0); y2 <
FFMIN(8, y + 2); y2++) {
2098 for (x2=
FFMAX(x - 1, 0); x2 <
FFMIN(8, x + 2); x2++) {
2099 int v = ptr[x2 + y2 *
stride];
2111 int motion_x,
int motion_y,
2112 int mb_block_height,
2121 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2122 (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2124 int16_t orig[12][64];
2125 const int mb_x =
s->mb_x;
2126 const int mb_y =
s->mb_y;
2130 int uv_dct_offset =
s->uvlinesize * 8;
2131 const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2132 ptrdiff_t wrap_y, wrap_c;
2134 for (
i = 0;
i < mb_block_count;
i++)
2135 skip_dct[
i] =
s->skipdct;
2137 if (
s->adaptive_quant) {
2138 const int last_qp =
s->qscale;
2139 const int mb_xy =
mb_x +
mb_y *
s->mb_stride;
2141 s->lambda =
s->lambda_table[mb_xy];
2145 s->qscale =
s->current_picture_ptr->qscale_table[mb_xy];
2146 s->dquant =
s->qscale - last_qp;
2167 wrap_y =
s->linesize;
2168 wrap_c =
s->uvlinesize;
2169 ptr_y =
s->new_picture->data[0] +
2171 ptr_cb =
s->new_picture->data[1] +
2172 (
mb_y * mb_block_height * wrap_c) +
mb_x * mb_block_width;
2173 ptr_cr =
s->new_picture->data[2] +
2174 (
mb_y * mb_block_height * wrap_c) +
mb_x * mb_block_width;
2177 uint8_t *ebuf =
s->sc.edge_emu_buffer + 38 * wrap_y;
2180 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2183 s->width,
s->height);
2185 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2187 mb_block_width, mb_block_height,
2188 mb_x * mb_block_width,
mb_y * mb_block_height,
2190 ptr_cb = ebuf + 16 * wrap_y;
2191 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2193 mb_block_width, mb_block_height,
2194 mb_x * mb_block_width,
mb_y * mb_block_height,
2196 ptr_cr = ebuf + 16 * wrap_y + 16;
2201 int progressive_score, interlaced_score;
2203 s->interlaced_dct = 0;
2204 progressive_score =
s->mecc.ildct_cmp[4](
s, ptr_y,
NULL, wrap_y, 8) +
2205 s->mecc.ildct_cmp[4](
s, ptr_y + wrap_y * 8,
2206 NULL, wrap_y, 8) - 400;
2208 if (progressive_score > 0) {
2209 interlaced_score =
s->mecc.ildct_cmp[4](
s, ptr_y,
2210 NULL, wrap_y * 2, 8) +
2211 s->mecc.ildct_cmp[4](
s, ptr_y + wrap_y,
2212 NULL, wrap_y * 2, 8);
2213 if (progressive_score > interlaced_score) {
2214 s->interlaced_dct = 1;
2217 uv_dct_offset = wrap_c;
2226 s->pdsp.get_pixels(
s->block[0], ptr_y, wrap_y);
2227 s->pdsp.get_pixels(
s->block[1], ptr_y + 8, wrap_y);
2228 s->pdsp.get_pixels(
s->block[2], ptr_y +
dct_offset, wrap_y);
2229 s->pdsp.get_pixels(
s->block[3], ptr_y +
dct_offset + 8, wrap_y);
2235 s->pdsp.get_pixels(
s->block[4], ptr_cb, wrap_c);
2236 s->pdsp.get_pixels(
s->block[5], ptr_cr, wrap_c);
2238 s->pdsp.get_pixels(
s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2239 s->pdsp.get_pixels(
s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2241 s->pdsp.get_pixels(
s->block[ 6], ptr_cb + 8, wrap_c);
2242 s->pdsp.get_pixels(
s->block[ 7], ptr_cr + 8, wrap_c);
2243 s->pdsp.get_pixels(
s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2244 s->pdsp.get_pixels(
s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2245 s->pdsp.get_pixels(
s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2246 s->pdsp.get_pixels(
s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2252 uint8_t *dest_y, *dest_cb, *dest_cr;
2254 dest_y =
s->dest[0];
2255 dest_cb =
s->dest[1];
2256 dest_cr =
s->dest[2];
2259 op_pix =
s->hdsp.put_pixels_tab;
2260 op_qpix =
s->qdsp.put_qpel_pixels_tab;
2262 op_pix =
s->hdsp.put_no_rnd_pixels_tab;
2263 op_qpix =
s->qdsp.put_no_rnd_qpel_pixels_tab;
2268 s->last_picture.f->data,
2270 op_pix =
s->hdsp.avg_pixels_tab;
2271 op_qpix =
s->qdsp.avg_qpel_pixels_tab;
2275 s->next_picture.f->data,
2280 int progressive_score, interlaced_score;
2282 s->interlaced_dct = 0;
2283 progressive_score =
s->mecc.ildct_cmp[0](
s, dest_y, ptr_y, wrap_y, 8) +
2284 s->mecc.ildct_cmp[0](
s, dest_y + wrap_y * 8,
2289 progressive_score -= 400;
2291 if (progressive_score > 0) {
2292 interlaced_score =
s->mecc.ildct_cmp[0](
s, dest_y, ptr_y,
2294 s->mecc.ildct_cmp[0](
s, dest_y + wrap_y,
2298 if (progressive_score > interlaced_score) {
2299 s->interlaced_dct = 1;
2302 uv_dct_offset = wrap_c;
2310 s->pdsp.diff_pixels(
s->block[0], ptr_y, dest_y, wrap_y);
2311 s->pdsp.diff_pixels(
s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2314 s->pdsp.diff_pixels(
s->block[3], ptr_y +
dct_offset + 8,
2321 s->pdsp.diff_pixels(
s->block[4], ptr_cb, dest_cb, wrap_c);
2322 s->pdsp.diff_pixels(
s->block[5], ptr_cr, dest_cr, wrap_c);
2324 s->pdsp.diff_pixels(
s->block[6], ptr_cb + uv_dct_offset,
2325 dest_cb + uv_dct_offset, wrap_c);
2326 s->pdsp.diff_pixels(
s->block[7], ptr_cr + uv_dct_offset,
2327 dest_cr + uv_dct_offset, wrap_c);
2331 if (
s->mc_mb_var[
s->mb_stride *
mb_y +
mb_x] < 2 *
s->qscale *
s->qscale) {
2333 if (
s->mecc.sad[1](
NULL, ptr_y, dest_y, wrap_y, 8) < 20 *
s->qscale)
2335 if (
s->mecc.sad[1](
NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 *
s->qscale)
2338 wrap_y, 8) < 20 *
s->qscale)
2341 wrap_y, 8) < 20 *
s->qscale)
2343 if (
s->mecc.sad[1](
NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 *
s->qscale)
2345 if (
s->mecc.sad[1](
NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 *
s->qscale)
2348 if (
s->mecc.sad[1](
NULL, ptr_cb + uv_dct_offset,
2349 dest_cb + uv_dct_offset,
2350 wrap_c, 8) < 20 *
s->qscale)
2352 if (
s->mecc.sad[1](
NULL, ptr_cr + uv_dct_offset,
2353 dest_cr + uv_dct_offset,
2354 wrap_c, 8) < 20 *
s->qscale)
2360 if (
s->quantizer_noise_shaping) {
2381 memcpy(orig[0],
s->block[0],
sizeof(int16_t) * 64 * mb_block_count);
2387 for (
i = 0;
i < mb_block_count;
i++) {
2390 s->block_last_index[
i] =
s->dct_quantize(
s,
s->block[
i],
i,
s->qscale, &
overflow);
2399 s->block_last_index[
i] = -1;
2401 if (
s->quantizer_noise_shaping) {
2402 for (
i = 0;
i < mb_block_count;
i++) {
2404 s->block_last_index[
i] =
2406 orig[
i],
i,
s->qscale);
2411 if (
s->luma_elim_threshold && !
s->mb_intra)
2412 for (
i = 0;
i < 4;
i++)
2414 if (
s->chroma_elim_threshold && !
s->mb_intra)
2415 for (
i = 4;
i < mb_block_count;
i++)
2419 for (
i = 0;
i < mb_block_count;
i++) {
2420 if (
s->block_last_index[
i] == -1)
2421 s->coded_score[
i] = INT_MAX / 256;
2427 s->block_last_index[4] =
2428 s->block_last_index[5] = 0;
2430 s->block[5][0] = (1024 +
s->c_dc_scale / 2) /
s->c_dc_scale;
2432 for (
i=6;
i<12;
i++) {
2433 s->block_last_index[
i] = 0;
2434 s->block[
i][0] =
s->block[4][0];
2441 for (
i = 0;
i < mb_block_count;
i++) {
2443 if (
s->block_last_index[
i] > 0) {
2444 for (j = 63; j > 0; j--) {
2445 if (
s->block[
i][
s->intra_scantable.permutated[j]])
2448 s->block_last_index[
i] = j;
2454 switch(
s->codec_id){
2457 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2461 if (CONFIG_MPEG4_ENCODER)
2467 if (CONFIG_MSMPEG4ENC)
2471 if (CONFIG_WMV2_ENCODER)
2475 if (CONFIG_H261_ENCODER)
2483 if (CONFIG_H263_ENCODER)
2486 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2493 if (CONFIG_SPEEDHQ_ENCODER)
2516 memcpy(
d->last_mv,
s->last_mv, 2*2*2*
sizeof(
int));
2519 d->mb_skip_run=
s->mb_skip_run;
2521 d->last_dc[
i] =
s->last_dc[
i];
2524 d->mv_bits=
s->mv_bits;
2525 d->i_tex_bits=
s->i_tex_bits;
2526 d->p_tex_bits=
s->p_tex_bits;
2527 d->i_count=
s->i_count;
2528 d->skip_count=
s->skip_count;
2529 d->misc_bits=
s->misc_bits;
2533 d->qscale=
s->qscale;
2534 d->dquant=
s->dquant;
2536 d->esc3_level_length=
s->esc3_level_length;
2544 memcpy(
d->mv,
s->mv, 2*4*2*
sizeof(
int));
2545 memcpy(
d->last_mv,
s->last_mv, 2*2*2*
sizeof(
int));
2548 d->mb_skip_run=
s->mb_skip_run;
2550 d->last_dc[
i] =
s->last_dc[
i];
2553 d->mv_bits=
s->mv_bits;
2554 d->i_tex_bits=
s->i_tex_bits;
2555 d->p_tex_bits=
s->p_tex_bits;
2556 d->i_count=
s->i_count;
2557 d->skip_count=
s->skip_count;
2558 d->misc_bits=
s->misc_bits;
2560 d->mb_intra=
s->mb_intra;
2561 d->mb_skipped=
s->mb_skipped;
2562 d->mv_type=
s->mv_type;
2563 d->mv_dir=
s->mv_dir;
2565 if(
s->data_partitioning){
2567 d->tex_pb=
s->tex_pb;
2571 d->block_last_index[
i]=
s->block_last_index[
i];
2572 d->interlaced_dct=
s->interlaced_dct;
2573 d->qscale=
s->qscale;
2575 d->esc3_level_length=
s->esc3_level_length;
2580 int *dmin,
int *next_block,
int motion_x,
int motion_y)
2583 uint8_t *dest_backup[3];
2587 s->block=
s->blocks[*next_block];
2588 s->pb=
pb[*next_block];
2589 if(
s->data_partitioning){
2590 s->pb2 =
pb2 [*next_block];
2591 s->tex_pb=
tex_pb[*next_block];
2595 memcpy(dest_backup,
s->dest,
sizeof(
s->dest));
2596 s->dest[0] =
s->sc.rd_scratchpad;
2597 s->dest[1] =
s->sc.rd_scratchpad + 16*
s->linesize;
2598 s->dest[2] =
s->sc.rd_scratchpad + 16*
s->linesize + 8;
2605 if(
s->data_partitioning){
2613 score *=
s->lambda2;
2618 memcpy(
s->dest, dest_backup,
sizeof(
s->dest));
2636 else if(
w==8 &&
h==8)
2653 int chroma_mb_w =
w >>
s->chroma_x_shift;
2654 int chroma_mb_h =
h >>
s->chroma_y_shift;
2656 if(
s->mb_x*16 + 16 >
s->width )
w=
s->width -
s->mb_x*16;
2657 if(
s->mb_y*16 + 16 >
s->height)
h=
s->height-
s->mb_y*16;
2661 return s->mecc.nsse[0](
s,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2662 s->dest[0],
s->linesize, 16) +
2663 s->mecc.nsse[1](
s,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2664 s->dest[1],
s->uvlinesize, chroma_mb_h) +
2665 s->mecc.nsse[1](
s,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2666 s->dest[2],
s->uvlinesize, chroma_mb_h);
2668 return s->mecc.sse[0](
NULL,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2669 s->dest[0],
s->linesize, 16) +
2670 s->mecc.sse[1](
NULL,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2671 s->dest[1],
s->uvlinesize, chroma_mb_h) +
2672 s->mecc.sse[1](
NULL,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2673 s->dest[2],
s->uvlinesize, chroma_mb_h);
2676 return sse(
s,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2677 s->dest[0],
w,
h,
s->linesize) +
2678 sse(
s,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2679 s->dest[1],
w >>
s->chroma_x_shift,
h >>
s->chroma_y_shift,
s->uvlinesize) +
2680 sse(
s,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2681 s->dest[2],
w >>
s->chroma_x_shift,
h >>
s->chroma_y_shift,
s->uvlinesize);
2689 s->me.dia_size=
s->avctx->pre_dia_size;
2690 s->first_slice_line=1;
2691 for(
s->mb_y=
s->end_mb_y-1;
s->mb_y >=
s->start_mb_y;
s->mb_y--) {
2692 for(
s->mb_x=
s->mb_width-1;
s->mb_x >=0 ;
s->mb_x--) {
2695 s->first_slice_line=0;
2706 s->me.dia_size=
s->avctx->dia_size;
2707 s->first_slice_line=1;
2708 for(
s->mb_y=
s->start_mb_y;
s->mb_y <
s->end_mb_y;
s->mb_y++) {
2711 for(
s->mb_x=0;
s->mb_x <
s->mb_width;
s->mb_x++) {
2712 s->block_index[0]+=2;
2713 s->block_index[1]+=2;
2714 s->block_index[2]+=2;
2715 s->block_index[3]+=2;
2723 s->first_slice_line=0;
2736 const uint8_t *pix =
s->new_picture->data[0] + (yy *
s->linesize) + xx;
2738 int sum =
s->mpvencdsp.pix_sum(pix,
s->linesize);
2740 varc = (
s->mpvencdsp.pix_norm1(pix,
s->linesize) -
2741 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2743 s->mb_var [
s->mb_stride *
mb_y +
mb_x] = varc;
2744 s->mb_mean[
s->mb_stride *
mb_y +
mb_x] = (sum+128)>>8;
2745 s->me.mb_var_sum_temp += varc;
2753 if(
s->partitioned_frame){
2758 }
else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2761 }
else if (CONFIG_SPEEDHQ_ENCODER &&
s->out_format ==
FMT_SPEEDHQ) {
2773 uint8_t *ptr =
s->mb_info_ptr +
s->mb_info_size - 12;
2775 int mba =
s->mb_x +
s->mb_width * (
s->mb_y %
s->gob_index);
2776 int gobn =
s->mb_y /
s->gob_index;
2778 if (CONFIG_H263_ENCODER)
2780 bytestream_put_le32(&ptr,
offset);
2781 bytestream_put_byte(&ptr,
s->qscale);
2782 bytestream_put_byte(&ptr, gobn);
2783 bytestream_put_le16(&ptr, mba);
2784 bytestream_put_byte(&ptr, pred_x);
2785 bytestream_put_byte(&ptr, pred_y);
2787 bytestream_put_byte(&ptr, 0);
2788 bytestream_put_byte(&ptr, 0);
2796 s->mb_info_size += 12;
2797 s->prev_mb_info =
s->last_mb_info;
2809 if (!
s->mb_info_size)
2810 s->mb_info_size += 12;
2817 &&
s->slice_context_count == 1
2818 &&
s->pb.buf ==
s->avctx->internal->byte_buffer) {
2819 int lastgob_pos =
s->ptr_lastgob -
s->pb.buf;
2821 uint8_t *new_buffer =
NULL;
2822 int new_buffer_size = 0;
2824 if ((
s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2832 s->avctx->internal->byte_buffer_size + size_increase);
2836 memcpy(new_buffer,
s->avctx->internal->byte_buffer,
s->avctx->internal->byte_buffer_size);
2837 av_free(
s->avctx->internal->byte_buffer);
2838 s->avctx->internal->byte_buffer = new_buffer;
2839 s->avctx->internal->byte_buffer_size = new_buffer_size;
2841 s->ptr_lastgob =
s->pb.buf + lastgob_pos;
2851 int chr_h= 16>>
s->chroma_y_shift;
2876 s->last_dc[
i] = 128 <<
s->intra_dc_precision;
2878 s->encoding_error[
i] = 0;
2881 s->last_dc[0] = 128*8/13;
2882 s->last_dc[1] = 128*8/14;
2883 s->last_dc[2] = 128*8/14;
2886 memset(
s->last_mv, 0,
sizeof(
s->last_mv));
2890 switch(
s->codec_id){
2894 if (CONFIG_H263_ENCODER)
2898 if(CONFIG_MPEG4_ENCODER &&
s->partitioned_frame)
2905 s->first_slice_line = 1;
2906 s->ptr_lastgob =
s->pb.buf;
2907 for (mb_y_order =
s->start_mb_y; mb_y_order < s->
end_mb_y; mb_y_order++) {
2911 if (first_in_slice && mb_y_order !=
s->start_mb_y)
2913 s->last_dc[0] =
s->last_dc[1] =
s->last_dc[2] = 1024 <<
s->intra_dc_precision;
2929 int size_increase =
s->avctx->internal->byte_buffer_size/4
2937 if(
s->data_partitioning){
2951 xy=
s->mb_y*
s->mb_stride +
s->mb_x;
2957 int current_packet_size, is_gob_start;
2960 - (
s->ptr_lastgob -
s->pb.buf);
2962 is_gob_start =
s->rtp_payload_size &&
2963 current_packet_size >=
s->rtp_payload_size &&
2966 if(
s->start_mb_y ==
mb_y &&
mb_y > 0 &&
mb_x==0) is_gob_start=1;
2968 switch(
s->codec_id){
2971 if(!
s->h263_slice_structured)
2972 if(
s->mb_x ||
s->mb_y%
s->gob_index) is_gob_start=0;
2975 if(
s->mb_x==0 &&
s->mb_y!=0) is_gob_start=1;
2977 if(
s->mb_skip_run) is_gob_start=0;
2980 if(
s->mb_x==0 &&
s->mb_y!=0) is_gob_start=1;
2996 if (
s->error_rate &&
s->resync_mb_x +
s->resync_mb_y > 0) {
2998 int d = 100 /
s->error_rate;
3000 current_packet_size=0;
3001 s->pb.buf_ptr=
s->ptr_lastgob;
3006 switch(
s->codec_id){
3008 if (CONFIG_MPEG4_ENCODER) {
3015 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3022 if (CONFIG_H263_ENCODER) {
3031 s->misc_bits+=
bits -
s->last_bits;
3035 s->ptr_lastgob += current_packet_size;
3036 s->first_slice_line=1;
3037 s->resync_mb_x=
mb_x;
3038 s->resync_mb_y=
mb_y;
3042 if( (
s->resync_mb_x ==
s->mb_x)
3043 &&
s->resync_mb_y+1 ==
s->mb_y){
3044 s->first_slice_line=0;
3054 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3060 if(
s->data_partitioning){
3061 backup_s.pb2=
s->pb2;
3062 backup_s.tex_pb=
s->tex_pb;
3069 s->mv[0][0][0] =
s->p_mv_table[xy][0];
3070 s->mv[0][0][1] =
s->p_mv_table[xy][1];
3072 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
3079 j=
s->field_select[0][
i] =
s->p_field_select_table[
i][xy];
3080 s->mv[0][
i][0] =
s->p_field_mv_table[
i][j][xy][0];
3081 s->mv[0][
i][1] =
s->p_field_mv_table[
i][j][xy][1];
3084 &dmin, &next_block, 0, 0);
3093 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
3100 s->mv[0][
i][0] =
s->current_picture.motion_val[0][
s->block_index[
i]][0];
3101 s->mv[0][
i][1] =
s->current_picture.motion_val[0][
s->block_index[
i]][1];
3104 &dmin, &next_block, 0, 0);
3110 s->mv[0][0][0] =
s->b_forw_mv_table[xy][0];
3111 s->mv[0][0][1] =
s->b_forw_mv_table[xy][1];
3113 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
3119 s->mv[1][0][0] =
s->b_back_mv_table[xy][0];
3120 s->mv[1][0][1] =
s->b_back_mv_table[xy][1];
3122 &dmin, &next_block,
s->mv[1][0][0],
s->mv[1][0][1]);
3128 s->mv[0][0][0] =
s->b_bidir_forw_mv_table[xy][0];
3129 s->mv[0][0][1] =
s->b_bidir_forw_mv_table[xy][1];
3130 s->mv[1][0][0] =
s->b_bidir_back_mv_table[xy][0];
3131 s->mv[1][0][1] =
s->b_bidir_back_mv_table[xy][1];
3133 &dmin, &next_block, 0, 0);
3140 j=
s->field_select[0][
i] =
s->b_field_select_table[0][
i][xy];
3141 s->mv[0][
i][0] =
s->b_field_mv_table[0][
i][j][xy][0];
3142 s->mv[0][
i][1] =
s->b_field_mv_table[0][
i][j][xy][1];
3145 &dmin, &next_block, 0, 0);
3152 j=
s->field_select[1][
i] =
s->b_field_select_table[1][
i][xy];
3153 s->mv[1][
i][0] =
s->b_field_mv_table[1][
i][j][xy][0];
3154 s->mv[1][
i][1] =
s->b_field_mv_table[1][
i][j][xy][1];
3157 &dmin, &next_block, 0, 0);
3163 for(dir=0; dir<2; dir++){
3165 j=
s->field_select[dir][
i] =
s->b_field_select_table[dir][
i][xy];
3166 s->mv[dir][
i][0] =
s->b_field_mv_table[dir][
i][j][xy][0];
3167 s->mv[dir][
i][1] =
s->b_field_mv_table[dir][
i][j][xy][1];
3171 &dmin, &next_block, 0, 0);
3180 &dmin, &next_block, 0, 0);
3181 if(
s->h263_pred ||
s->h263_aic){
3183 s->mbintra_table[
mb_x +
mb_y*
s->mb_stride]=1;
3191 const int last_qp= backup_s.qscale;
3195 static const int dquant_tab[4]={-1,1,-2,2};
3196 int storecoefs =
s->mb_intra &&
s->dc_val[0];
3204 s->mv[0][0][0] = best_s.
mv[0][0][0];
3205 s->mv[0][0][1] = best_s.
mv[0][0][1];
3206 s->mv[1][0][0] = best_s.
mv[1][0][0];
3207 s->mv[1][0][1] = best_s.
mv[1][0][1];
3210 for(; qpi<4; qpi++){
3211 int dquant= dquant_tab[qpi];
3213 if(qp < s->
avctx->
qmin || qp >
s->avctx->qmax)
3218 dc[
i]=
s->dc_val[0][
s->block_index[
i] ];
3219 memcpy(ac[
i],
s->ac_val[0][
s->block_index[
i]],
sizeof(int16_t)*16);
3224 &dmin, &next_block,
s->mv[mvdir][0][0],
s->mv[mvdir][0][1]);
3228 s->dc_val[0][
s->block_index[
i] ]=
dc[
i];
3229 memcpy(
s->ac_val[0][
s->block_index[
i]], ac[
i],
sizeof(int16_t)*16);
3237 int mx=
s->b_direct_mv_table[xy][0];
3238 int my=
s->b_direct_mv_table[xy][1];
3240 backup_s.dquant = 0;
3245 &dmin, &next_block, mx, my);
3248 backup_s.dquant = 0;
3253 &dmin, &next_block, 0, 0);
3258 coded |=
s->block_last_index[
i];
3261 memcpy(
s->mv, best_s.
mv,
sizeof(
s->mv));
3283 &dmin, &next_block, mx, my);
3288 s->current_picture.qscale_table[xy] = best_s.
qscale;
3294 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3297 if(
s->data_partitioning){
3300 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3301 s->pb2= backup_s.pb2;
3305 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3306 s->tex_pb= backup_s.tex_pb;
3310 if (CONFIG_H263_ENCODER &&
3315 s->hdsp.put_pixels_tab[0][0](
s->dest[0],
s->sc.rd_scratchpad ,
s->linesize ,16);
3316 s->hdsp.put_pixels_tab[1][0](
s->dest[1],
s->sc.rd_scratchpad + 16*
s->linesize ,
s->uvlinesize, 8);
3317 s->hdsp.put_pixels_tab[1][0](
s->dest[2],
s->sc.rd_scratchpad + 16*
s->linesize + 8,
s->uvlinesize, 8);
3323 int motion_x = 0, motion_y = 0;
3331 motion_x=
s->mv[0][0][0] = 0;
3332 motion_y=
s->mv[0][0][1] = 0;
3337 motion_x=
s->mv[0][0][0] =
s->p_mv_table[xy][0];
3338 motion_y=
s->mv[0][0][1] =
s->p_mv_table[xy][1];
3345 j=
s->field_select[0][
i] =
s->p_field_select_table[
i][xy];
3346 s->mv[0][
i][0] =
s->p_field_mv_table[
i][j][xy][0];
3347 s->mv[0][
i][1] =
s->p_field_mv_table[
i][j][xy][1];
3355 s->mv[0][
i][0] =
s->current_picture.motion_val[0][
s->block_index[
i]][0];
3356 s->mv[0][
i][1] =
s->current_picture.motion_val[0][
s->block_index[
i]][1];
3360 if (CONFIG_MPEG4_ENCODER) {
3363 motion_x=
s->b_direct_mv_table[xy][0];
3364 motion_y=
s->b_direct_mv_table[xy][1];
3369 if (CONFIG_MPEG4_ENCODER) {
3378 s->mv[0][0][0] =
s->b_bidir_forw_mv_table[xy][0];
3379 s->mv[0][0][1] =
s->b_bidir_forw_mv_table[xy][1];
3380 s->mv[1][0][0] =
s->b_bidir_back_mv_table[xy][0];
3381 s->mv[1][0][1] =
s->b_bidir_back_mv_table[xy][1];
3386 motion_x=
s->mv[1][0][0] =
s->b_back_mv_table[xy][0];
3387 motion_y=
s->mv[1][0][1] =
s->b_back_mv_table[xy][1];
3392 motion_x=
s->mv[0][0][0] =
s->b_forw_mv_table[xy][0];
3393 motion_y=
s->mv[0][0][1] =
s->b_forw_mv_table[xy][1];
3400 j=
s->field_select[0][
i] =
s->b_field_select_table[0][
i][xy];
3401 s->mv[0][
i][0] =
s->b_field_mv_table[0][
i][j][xy][0];
3402 s->mv[0][
i][1] =
s->b_field_mv_table[0][
i][j][xy][1];
3410 j=
s->field_select[1][
i] =
s->b_field_select_table[1][
i][xy];
3411 s->mv[1][
i][0] =
s->b_field_mv_table[1][
i][j][xy][0];
3412 s->mv[1][
i][1] =
s->b_field_mv_table[1][
i][j][xy][1];
3419 for(dir=0; dir<2; dir++){
3421 j=
s->field_select[dir][
i] =
s->b_field_select_table[dir][
i][xy];
3422 s->mv[dir][
i][0] =
s->b_field_mv_table[dir][
i][j][xy][0];
3423 s->mv[dir][
i][1] =
s->b_field_mv_table[dir][
i][j][xy][1];
3434 s->last_mv_dir =
s->mv_dir;
3436 if (CONFIG_H263_ENCODER &&
3445 s->p_mv_table[xy][0]=0;
3446 s->p_mv_table[xy][1]=0;
3453 if(
s->mb_x*16 + 16 >
s->width )
w=
s->width -
s->mb_x*16;
3454 if(
s->mb_y*16 + 16 >
s->height)
h=
s->height-
s->mb_y*16;
3456 s->encoding_error[0] +=
sse(
3457 s,
s->new_picture->data[0] +
s->mb_x*16 +
s->mb_y*
s->linesize*16,
3458 s->dest[0],
w,
h,
s->linesize);
3459 s->encoding_error[1] +=
sse(
3460 s,
s->new_picture->data[1] +
s->mb_x*8 +
s->mb_y*
s->uvlinesize*chr_h,
3461 s->dest[1],
w>>1,
h>>
s->chroma_y_shift,
s->uvlinesize);
3462 s->encoding_error[2] +=
sse(
3463 s,
s->new_picture->data[2] +
s->mb_x*8 +
s->mb_y*
s->uvlinesize*chr_h,
3464 s->dest[2],
w>>1,
h>>
s->chroma_y_shift,
s->uvlinesize);
3467 if(CONFIG_H263_ENCODER &&
s->out_format ==
FMT_H263)
3470 ff_dlog(
s->avctx,
"MB %d %d bits\n",
3476 if (CONFIG_MSMPEG4ENC &&
s->msmpeg4_version &&
s->msmpeg4_version<4 &&
s->pict_type ==
AV_PICTURE_TYPE_I)
3484 #define MERGE(field) dst->field += src->field; src->field=0
3507 for(
i=0;
i<64;
i++){
3520 if (
s->next_lambda){
3521 s->current_picture_ptr->f->quality =
3522 s->current_picture.f->quality =
s->next_lambda;
3523 if(!dry_run)
s->next_lambda= 0;
3524 }
else if (!
s->fixed_qscale) {
3526 s->current_picture_ptr->f->quality =
3527 s->current_picture.f->quality =
quality;
3528 if (
s->current_picture.f->quality < 0)
3532 if(
s->adaptive_quant){
3533 switch(
s->codec_id){
3535 if (CONFIG_MPEG4_ENCODER)
3541 if (CONFIG_H263_ENCODER)
3548 s->lambda=
s->lambda_table[0];
3551 s->lambda =
s->current_picture.f->quality;
3559 s->time =
s->current_picture_ptr->f->pts *
s->avctx->time_base.num;
3562 s->pb_time=
s->pp_time - (
s->last_non_b_time -
s->time);
3565 s->pp_time=
s->time -
s->last_non_b_time;
3566 s->last_non_b_time=
s->time;
3575 int context_count =
s->slice_context_count;
3578 s->me.mb_var_sum_temp =
3579 s->me.mc_mb_var_sum_temp = 0;
3583 if (
s->out_format ==
FMT_MPEG1 || (
s->h263_pred && !
s->msmpeg4_version))
3588 s->me.scene_change_score=0;
3593 if(
s->msmpeg4_version >= 3)
s->no_rounding=1;
3594 else s->no_rounding=0;
3597 s->no_rounding ^= 1;
3606 s->lambda=
s->last_lambda_for[
s->pict_type];
3608 s->lambda=
s->last_lambda_for[
s->last_non_b_pict_type];
3613 if(
s->q_chroma_intra_matrix !=
s->q_intra_matrix )
av_freep(&
s->q_chroma_intra_matrix);
3614 if(
s->q_chroma_intra_matrix16 !=
s->q_intra_matrix16)
av_freep(&
s->q_chroma_intra_matrix16);
3615 s->q_chroma_intra_matrix =
s->q_intra_matrix;
3616 s->q_chroma_intra_matrix16 =
s->q_intra_matrix16;
3620 for(
i=1;
i<context_count;
i++){
3631 s->lambda = (
s->lambda *
s->me_penalty_compensation + 128) >> 8;
3632 s->lambda2 = (
s->lambda2 * (int64_t)
s->me_penalty_compensation + 128) >> 8;
3643 for(
i=0;
i<
s->mb_stride*
s->mb_height;
i++)
3646 if(!
s->fixed_qscale){
3648 s->avctx->execute(
s->avctx,
mb_var_thread, &
s->thread_context[0],
NULL, context_count,
sizeof(
void*));
3651 for(
i=1;
i<context_count;
i++){
3654 s->mc_mb_var_sum =
s->me.mc_mb_var_sum_temp;
3655 s->mb_var_sum =
s->me. mb_var_sum_temp;
3658 if (
s->me.scene_change_score >
s->scenechange_threshold &&
3661 for(
i=0;
i<
s->mb_stride*
s->mb_height;
i++)
3663 if(
s->msmpeg4_version >= 3)
3665 ff_dlog(
s,
"Scene change detected, encoding as I Frame %"PRId64
" %"PRId64
"\n",
3666 s->mb_var_sum,
s->mc_mb_var_sum);
3707 for(dir=0; dir<2; dir++){
3713 s->b_field_mv_table[dir][
i][j], dir ?
s->b_code :
s->f_code,
type, 1);
3724 if (
s->qscale < 3 &&
s->max_qcoeff <= 128 &&
3733 if (
s->avctx->intra_matrix) {
3735 luma_matrix =
s->avctx->intra_matrix;
3737 if (
s->avctx->chroma_intra_matrix)
3738 chroma_matrix =
s->avctx->chroma_intra_matrix;
3742 int j =
s->idsp.idct_permutation[
i];
3744 s->chroma_intra_matrix[j] =
av_clip_uint8((chroma_matrix[
i] *
s->qscale) >> 3);
3747 s->y_dc_scale_table=
3749 s->chroma_intra_matrix[0] =
3752 s->intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3754 s->chroma_intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3758 static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3759 static const uint8_t
c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3760 for (
int i = 1;
i < 64;
i++) {
3766 s->y_dc_scale_table = y;
3767 s->c_dc_scale_table =
c;
3768 s->intra_matrix[0] = 13;
3769 s->chroma_intra_matrix[0] = 14;
3771 s->intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3773 s->chroma_intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3779 s->current_picture_ptr->f->key_frame =
3781 s->current_picture_ptr->f->pict_type =
3782 s->current_picture.f->pict_type =
s->pict_type;
3784 if (
s->current_picture.f->key_frame)
3785 s->picture_in_gop_number=0;
3787 s->mb_x =
s->mb_y = 0;
3789 switch(
s->out_format) {
3790 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3796 if (CONFIG_SPEEDHQ_ENCODER)
3800 if (CONFIG_H261_ENCODER)
3806 else if (CONFIG_MSMPEG4ENC &&
s->msmpeg4_version)
3808 else if (CONFIG_MPEG4_ENCODER &&
s->h263_pred) {
3821 else if (CONFIG_H263_ENCODER)
3825 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3832 s->header_bits=
bits -
s->last_bits;
3834 for(
i=1;
i<context_count;
i++){
3837 s->avctx->execute(
s->avctx,
encode_thread, &
s->thread_context[0],
NULL, context_count,
sizeof(
void*));
3838 for(
i=1;
i<context_count;
i++){
3839 if (
s->pb.buf_end ==
s->thread_context[
i]->pb.buf)
3848 const int intra=
s->mb_intra;
3851 s->dct_count[intra]++;
3853 for(
i=0;
i<64;
i++){
3858 s->dct_error_sum[intra][
i] +=
level;
3859 level -=
s->dct_offset[intra][
i];
3862 s->dct_error_sum[intra][
i] -=
level;
3863 level +=
s->dct_offset[intra][
i];
3872 int16_t *
block,
int n,
3876 const uint8_t *scantable;
3877 const uint8_t *perm_scantable;
3879 unsigned int threshold1, threshold2;
3891 int coeff_count[64];
3892 int qmul, qadd, start_i, last_non_zero,
i,
dc;
3893 const int esc_length=
s->ac_esc_length;
3895 uint8_t * last_length;
3901 if(
s->dct_error_sum)
3907 else mpeg2_qscale =
qscale << 1;
3911 scantable=
s->intra_scantable.scantable;
3912 perm_scantable=
s->intra_scantable.permutated;
3929 qmat = n < 4 ?
s->q_intra_matrix[
qscale] :
s->q_chroma_intra_matrix[
qscale];
3930 matrix = n < 4 ?
s->intra_matrix :
s->chroma_intra_matrix;
3934 if (n > 3 &&
s->intra_chroma_ac_vlc_length) {
3935 length =
s->intra_chroma_ac_vlc_length;
3936 last_length=
s->intra_chroma_ac_vlc_last_length;
3938 length =
s->intra_ac_vlc_length;
3939 last_length=
s->intra_ac_vlc_last_length;
3942 scantable=
s->inter_scantable.scantable;
3943 perm_scantable=
s->inter_scantable.permutated;
3946 qmat =
s->q_inter_matrix[
qscale];
3948 length =
s->inter_ac_vlc_length;
3949 last_length=
s->inter_ac_vlc_last_length;
3954 threshold2= (threshold1<<1);
3956 for(
i=63;
i>=start_i;
i--) {
3957 const int j = scantable[
i];
3960 if(((
unsigned)(
level+threshold1))>threshold2){
3966 for(
i=start_i;
i<=last_non_zero;
i++) {
3967 const int j = scantable[
i];
3972 if(((
unsigned)(
level+threshold1))>threshold2){
3995 if(last_non_zero < start_i){
3996 memset(
block + start_i, 0, (64-start_i)*
sizeof(int16_t));
3997 return last_non_zero;
4000 score_tab[start_i]= 0;
4001 survivor[0]= start_i;
4004 for(
i=start_i;
i<=last_non_zero;
i++){
4005 int level_index, j, zero_distortion;
4007 int best_score=256*256*256*120;
4011 zero_distortion= dct_coeff*dct_coeff;
4013 for(level_index=0; level_index < coeff_count[
i]; level_index++){
4022 unquant_coeff= alevel*qmul + qadd;
4024 j =
s->idsp.idct_permutation[scantable[
i]];
4025 unquant_coeff = alevel *
matrix[j] * 8;
4027 j =
s->idsp.idct_permutation[scantable[
i]];
4029 unquant_coeff = (
int)( alevel * mpeg2_qscale *
matrix[j]) >> 4;
4030 unquant_coeff = (unquant_coeff - 1) | 1;
4032 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((
int)
matrix[j])) >> 5;
4033 unquant_coeff = (unquant_coeff - 1) | 1;
4038 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4040 if((
level&(~127)) == 0){
4041 for(j=survivor_count-1; j>=0; j--){
4042 int run=
i - survivor[j];
4044 score += score_tab[
i-
run];
4046 if(score < best_score){
4049 level_tab[
i+1]=
level-64;
4054 for(j=survivor_count-1; j>=0; j--){
4055 int run=
i - survivor[j];
4057 score += score_tab[
i-
run];
4058 if(score < last_score){
4061 last_level=
level-64;
4067 distortion += esc_length*
lambda;
4068 for(j=survivor_count-1; j>=0; j--){
4069 int run=
i - survivor[j];
4070 int score= distortion + score_tab[
i-
run];
4072 if(score < best_score){
4075 level_tab[
i+1]=
level-64;
4080 for(j=survivor_count-1; j>=0; j--){
4081 int run=
i - survivor[j];
4082 int score= distortion + score_tab[
i-
run];
4083 if(score < last_score){
4086 last_level=
level-64;
4094 score_tab[
i+1]= best_score;
4097 if(last_non_zero <= 27){
4098 for(; survivor_count; survivor_count--){
4099 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4103 for(; survivor_count; survivor_count--){
4104 if(score_tab[ survivor[survivor_count-1] ] <= best_score +
lambda)
4109 survivor[ survivor_count++ ]=
i+1;
4113 last_score= 256*256*256*120;
4114 for(
i= survivor[0];
i<=last_non_zero + 1;
i++){
4115 int score= score_tab[
i];
4119 if(score < last_score){
4122 last_level= level_tab[
i];
4123 last_run= run_tab[
i];
4128 s->coded_score[n] = last_score;
4131 last_non_zero= last_i - 1;
4132 memset(
block + start_i, 0, (64-start_i)*
sizeof(int16_t));
4134 if(last_non_zero < start_i)
4135 return last_non_zero;
4137 if(last_non_zero == 0 && start_i == 0){
4139 int best_score=
dc *
dc;
4141 for(
i=0;
i<coeff_count[0];
i++){
4144 int unquant_coeff, score, distortion;
4147 unquant_coeff= (alevel*qmul + qadd)>>3;
4149 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((
int)
matrix[0])) >> 5;
4150 unquant_coeff = (unquant_coeff - 1) | 1;
4152 unquant_coeff = (unquant_coeff + 4) >> 3;
4153 unquant_coeff<<= 3 + 3;
4155 distortion= (unquant_coeff -
dc) * (unquant_coeff -
dc);
4158 else score= distortion + esc_length*
lambda;
4160 if(score < best_score){
4162 best_level=
level - 64;
4165 block[0]= best_level;
4166 s->coded_score[n] = best_score -
dc*
dc;
4167 if(best_level == 0)
return -1;
4168 else return last_non_zero;
4174 block[ perm_scantable[last_non_zero] ]= last_level;
4177 for(;
i>start_i;
i -= run_tab[
i] + 1){
4178 block[ perm_scantable[
i-1] ]= level_tab[
i];
4181 return last_non_zero;
4196 if(
i==0)
s*= sqrt(0.5);
4197 if(j==0)
s*= sqrt(0.5);
4210 const uint8_t *scantable;
4211 const uint8_t *perm_scantable;
4217 int qmul, qadd, start_i, last_non_zero,
i,
dc;
4219 uint8_t * last_length;
4221 int rle_index,
run, q = 1, sum;
4223 if(
basis[0][0] == 0)
4229 scantable=
s->intra_scantable.scantable;
4230 perm_scantable=
s->intra_scantable.permutated;
4248 if (n > 3 &&
s->intra_chroma_ac_vlc_length) {
4249 length =
s->intra_chroma_ac_vlc_length;
4250 last_length=
s->intra_chroma_ac_vlc_last_length;
4252 length =
s->intra_ac_vlc_length;
4253 last_length=
s->intra_ac_vlc_last_length;
4256 scantable=
s->inter_scantable.scantable;
4257 perm_scantable=
s->inter_scantable.permutated;
4260 length =
s->inter_ac_vlc_length;
4261 last_length=
s->inter_ac_vlc_last_length;
4263 last_non_zero =
s->block_last_index[n];
4266 for(
i=0;
i<64;
i++){
4271 for(
i=0;
i<64;
i++){
4277 w= 15 + (48*qns*one +
w/2)/
w;
4290 for(
i=start_i;
i<=last_non_zero;
i++){
4291 int j= perm_scantable[
i];
4298 run_tab[rle_index++]=
run;
4308 int best_score =
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[0], 0);
4311 int run2, best_unquant_change=0, analyze_gradient;
4312 analyze_gradient = last_non_zero > 2 ||
s->quantizer_noise_shaping >= 3;
4314 if(analyze_gradient){
4315 for(
i=0;
i<64;
i++){
4325 int change, old_coeff;
4331 for(change=-1; change<=1; change+=2){
4332 int new_level=
level + change;
4333 int score, new_coeff;
4335 new_coeff= q*new_level;
4336 if(new_coeff >= 2048 || new_coeff < 0)
4339 score =
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[0],
4340 new_coeff - old_coeff);
4341 if(score<best_score){
4344 best_change= change;
4345 best_unquant_change= new_coeff - old_coeff;
4352 run2= run_tab[rle_index++];
4356 for(
i=start_i;
i<64;
i++){
4357 int j= perm_scantable[
i];
4359 int change, old_coeff;
4361 if(
s->quantizer_noise_shaping < 3 &&
i > last_non_zero + 1)
4366 else old_coeff= qmul*
level + qadd;
4367 run2= run_tab[rle_index++];
4374 for(change=-1; change<=1; change+=2){
4375 int new_level=
level + change;
4376 int score, new_coeff, unquant_change;
4383 if(new_level<0) new_coeff= qmul*new_level - qadd;
4384 else new_coeff= qmul*new_level + qadd;
4385 if(new_coeff >= 2048 || new_coeff <= -2048)
4390 if(level < 63 && level > -63){
4391 if(
i < last_non_zero)
4401 if(analyze_gradient){
4402 int g= d1[ scantable[
i] ];
4403 if(
g && (
g^new_level) >= 0)
4407 if(
i < last_non_zero){
4408 int next_i=
i + run2 + 1;
4409 int next_level=
block[ perm_scantable[next_i] ] + 64;
4411 if(next_level&(~127))
4414 if(next_i < last_non_zero)
4434 if(
i < last_non_zero){
4435 int next_i=
i + run2 + 1;
4436 int next_level=
block[ perm_scantable[next_i] ] + 64;
4438 if(next_level&(~127))
4441 if(next_i < last_non_zero)
4460 unquant_change= new_coeff - old_coeff;
4463 score +=
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[j],
4465 if(score<best_score){
4468 best_change= change;
4469 best_unquant_change= unquant_change;
4473 prev_level=
level + 64;
4474 if(prev_level&(~127))
4484 int j= perm_scantable[ best_coeff ];
4486 block[j] += best_change;
4488 if(best_coeff > last_non_zero){
4489 last_non_zero= best_coeff;
4492 for(; last_non_zero>=start_i; last_non_zero--){
4493 if(
block[perm_scantable[last_non_zero]])
4500 for(
i=start_i;
i<=last_non_zero;
i++){
4501 int j= perm_scantable[
i];
4505 run_tab[rle_index++]=
run;
4512 s->mpvencdsp.add_8x8basis(rem,
basis[j], best_unquant_change);
4518 return last_non_zero;
4533 const uint8_t *scantable,
int last)
4544 for (
i = 0;
i <= last;
i++) {
4545 const int j = scantable[
i];
4550 for (
i = 0;
i <= last;
i++) {
4551 const int j = scantable[
i];
4552 const int perm_j = permutation[j];
4558 int16_t *
block,
int n,
4561 int i, j,
level, last_non_zero, q, start_i;
4563 const uint8_t *scantable;
4566 unsigned int threshold1, threshold2;
4570 if(
s->dct_error_sum)
4574 scantable=
s->intra_scantable.scantable;
4589 qmat = n < 4 ?
s->q_intra_matrix[
qscale] :
s->q_chroma_intra_matrix[
qscale];
4592 scantable=
s->inter_scantable.scantable;
4595 qmat =
s->q_inter_matrix[
qscale];
4599 threshold2= (threshold1<<1);
4600 for(
i=63;
i>=start_i;
i--) {
4604 if(((
unsigned)(
level+threshold1))>threshold2){
4611 for(
i=start_i;
i<=last_non_zero;
i++) {
4617 if(((
unsigned)(
level+threshold1))>threshold2){
4635 scantable, last_non_zero);
4637 return last_non_zero;
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
void ff_speedhq_end_slice(MpegEncContext *s)
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
#define MV_TYPE_16X16
1 vector for the whole mb
#define AV_LOG_WARNING
Something somehow does not look correct.
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
const AVClass ff_mpv_enc_class
int data_partitioning
data partitioning flag from header
static void set_frame_distances(MpegEncContext *s)
static int get_bits_diff(MpegEncContext *s)
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
unsigned int lambda
Lagrange multiplier used in rate distortion.
#define H263_GOB_HEIGHT(h)
av_cold int ff_h261_encode_init(MpegEncContext *s)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
int b_code
backward MV resolution for B-frames (MPEG-4)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
int64_t rc_min_rate
minimum bitrate
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
void ff_speedhq_encode_picture_header(MpegEncContext *s)
int ff_wmv2_encode_picture_header(MpegEncContext *s)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
void ff_h261_encode_picture_header(MpegEncContext *s)
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
static int sse_mb(MpegEncContext *s)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static int16_t basis[64][64]
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Picture current_picture
copy of the current picture structure.
static const uint8_t mv_bits[2][16][10]
static int estimate_motion_thread(AVCodecContext *c, void *arg)
float lumi_masking
luminance masking (0-> disabled)
static void update_noise_reduction(MpegEncContext *s)
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int partitioned_frame
is current frame partitioned
uint16_t(* dct_offset)[64]
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define INTERLACED_DCT(s)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
#define CANDIDATE_MB_TYPE_INTER_I
int capabilities
Codec capabilities.
static const int BUF_BITS
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
static int put_bytes_count(const PutBitContext *s, int round_up)
#define CANDIDATE_MB_TYPE_BACKWARD_I
const uint8_t ff_mpeg2_non_linear_qscale[32]
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
#define AV_LOG_VERBOSE
Detailed information.
#define PICT_BOTTOM_FIELD
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
void ff_init_block_index(MpegEncContext *s)
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define FF_MPV_FLAG_SKIP_RD
const uint8_t ff_mpeg12_dc_scale_table[4][32]
struct AVCodecContext * avctx
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
av_cold int ff_rate_control_init(MpegEncContext *s)
static double sqr(double in)
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
#define CANDIDATE_MB_TYPE_SKIPPED
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
PutBitContext pb
bit output
int mb_decision
macroblock decision mode
#define CANDIDATE_MB_TYPE_INTER
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
int qmax
maximum quantizer
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
void ff_flv_encode_picture_header(MpegEncContext *s)
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
static int encode_picture(MpegEncContext *s)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
trying all byte sequences a megabyte in length and selecting the best-looking sequence will yield cases to try. But a word about quality:
#define MPEGVIDEO_MAX_PLANES
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
#define CANDIDATE_MB_TYPE_FORWARD_I
#define FF_MPV_COMMON_MOTION_EST_OPTS
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
#define FF_MPV_COMMON_OPTS
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
static int estimate_qp(MpegEncContext *s, int dry_run)
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
const struct AVCodec * codec
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
#define CANDIDATE_MB_TYPE_BIDIR
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
int flags
AV_CODEC_FLAG_*.
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf type
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
static av_always_inline float scale(float x, float s)
#define FF_MPV_FLAG_CBP_RD
void ff_h263_update_motion_val(MpegEncContext *s)
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static void mpv_encode_init_static(void)
#define CANDIDATE_MB_TYPE_INTER4V
static int put_bytes_left(const PutBitContext *s, int round_up)
#define MAX_PICTURE_COUNT
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
void ff_mpv_common_end(MpegEncContext *s)
static int frame_start(MpegEncContext *s)
static int ff_thread_once(char *control, void(*routine)(void))
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static void update_mb_info(MpegEncContext *s, int startcode)
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static uint8_t default_fcode_tab[MAX_MV *2+1]
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
static void build_basis(uint8_t *perm)
int has_b_frames
Size of the frame reordering buffer in the decoder.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
const uint8_t ff_h263_chroma_qscale_table[32]
static int get_sae(const uint8_t *src, int ref, int stride)
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
#define LOCAL_ALIGNED_16(t, v,...)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int64_t rc_max_rate
maximum bitrate
uint64_t error[AV_NUM_DATA_POINTERS]
error
This structure describes the bitrate properties of an encoded bitstream.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
float p_masking
p block masking (0-> disabled)
static int mb_var_thread(AVCodecContext *c, void *arg)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
int rc_buffer_size
decoder bitstream buffer size
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
PutBitContext pb2
used for data partitioned VOPs
#define LIBAVUTIL_VERSION_INT
void ff_write_pass1_stats(MpegEncContext *s)
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Describe the class of an AVClass context structure.
#define PTRDIFF_SPECIFIER
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static void write_mb_info(MpegEncContext *s)
int f_code
forward MV resolution
static int bias(int x, int c)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
struct AVCodecInternal * internal
Private context used for internal data.
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
int64_t bit_rate
the average bitrate
#define ROUNDED_DIV(a, b)
void ff_faandct(int16_t *data)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void ff_fdct_ifast(int16_t *data)
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
int ildct_cmp
interlaced DCT comparison function
int ff_vbv_update(MpegEncContext *s, int frame_size)
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
void ff_jpeg_fdct_islow_8(int16_t *data)
int trellis
trellis RD quantization
void ff_mpeg4_init_partitions(MpegEncContext *s)
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
void ff_jpeg_fdct_islow_10(int16_t *data)
void ff_mpeg1_encode_init(MpegEncContext *s)
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static int weight(int i, int blen, int offset)
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static const uint8_t tab[16]
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
#define FF_DEBUG_DCT_COEFF
void ff_dct_encode_init_x86(MpegEncContext *s)
char * stats_out
pass1 encoding statistics output buffer
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
int display_picture_number
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
void ff_mpeg4_clean_buffers(MpegEncContext *s)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
static int shift(int a, int b)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
void ff_mpeg1_clean_buffers(MpegEncContext *s)
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
const int16_t ff_mpeg4_default_intra_matrix[64]
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
#define AV_NOPTS_VALUE
Undefined timestamp value.
static const AVOption mpv_generic_options[]
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
#define FF_MPV_FLAG_QP_RD
#define CANDIDATE_MB_TYPE_DIRECT0
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
const uint16_t ff_mpeg1_default_intra_matrix[256]
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define FF_COMPLIANCE_NORMAL
The reader does not expect b to be semantically here, and if the code is changed by maybe adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
const int16_t ff_mpeg4_default_non_intra_matrix[64]
#define ALLOCZ_ARRAYS(p, mult, numb)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf offset
#define MV_TYPE_FIELD
2 vectors, one per field
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int flags
A combination of AV_PKT_FLAG values.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
unsigned int byte_buffer_size
#define UNI_AC_ENC_INDEX(run, level)
#define AV_LOG_INFO
Standard information.
static void update_qscale(MpegEncContext *s)
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int put_bits_count(PutBitContext *s)
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
static int encode_thread(AVCodecContext *c, void *arg)
const uint32_t ff_square_tab[512]
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
PutBitContext tex_pb
used for data partitioned VOPs
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
static const int32_t qmat16[MAT_SIZE]
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end, which will always be 0.
void ff_h261_reorder_mb_index(MpegEncContext *s)
void ff_h263_encode_init(MpegEncContext *s)
const uint16_t ff_h263_format[8][2]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
int ff_init_me(MpegEncContext *s)
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
av_cold void ff_rate_control_uninit(MpegEncContext *s)
#define CANDIDATE_MB_TYPE_DIRECT
double buffer_index
amount of bits in the video/audio buffer
const uint8_t ff_zigzag_direct[64]
#define AV_CODEC_FLAG_CLOSED_GOP
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
#define CANDIDATE_MB_TYPE_BIDIR_I
const uint16_t ff_inv_aanscales[64]
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
void ff_h263_loop_filter(MpegEncContext *s)
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
#define AV_INPUT_BUFFER_PADDING_SIZE
void ff_set_mpeg4_time(MpegEncContext *s)
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
int dquant
qscale difference to prev qscale
float dark_masking
darkness masking (0-> disabled)
main external API structure.
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
#define CANDIDATE_MB_TYPE_INTRA
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
int qmin
minimum quantizer
static int select_input_picture(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
static void frame_end(MpegEncContext *s)
static int ref[MAX_W *MAX_W]
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static float mean(const float *input, int size)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define CANDIDATE_MB_TYPE_FORWARD
#define FF_MB_DECISION_RD
rate distortion
void ff_h263_encode_picture_header(MpegEncContext *s)
@ AV_PICTURE_TYPE_P
Predicted.
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Undefined Behavior In the C some operations are like signed integer overflow
void(* fdct)(int16_t *block)
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
float rc_max_available_vbv_use
Ratecontrol will attempt to use, at maximum, this fraction of what can be used without an underflow.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
int ff_rv10_encode_picture_header(MpegEncContext *s)
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
int slices
Number of slices.
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
This structure stores compressed data.
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
static const double coeff[2][5]
The exact code depends on how similar the blocks are and how related they are to the block
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
int misc_bits
cbp, mb_type
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
void ff_get_2pass_fcode(MpegEncContext *s)
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
#define FF_MPV_FLAG_STRICT_GOP
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static const uint8_t sp5x_qscale_five_quant_table[][64]
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
void ff_rv20_encode_picture_header(MpegEncContext *s)
#define CANDIDATE_MB_TYPE_BACKWARD
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
const uint16_t ff_aanscales[64]
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
static void write_slice_end(MpegEncContext *s)