Go to the documentation of this file.
34 #include "config_components.h"
81 #define QUANT_BIAS_SHIFT 8
83 #define QMAT_SHIFT_MMX 16
109 uint16_t (*
qmat16)[2][64],
110 const uint16_t *quant_matrix,
111 int bias,
int qmin,
int qmax,
int intra)
122 else qscale2 =
qscale << 1;
129 for (
i = 0;
i < 64;
i++) {
130 const int j =
s->idsp.idct_permutation[
i];
131 int64_t den = (int64_t) qscale2 * quant_matrix[j];
141 for (
i = 0;
i < 64;
i++) {
142 const int j =
s->idsp.idct_permutation[
i];
143 int64_t den =
ff_aanscales[
i] * (int64_t) qscale2 * quant_matrix[j];
153 for (
i = 0;
i < 64;
i++) {
154 const int j =
s->idsp.idct_permutation[
i];
155 int64_t den = (int64_t) qscale2 * quant_matrix[j];
175 for (
i = intra;
i < 64;
i++) {
187 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
194 if (
s->q_scale_type == 1 && 0) {
196 int bestdiff=INT_MAX;
204 if (
diff < bestdiff) {
213 s->qscale =
av_clip(
s->qscale,
s->avctx->qmin,
s->vbv_ignore_qmax ? 31 :
s->avctx->qmax);
226 for (
i = 0;
i < 64;
i++) {
238 int8_t *
const qscale_table =
s->current_picture.qscale_table;
241 for (
i = 0;
i <
s->mb_num;
i++) {
242 unsigned int lam =
s->lambda_table[
s->mb_index2xy[
i]];
244 qscale_table[
s->mb_index2xy[
i]] =
av_clip(qp,
s->avctx->qmin,
252 #define COPY(a) dst->a= src->a
268 for (
int i = -16;
i < 16;
i++)
287 s->input_picture_number = 0;
288 s->picture_in_gop_number = 0;
297 if (CONFIG_H263_ENCODER)
299 if (!
s->dct_quantize)
303 s->fast_dct_quantize =
s->dct_quantize;
304 if (
s->avctx->trellis)
343 "keyframe interval too large!, reducing it from %d to %d\n",
355 "max b frames must be 0 or positive for mpegvideo based encoders\n");
366 s->rtp_mode = !!
s->rtp_payload_size;
370 if (
s->intra_dc_precision < 0) {
371 s->intra_dc_precision += 8;
372 }
else if (
s->intra_dc_precision >= 8)
373 s->intra_dc_precision -= 8;
375 if (
s->intra_dc_precision < 0) {
377 "intra dc precision must be positive, note some applications use"
378 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
388 if (
s->gop_size <= 1) {
442 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
459 "impossible bitrate constraints, this will fail\n");
469 if (!
s->fixed_qscale &&
475 if (nbt <= INT_MAX) {
488 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
489 "specified vbv buffer is too large for the given bitrate!\n");
501 "OBMC is only supported with simple mb decision\n");
516 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
569 if (
s->scenechange_threshold < 1000000000 &&
572 "closed gop with scene change detection are not supported yet, "
573 "set threshold to 1000000000\n");
581 "low delay forcing is only available for mpeg2, "
582 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
585 if (
s->max_b_frames != 0) {
587 "B-frames cannot be used with low delay\n");
592 if (
s->q_scale_type == 1) {
595 "non linear quant only supports qmax <= 28 currently\n");
608 "notice: b_frame_strategy only affects the first pass\n");
609 s->b_frame_strategy = 0;
623 s->inter_quant_bias = 0;
625 s->intra_quant_bias = 0;
640 "timebase %d/%d not supported by MPEG 4 standard, "
641 "the maximum admitted value for the timebase denominator "
652 avctx->
delay =
s->low_delay ? 0 : (
s->max_b_frames + 1);
657 avctx->
delay =
s->low_delay ? 0 : (
s->max_b_frames + 1);
660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
674 if (!CONFIG_SPEEDHQ_ENCODER)
682 if (!CONFIG_H261_ENCODER)
686 "The specified picture size of %dx%d is not valid for the "
687 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
688 s->width,
s->height);
697 if (!CONFIG_H263_ENCODER)
700 s->width,
s->height) == 8) {
702 "The specified picture size of %dx%d is not valid for "
703 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
704 "352x288, 704x576, and 1408x1152. "
705 "Try H.263+.\n",
s->width,
s->height);
717 s->modified_quant =
s->h263_aic;
719 s->unrestricted_mv =
s->obmc ||
s->loop_filter ||
s->umvplus;
729 s->unrestricted_mv = 1;
743 s->modified_quant = 1;
747 s->unrestricted_mv = 0;
752 s->unrestricted_mv = 1;
753 s->low_delay =
s->max_b_frames ? 0 : 1;
754 avctx->
delay =
s->low_delay ? 0 : (
s->max_b_frames + 1);
759 s->unrestricted_mv = 1;
760 s->msmpeg4_version = 2;
767 s->unrestricted_mv = 1;
768 s->msmpeg4_version = 3;
769 s->flipflop_rounding = 1;
776 s->unrestricted_mv = 1;
777 s->msmpeg4_version = 4;
778 s->flipflop_rounding = 1;
785 s->unrestricted_mv = 1;
786 s->msmpeg4_version = 5;
787 s->flipflop_rounding = 1;
799 s->progressive_frame =
826 if (
s->noise_reduction) {
833 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) &&
s->modified_quant)
836 if (
s->slice_context_count > 1) {
840 s->h263_slice_structured = 1;
843 s->quant_precision = 5;
846 ff_set_cmp(&
s->mecc,
s->mecc.frame_skip_cmp,
s->frame_skip_cmp);
848 if (CONFIG_H261_ENCODER &&
s->out_format ==
FMT_H261) {
850 }
else if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
853 }
else if (CONFIG_H263_ENCODER &&
s->out_format ==
FMT_H263) {
860 for (
i = 0;
i < 64;
i++) {
861 int j =
s->idsp.idct_permutation[
i];
874 s->chroma_intra_matrix[j] =
898 if (
s->b_frame_strategy == 2) {
899 for (
i = 0;
i <
s->max_b_frames + 2;
i++) {
901 if (!
s->tmp_frames[
i])
905 s->tmp_frames[
i]->width =
s->width >>
s->brd_scale;
906 s->tmp_frames[
i]->height =
s->height >>
s->brd_scale;
941 if(
s->q_chroma_intra_matrix !=
s->q_intra_matrix )
av_freep(&
s->q_chroma_intra_matrix);
942 if(
s->q_chroma_intra_matrix16 !=
s->q_intra_matrix16)
av_freep(&
s->q_chroma_intra_matrix16);
943 s->q_chroma_intra_matrix=
NULL;
944 s->q_chroma_intra_matrix16=
NULL;
961 for (y = 0; y < 16; y++) {
962 for (x = 0; x < 16; x++) {
979 for (y = 0; y <
h; y += 16) {
980 for (x = 0; x <
w; x += 16) {
987 acc += sae + 500 < sad;
996 s->chroma_x_shift,
s->chroma_y_shift,
s->out_format,
997 s->mb_stride,
s->mb_width,
s->mb_height,
s->b8_stride,
998 &
s->linesize, &
s->uvlinesize);
1005 int i, display_picture_number = 0,
ret;
1006 int encoding_delay =
s->max_b_frames ?
s->max_b_frames
1007 : (
s->low_delay ? 0 : 1);
1008 int flush_offset = 1;
1013 display_picture_number =
s->input_picture_number++;
1017 int64_t last =
s->user_specified_pts;
1021 "Invalid pts (%"PRId64
") <= last (%"PRId64
")\n",
1026 if (!
s->low_delay && display_picture_number == 1)
1027 s->dts_delta =
pts - last;
1029 s->user_specified_pts =
pts;
1032 s->user_specified_pts =
1033 pts =
s->user_specified_pts + 1;
1035 "Warning: AVFrame.pts=? trying to guess (%"PRId64
")\n",
1038 pts = display_picture_number;
1042 if (!pic_arg->
buf[0] ||
1044 pic_arg->
linesize[1] !=
s->uvlinesize ||
1047 if ((
s->width & 15) || (
s->height & 15))
1055 pic_arg->
linesize[1],
s->linesize,
s->uvlinesize);
1061 pic = &
s->picture[
i];
1078 int h_chroma_shift, v_chroma_shift;
1083 for (
i = 0;
i < 3;
i++) {
1085 int dst_stride =
i ?
s->uvlinesize :
s->linesize;
1086 int h_shift =
i ? h_chroma_shift : 0;
1087 int v_shift =
i ? v_chroma_shift : 0;
1088 int w =
s->width >> h_shift;
1089 int h =
s->height >> v_shift;
1091 uint8_t *dst = pic->
f->
data[
i];
1095 && !
s->progressive_sequence
1096 &&
FFALIGN(
s->height, 32) -
s->height > 16)
1099 if (!
s->avctx->rc_buffer_size)
1102 if (src_stride == dst_stride)
1103 memcpy(dst,
src, src_stride *
h);
1106 uint8_t *dst2 = dst;
1108 memcpy(dst2,
src,
w);
1113 if ((
s->width & 15) || (
s->height & (vpad-1))) {
1114 s->mpvencdsp.draw_edges(dst, dst_stride,
1133 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1134 if (
s->input_picture[flush_offset])
1137 if (flush_offset <= 1)
1140 encoding_delay = encoding_delay - flush_offset + 1;
1145 s->input_picture[
i - flush_offset] =
s->input_picture[
i];
1147 s->input_picture[encoding_delay] = (
Picture*) pic;
1156 int64_t score64 = 0;
1158 for (plane = 0; plane < 3; plane++) {
1160 const int bw = plane ? 1 : 2;
1161 for (y = 0; y <
s->mb_height * bw; y++) {
1162 for (x = 0; x <
s->mb_width * bw; x++) {
1163 int off = p->
shared ? 0 : 16;
1164 uint8_t *dptr = p->
f->
data[plane] + 8 * (x + y *
stride) + off;
1165 uint8_t *rptr =
ref->f->data[plane] + 8 * (x + y *
stride);
1166 int v =
s->mecc.frame_skip_cmp[1](
s, dptr, rptr,
stride, 8);
1168 switch (
FFABS(
s->frame_skip_exp)) {
1169 case 0: score =
FFMAX(score, v);
break;
1170 case 1: score +=
FFABS(v);
break;
1171 case 2: score64 += v * (int64_t)v;
break;
1172 case 3: score64 +=
FFABS(v * (int64_t)v * v);
break;
1173 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v);
break;
1182 if (
s->frame_skip_exp < 0)
1183 score64 = pow(score64 / (
double)(
s->mb_width *
s->mb_height),
1184 -1.0/
s->frame_skip_exp);
1188 if (score64 < ((
s->frame_skip_factor * (int64_t)
s->lambda) >> 8))
1217 const int scale =
s->brd_scale;
1221 int64_t best_rd = INT64_MAX;
1222 int best_b_count = -1;
1237 b_lambda = p_lambda;
1241 for (
i = 0;
i <
s->max_b_frames + 2;
i++) {
1242 Picture pre_input, *pre_input_ptr =
i ?
s->input_picture[
i - 1] :
1243 s->next_picture_ptr;
1246 if (pre_input_ptr && (!
i ||
s->input_picture[
i - 1])) {
1247 pre_input = *pre_input_ptr;
1256 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[0],
1257 s->tmp_frames[
i]->linesize[0],
1261 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[1],
1262 s->tmp_frames[
i]->linesize[1],
1266 s->mpvencdsp.shrink[
scale](
s->tmp_frames[
i]->data[2],
1267 s->tmp_frames[
i]->linesize[2],
1274 for (j = 0; j <
s->max_b_frames + 1; j++) {
1278 if (!
s->input_picture[j])
1291 c->mb_decision =
s->avctx->mb_decision;
1292 c->me_cmp =
s->avctx->me_cmp;
1293 c->mb_cmp =
s->avctx->mb_cmp;
1294 c->me_sub_cmp =
s->avctx->me_sub_cmp;
1296 c->time_base =
s->avctx->time_base;
1297 c->max_b_frames =
s->max_b_frames;
1315 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1316 int is_p =
i % (j + 1) == j ||
i ==
s->max_b_frames;
1318 s->tmp_frames[
i + 1]->pict_type = is_p ?
1320 s->tmp_frames[
i + 1]->quality = is_p ? p_lambda : b_lambda;
1339 rd +=
c->error[0] +
c->error[1] +
c->error[2];
1357 return best_b_count;
1365 s->reordered_input_picture[
i - 1] =
s->reordered_input_picture[
i];
1369 if (!
s->reordered_input_picture[0] &&
s->input_picture[0]) {
1370 if (
s->frame_skip_threshold ||
s->frame_skip_factor) {
1371 if (
s->picture_in_gop_number <
s->gop_size &&
1372 s->next_picture_ptr &&
1384 !
s->next_picture_ptr ||
s->intra_only) {
1385 s->reordered_input_picture[0] =
s->input_picture[0];
1387 s->reordered_input_picture[0]->f->coded_picture_number =
1388 s->coded_picture_number++;
1393 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1394 int pict_num =
s->input_picture[0]->f->display_picture_number +
i;
1396 if (pict_num >=
s->rc_context.num_entries)
1398 if (!
s->input_picture[
i]) {
1403 s->input_picture[
i]->f->pict_type =
1404 s->rc_context.entry[pict_num].new_pict_type;
1408 if (
s->b_frame_strategy == 0) {
1409 b_frames =
s->max_b_frames;
1410 while (b_frames && !
s->input_picture[b_frames])
1412 }
else if (
s->b_frame_strategy == 1) {
1413 for (
i = 1;
i <
s->max_b_frames + 1;
i++) {
1414 if (
s->input_picture[
i] &&
1415 s->input_picture[
i]->b_frame_score == 0) {
1416 s->input_picture[
i]->b_frame_score =
1418 s->input_picture[
i ]->f->data[0],
1419 s->input_picture[
i - 1]->f->data[0],
1423 for (
i = 0;
i <
s->max_b_frames + 1;
i++) {
1424 if (!
s->input_picture[
i] ||
1425 s->input_picture[
i]->b_frame_score - 1 >
1426 s->mb_num /
s->b_sensitivity)
1430 b_frames =
FFMAX(0,
i - 1);
1433 for (
i = 0;
i < b_frames + 1;
i++) {
1434 s->input_picture[
i]->b_frame_score = 0;
1436 }
else if (
s->b_frame_strategy == 2) {
1444 for (
i = b_frames - 1;
i >= 0;
i--) {
1445 int type =
s->input_picture[
i]->f->pict_type;
1450 b_frames ==
s->max_b_frames) {
1452 "warning, too many B-frames in a row\n");
1455 if (
s->picture_in_gop_number + b_frames >=
s->gop_size) {
1457 s->gop_size >
s->picture_in_gop_number) {
1458 b_frames =
s->gop_size -
s->picture_in_gop_number - 1;
1470 s->reordered_input_picture[0] =
s->input_picture[b_frames];
1473 s->reordered_input_picture[0]->f->coded_picture_number =
1474 s->coded_picture_number++;
1475 for (
i = 0;
i < b_frames;
i++) {
1476 s->reordered_input_picture[
i + 1] =
s->input_picture[
i];
1477 s->reordered_input_picture[
i + 1]->f->pict_type =
1479 s->reordered_input_picture[
i + 1]->f->coded_picture_number =
1480 s->coded_picture_number++;
1487 if (
s->reordered_input_picture[0]) {
1488 s->reordered_input_picture[0]->reference =
1489 s->reordered_input_picture[0]->f->pict_type !=
1493 s->reordered_input_picture[0]->f)))
1496 if (
s->reordered_input_picture[0]->shared ||
s->avctx->rc_buffer_size) {
1504 pic = &
s->picture[
i];
1506 pic->
reference =
s->reordered_input_picture[0]->reference;
1517 s->reordered_input_picture[0]->shared = 0;
1519 s->current_picture_ptr = pic;
1522 s->current_picture_ptr =
s->reordered_input_picture[0];
1523 for (
i = 0;
i < 4;
i++) {
1524 if (
s->new_picture->data[
i])
1528 s->picture_number =
s->new_picture->display_picture_number;
1535 if (
s->unrestricted_mv &&
1536 s->current_picture.reference &&
1539 int hshift =
desc->log2_chroma_w;
1540 int vshift =
desc->log2_chroma_h;
1541 s->mpvencdsp.draw_edges(
s->current_picture.f->data[0],
1542 s->current_picture.f->linesize[0],
1543 s->h_edge_pos,
s->v_edge_pos,
1546 s->mpvencdsp.draw_edges(
s->current_picture.f->data[1],
1547 s->current_picture.f->linesize[1],
1548 s->h_edge_pos >> hshift,
1549 s->v_edge_pos >> vshift,
1553 s->mpvencdsp.draw_edges(
s->current_picture.f->data[2],
1554 s->current_picture.f->linesize[2],
1555 s->h_edge_pos >> hshift,
1556 s->v_edge_pos >> vshift,
1564 s->last_pict_type =
s->pict_type;
1565 s->last_lambda_for [
s->pict_type] =
s->current_picture_ptr->f->quality;
1567 s->last_non_b_pict_type =
s->pict_type;
1574 for (intra = 0; intra < 2; intra++) {
1575 if (
s->dct_count[intra] > (1 << 16)) {
1576 for (
i = 0;
i < 64;
i++) {
1577 s->dct_error_sum[intra][
i] >>= 1;
1579 s->dct_count[intra] >>= 1;
1582 for (
i = 0;
i < 64;
i++) {
1583 s->dct_offset[intra][
i] = (
s->noise_reduction *
1584 s->dct_count[intra] +
1585 s->dct_error_sum[intra][
i] / 2) /
1586 (
s->dct_error_sum[intra][
i] + 1);
1597 s->last_picture_ptr !=
s->next_picture_ptr &&
1598 s->last_picture_ptr->f->buf[0]) {
1602 s->current_picture_ptr->f->pict_type =
s->pict_type;
1607 s->current_picture_ptr)) < 0)
1611 s->last_picture_ptr =
s->next_picture_ptr;
1612 s->next_picture_ptr =
s->current_picture_ptr;
1615 if (
s->last_picture_ptr) {
1617 if (
s->last_picture_ptr->f->buf[0] &&
1619 s->last_picture_ptr)) < 0)
1622 if (
s->next_picture_ptr) {
1624 if (
s->next_picture_ptr->f->buf[0] &&
1626 s->next_picture_ptr)) < 0)
1632 for (
i = 0;
i < 4;
i++) {
1634 s->current_picture.f->data[
i] +=
1635 s->current_picture.f->linesize[
i];
1637 s->current_picture.f->linesize[
i] *= 2;
1638 s->last_picture.f->linesize[
i] *= 2;
1639 s->next_picture.f->linesize[
i] *= 2;
1644 s->dct_unquantize_intra =
s->dct_unquantize_mpeg2_intra;
1645 s->dct_unquantize_inter =
s->dct_unquantize_mpeg2_inter;
1647 s->dct_unquantize_intra =
s->dct_unquantize_h263_intra;
1648 s->dct_unquantize_inter =
s->dct_unquantize_h263_inter;
1650 s->dct_unquantize_intra =
s->dct_unquantize_mpeg1_intra;
1651 s->dct_unquantize_inter =
s->dct_unquantize_mpeg1_inter;
1654 if (
s->dct_error_sum) {
1663 const AVFrame *pic_arg,
int *got_packet)
1666 int i, stuffing_count,
ret;
1667 int context_count =
s->slice_context_count;
1669 s->vbv_ignore_qmax = 0;
1671 s->picture_in_gop_number++;
1681 if (
s->new_picture->data[0]) {
1682 int growing_buffer = context_count == 1 && !
s->data_partitioning;
1683 size_t pkt_size = 10000 +
s->mb_width *
s->mb_height *
1696 s->mb_width*
s->mb_height*12);
1697 s->prev_mb_info =
s->last_mb_info =
s->mb_info_size = 0;
1700 for (
i = 0;
i < context_count;
i++) {
1701 int start_y =
s->thread_context[
i]->start_mb_y;
1703 int h =
s->mb_height;
1704 uint8_t *start =
pkt->
data + (size_t)(((int64_t)
pkt->
size) * start_y /
h);
1705 uint8_t *end =
pkt->
data + (size_t)(((int64_t)
pkt->
size) * end_y /
h);
1710 s->pict_type =
s->new_picture->pict_type;
1717 if (growing_buffer) {
1727 if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
s->out_format ==
FMT_MJPEG)
1737 s->lambda <
s->lmax) {
1738 s->next_lambda =
FFMAX(
s->lambda + min_step,
s->lambda *
1739 (
s->qscale + 1) /
s->qscale);
1740 if (
s->adaptive_quant) {
1742 for (
i = 0;
i <
s->mb_height *
s->mb_stride;
i++)
1743 s->lambda_table[
i] =
1744 FFMAX(
s->lambda_table[
i] + min_step,
1745 s->lambda_table[
i] * (
s->qscale + 1) /
1751 if (
s->flipflop_rounding ||
1754 s->no_rounding ^= 1;
1757 s->time_base =
s->last_time_base;
1758 s->last_non_b_time =
s->time -
s->pp_time;
1760 for (
i = 0;
i < context_count;
i++) {
1764 s->vbv_ignore_qmax = 1;
1775 for (
i = 0;
i < 4;
i++) {
1776 s->current_picture_ptr->encoding_error[
i] =
s->current_picture.encoding_error[
i];
1777 avctx->
error[
i] +=
s->current_picture_ptr->encoding_error[
i];
1780 s->current_picture_ptr->encoding_error,
1786 s->misc_bits +
s->i_tex_bits +
1792 s->stuffing_bits = 8*stuffing_count;
1793 if (stuffing_count) {
1799 switch (
s->codec_id) {
1802 while (stuffing_count--) {
1809 stuffing_count -= 4;
1810 while (stuffing_count--) {
1816 s->stuffing_bits = 0;
1834 int minbits =
s->frame_bits - 8 *
1835 (
s->vbv_delay_pos - 1);
1836 double bits =
s->rc_context.buffer_index + minbits - inbits;
1837 uint8_t *
const vbv_delay_ptr =
s->pb.buf +
s->vbv_delay_pos;
1841 "Internal error, negative bits\n");
1853 vbv_delay_ptr[0] &= 0xF8;
1856 vbv_delay_ptr[2] &= 0x07;
1865 (uint8_t*)props, props_size);
1871 s->total_bits +=
s->frame_bits;
1873 pkt->
pts =
s->current_picture.f->pts;
1875 if (!
s->current_picture.f->coded_picture_number)
1882 if (
s->current_picture.f->key_frame)
1892 if (!
s->picture[
i].reference)
1904 int n,
int threshold)
1906 static const char tab[64] = {
1907 3, 2, 2, 1, 1, 1, 1, 1,
1908 1, 1, 1, 1, 1, 1, 1, 1,
1909 1, 1, 1, 1, 1, 1, 1, 1,
1910 0, 0, 0, 0, 0, 0, 0, 0,
1911 0, 0, 0, 0, 0, 0, 0, 0,
1912 0, 0, 0, 0, 0, 0, 0, 0,
1913 0, 0, 0, 0, 0, 0, 0, 0,
1914 0, 0, 0, 0, 0, 0, 0, 0
1919 int16_t *
block =
s->block[n];
1920 const int last_index =
s->block_last_index[n];
1923 if (threshold < 0) {
1925 threshold = -threshold;
1930 if (last_index <= skip_dc - 1)
1933 for (
i = 0;
i <= last_index;
i++) {
1934 const int j =
s->intra_scantable.permutated[
i];
1937 if (skip_dc &&
i == 0)
1941 }
else if (
level > 1) {
1947 if (score >= threshold)
1949 for (
i = skip_dc;
i <= last_index;
i++) {
1950 const int j =
s->intra_scantable.permutated[
i];
1954 s->block_last_index[n] = 0;
1956 s->block_last_index[n] = -1;
1963 const int maxlevel =
s->max_qcoeff;
1964 const int minlevel =
s->min_qcoeff;
1972 for (;
i <= last_index;
i++) {
1973 const int j =
s->intra_scantable.permutated[
i];
1976 if (
level > maxlevel) {
1979 }
else if (
level < minlevel) {
1989 "warning, clipping %d dct coefficients to %d..%d\n",
1997 for (y = 0; y < 8; y++) {
1998 for (x = 0; x < 8; x++) {
2004 for (y2 =
FFMAX(y - 1, 0); y2 <
FFMIN(8, y + 2); y2++) {
2005 for (x2=
FFMAX(x - 1, 0); x2 <
FFMIN(8, x + 2); x2++) {
2006 int v = ptr[x2 + y2 *
stride];
2018 int motion_x,
int motion_y,
2019 int mb_block_height,
2028 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2029 (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2031 int16_t orig[12][64];
2032 const int mb_x =
s->mb_x;
2033 const int mb_y =
s->mb_y;
2037 int uv_dct_offset =
s->uvlinesize * 8;
2038 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2039 ptrdiff_t wrap_y, wrap_c;
2041 for (
i = 0;
i < mb_block_count;
i++)
2042 skip_dct[
i] =
s->skipdct;
2044 if (
s->adaptive_quant) {
2045 const int last_qp =
s->qscale;
2046 const int mb_xy =
mb_x +
mb_y *
s->mb_stride;
2048 s->lambda =
s->lambda_table[mb_xy];
2052 s->qscale =
s->current_picture_ptr->qscale_table[mb_xy];
2053 s->dquant =
s->qscale - last_qp;
2074 wrap_y =
s->linesize;
2075 wrap_c =
s->uvlinesize;
2076 ptr_y =
s->new_picture->data[0] +
2078 ptr_cb =
s->new_picture->data[1] +
2079 (
mb_y * mb_block_height * wrap_c) +
mb_x * mb_block_width;
2080 ptr_cr =
s->new_picture->data[2] +
2081 (
mb_y * mb_block_height * wrap_c) +
mb_x * mb_block_width;
2084 uint8_t *ebuf =
s->sc.edge_emu_buffer + 38 * wrap_y;
2087 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2090 s->width,
s->height);
2092 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2094 mb_block_width, mb_block_height,
2095 mb_x * mb_block_width,
mb_y * mb_block_height,
2097 ptr_cb = ebuf + 16 * wrap_y;
2098 s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2100 mb_block_width, mb_block_height,
2101 mb_x * mb_block_width,
mb_y * mb_block_height,
2103 ptr_cr = ebuf + 16 * wrap_y + 16;
2108 int progressive_score, interlaced_score;
2110 s->interlaced_dct = 0;
2111 progressive_score =
s->mecc.ildct_cmp[4](
s, ptr_y,
NULL, wrap_y, 8) +
2112 s->mecc.ildct_cmp[4](
s, ptr_y + wrap_y * 8,
2113 NULL, wrap_y, 8) - 400;
2115 if (progressive_score > 0) {
2116 interlaced_score =
s->mecc.ildct_cmp[4](
s, ptr_y,
2117 NULL, wrap_y * 2, 8) +
2118 s->mecc.ildct_cmp[4](
s, ptr_y + wrap_y,
2119 NULL, wrap_y * 2, 8);
2120 if (progressive_score > interlaced_score) {
2121 s->interlaced_dct = 1;
2124 uv_dct_offset = wrap_c;
2133 s->pdsp.get_pixels(
s->block[0], ptr_y, wrap_y);
2134 s->pdsp.get_pixels(
s->block[1], ptr_y + 8, wrap_y);
2135 s->pdsp.get_pixels(
s->block[2], ptr_y +
dct_offset, wrap_y);
2136 s->pdsp.get_pixels(
s->block[3], ptr_y +
dct_offset + 8, wrap_y);
2142 s->pdsp.get_pixels(
s->block[4], ptr_cb, wrap_c);
2143 s->pdsp.get_pixels(
s->block[5], ptr_cr, wrap_c);
2145 s->pdsp.get_pixels(
s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2146 s->pdsp.get_pixels(
s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2148 s->pdsp.get_pixels(
s->block[ 6], ptr_cb + 8, wrap_c);
2149 s->pdsp.get_pixels(
s->block[ 7], ptr_cr + 8, wrap_c);
2150 s->pdsp.get_pixels(
s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2151 s->pdsp.get_pixels(
s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2152 s->pdsp.get_pixels(
s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2153 s->pdsp.get_pixels(
s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2159 uint8_t *dest_y, *dest_cb, *dest_cr;
2161 dest_y =
s->dest[0];
2162 dest_cb =
s->dest[1];
2163 dest_cr =
s->dest[2];
2166 op_pix =
s->hdsp.put_pixels_tab;
2167 op_qpix =
s->qdsp.put_qpel_pixels_tab;
2169 op_pix =
s->hdsp.put_no_rnd_pixels_tab;
2170 op_qpix =
s->qdsp.put_no_rnd_qpel_pixels_tab;
2175 s->last_picture.f->data,
2177 op_pix =
s->hdsp.avg_pixels_tab;
2178 op_qpix =
s->qdsp.avg_qpel_pixels_tab;
2182 s->next_picture.f->data,
2187 int progressive_score, interlaced_score;
2189 s->interlaced_dct = 0;
2190 progressive_score =
s->mecc.ildct_cmp[0](
s, dest_y, ptr_y, wrap_y, 8) +
2191 s->mecc.ildct_cmp[0](
s, dest_y + wrap_y * 8,
2196 progressive_score -= 400;
2198 if (progressive_score > 0) {
2199 interlaced_score =
s->mecc.ildct_cmp[0](
s, dest_y, ptr_y,
2201 s->mecc.ildct_cmp[0](
s, dest_y + wrap_y,
2205 if (progressive_score > interlaced_score) {
2206 s->interlaced_dct = 1;
2209 uv_dct_offset = wrap_c;
2217 s->pdsp.diff_pixels(
s->block[0], ptr_y, dest_y, wrap_y);
2218 s->pdsp.diff_pixels(
s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2221 s->pdsp.diff_pixels(
s->block[3], ptr_y +
dct_offset + 8,
2228 s->pdsp.diff_pixels(
s->block[4], ptr_cb, dest_cb, wrap_c);
2229 s->pdsp.diff_pixels(
s->block[5], ptr_cr, dest_cr, wrap_c);
2231 s->pdsp.diff_pixels(
s->block[6], ptr_cb + uv_dct_offset,
2232 dest_cb + uv_dct_offset, wrap_c);
2233 s->pdsp.diff_pixels(
s->block[7], ptr_cr + uv_dct_offset,
2234 dest_cr + uv_dct_offset, wrap_c);
2238 if (
s->current_picture.mc_mb_var[
s->mb_stride *
mb_y +
mb_x] <
2239 2 *
s->qscale *
s->qscale) {
2241 if (
s->mecc.sad[1](
NULL, ptr_y, dest_y, wrap_y, 8) < 20 *
s->qscale)
2243 if (
s->mecc.sad[1](
NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 *
s->qscale)
2246 wrap_y, 8) < 20 *
s->qscale)
2249 wrap_y, 8) < 20 *
s->qscale)
2251 if (
s->mecc.sad[1](
NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 *
s->qscale)
2253 if (
s->mecc.sad[1](
NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 *
s->qscale)
2256 if (
s->mecc.sad[1](
NULL, ptr_cb + uv_dct_offset,
2257 dest_cb + uv_dct_offset,
2258 wrap_c, 8) < 20 *
s->qscale)
2260 if (
s->mecc.sad[1](
NULL, ptr_cr + uv_dct_offset,
2261 dest_cr + uv_dct_offset,
2262 wrap_c, 8) < 20 *
s->qscale)
2268 if (
s->quantizer_noise_shaping) {
2289 memcpy(orig[0],
s->block[0],
sizeof(int16_t) * 64 * mb_block_count);
2295 for (
i = 0;
i < mb_block_count;
i++) {
2298 s->block_last_index[
i] =
s->dct_quantize(
s,
s->block[
i],
i,
s->qscale, &
overflow);
2307 s->block_last_index[
i] = -1;
2309 if (
s->quantizer_noise_shaping) {
2310 for (
i = 0;
i < mb_block_count;
i++) {
2312 s->block_last_index[
i] =
2314 orig[
i],
i,
s->qscale);
2319 if (
s->luma_elim_threshold && !
s->mb_intra)
2320 for (
i = 0;
i < 4;
i++)
2322 if (
s->chroma_elim_threshold && !
s->mb_intra)
2323 for (
i = 4;
i < mb_block_count;
i++)
2327 for (
i = 0;
i < mb_block_count;
i++) {
2328 if (
s->block_last_index[
i] == -1)
2329 s->coded_score[
i] = INT_MAX / 256;
2335 s->block_last_index[4] =
2336 s->block_last_index[5] = 0;
2338 s->block[5][0] = (1024 +
s->c_dc_scale / 2) /
s->c_dc_scale;
2340 for (
i=6;
i<12;
i++) {
2341 s->block_last_index[
i] = 0;
2342 s->block[
i][0] =
s->block[4][0];
2349 for (
i = 0;
i < mb_block_count;
i++) {
2351 if (
s->block_last_index[
i] > 0) {
2352 for (j = 63; j > 0; j--) {
2353 if (
s->block[
i][
s->intra_scantable.permutated[j]])
2356 s->block_last_index[
i] = j;
2362 switch(
s->codec_id){
2365 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2369 if (CONFIG_MPEG4_ENCODER)
2379 if (CONFIG_WMV2_ENCODER)
2383 if (CONFIG_H261_ENCODER)
2391 if (CONFIG_H263_ENCODER)
2394 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2401 if (CONFIG_SPEEDHQ_ENCODER)
2424 memcpy(
d->last_mv,
s->last_mv, 2*2*2*
sizeof(
int));
2427 d->mb_skip_run=
s->mb_skip_run;
2429 d->last_dc[
i] =
s->last_dc[
i];
2432 d->mv_bits=
s->mv_bits;
2433 d->i_tex_bits=
s->i_tex_bits;
2434 d->p_tex_bits=
s->p_tex_bits;
2435 d->i_count=
s->i_count;
2436 d->skip_count=
s->skip_count;
2437 d->misc_bits=
s->misc_bits;
2441 d->qscale=
s->qscale;
2442 d->dquant=
s->dquant;
2444 d->esc3_level_length=
s->esc3_level_length;
2452 memcpy(
d->mv,
s->mv, 2*4*2*
sizeof(
int));
2453 memcpy(
d->last_mv,
s->last_mv, 2*2*2*
sizeof(
int));
2456 d->mb_skip_run=
s->mb_skip_run;
2458 d->last_dc[
i] =
s->last_dc[
i];
2461 d->mv_bits=
s->mv_bits;
2462 d->i_tex_bits=
s->i_tex_bits;
2463 d->p_tex_bits=
s->p_tex_bits;
2464 d->i_count=
s->i_count;
2465 d->skip_count=
s->skip_count;
2466 d->misc_bits=
s->misc_bits;
2468 d->mb_intra=
s->mb_intra;
2469 d->mb_skipped=
s->mb_skipped;
2470 d->mv_type=
s->mv_type;
2471 d->mv_dir=
s->mv_dir;
2473 if(
s->data_partitioning){
2475 d->tex_pb=
s->tex_pb;
2479 d->block_last_index[
i]=
s->block_last_index[
i];
2480 d->interlaced_dct=
s->interlaced_dct;
2481 d->qscale=
s->qscale;
2483 d->esc3_level_length=
s->esc3_level_length;
2488 int *dmin,
int *next_block,
int motion_x,
int motion_y)
2491 uint8_t *dest_backup[3];
2495 s->block=
s->blocks[*next_block];
2496 s->pb=
pb[*next_block];
2497 if(
s->data_partitioning){
2498 s->pb2 =
pb2 [*next_block];
2499 s->tex_pb=
tex_pb[*next_block];
2503 memcpy(dest_backup,
s->dest,
sizeof(
s->dest));
2504 s->dest[0] =
s->sc.rd_scratchpad;
2505 s->dest[1] =
s->sc.rd_scratchpad + 16*
s->linesize;
2506 s->dest[2] =
s->sc.rd_scratchpad + 16*
s->linesize + 8;
2513 if(
s->data_partitioning){
2521 score *=
s->lambda2;
2526 memcpy(
s->dest, dest_backup,
sizeof(
s->dest));
2544 else if(
w==8 &&
h==8)
2561 int chroma_mb_w =
w >>
s->chroma_x_shift;
2562 int chroma_mb_h =
h >>
s->chroma_y_shift;
2564 if(
s->mb_x*16 + 16 >
s->width )
w=
s->width -
s->mb_x*16;
2565 if(
s->mb_y*16 + 16 >
s->height)
h=
s->height-
s->mb_y*16;
2569 return s->mecc.nsse[0](
s,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2570 s->dest[0],
s->linesize, 16) +
2571 s->mecc.nsse[1](
s,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2572 s->dest[1],
s->uvlinesize, chroma_mb_h) +
2573 s->mecc.nsse[1](
s,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2574 s->dest[2],
s->uvlinesize, chroma_mb_h);
2576 return s->mecc.sse[0](
NULL,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2577 s->dest[0],
s->linesize, 16) +
2578 s->mecc.sse[1](
NULL,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2579 s->dest[1],
s->uvlinesize, chroma_mb_h) +
2580 s->mecc.sse[1](
NULL,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2581 s->dest[2],
s->uvlinesize, chroma_mb_h);
2584 return sse(
s,
s->new_picture->data[0] +
s->mb_x * 16 +
s->mb_y *
s->linesize * 16,
2585 s->dest[0],
w,
h,
s->linesize) +
2586 sse(
s,
s->new_picture->data[1] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2587 s->dest[1],
w >>
s->chroma_x_shift,
h >>
s->chroma_y_shift,
s->uvlinesize) +
2588 sse(
s,
s->new_picture->data[2] +
s->mb_x * chroma_mb_w +
s->mb_y *
s->uvlinesize * chroma_mb_h,
2589 s->dest[2],
w >>
s->chroma_x_shift,
h >>
s->chroma_y_shift,
s->uvlinesize);
2597 s->me.dia_size=
s->avctx->pre_dia_size;
2598 s->first_slice_line=1;
2599 for(
s->mb_y=
s->end_mb_y-1;
s->mb_y >=
s->start_mb_y;
s->mb_y--) {
2600 for(
s->mb_x=
s->mb_width-1;
s->mb_x >=0 ;
s->mb_x--) {
2603 s->first_slice_line=0;
2614 s->me.dia_size=
s->avctx->dia_size;
2615 s->first_slice_line=1;
2616 for(
s->mb_y=
s->start_mb_y;
s->mb_y <
s->end_mb_y;
s->mb_y++) {
2619 for(
s->mb_x=0;
s->mb_x <
s->mb_width;
s->mb_x++) {
2620 s->block_index[0]+=2;
2621 s->block_index[1]+=2;
2622 s->block_index[2]+=2;
2623 s->block_index[3]+=2;
2631 s->first_slice_line=0;
2644 uint8_t *pix =
s->new_picture->data[0] + (yy *
s->linesize) + xx;
2646 int sum =
s->mpvencdsp.pix_sum(pix,
s->linesize);
2648 varc = (
s->mpvencdsp.pix_norm1(pix,
s->linesize) -
2649 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2651 s->current_picture.mb_var [
s->mb_stride *
mb_y +
mb_x] = varc;
2652 s->current_picture.mb_mean[
s->mb_stride *
mb_y +
mb_x] = (sum+128)>>8;
2653 s->me.mb_var_sum_temp += varc;
2661 if(
s->partitioned_frame){
2666 }
else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2669 }
else if (CONFIG_SPEEDHQ_ENCODER &&
s->out_format ==
FMT_SPEEDHQ) {
2681 uint8_t *ptr =
s->mb_info_ptr +
s->mb_info_size - 12;
2683 int mba =
s->mb_x +
s->mb_width * (
s->mb_y %
s->gob_index);
2684 int gobn =
s->mb_y /
s->gob_index;
2686 if (CONFIG_H263_ENCODER)
2688 bytestream_put_le32(&ptr,
offset);
2689 bytestream_put_byte(&ptr,
s->qscale);
2690 bytestream_put_byte(&ptr, gobn);
2691 bytestream_put_le16(&ptr, mba);
2692 bytestream_put_byte(&ptr, pred_x);
2693 bytestream_put_byte(&ptr, pred_y);
2695 bytestream_put_byte(&ptr, 0);
2696 bytestream_put_byte(&ptr, 0);
2704 s->mb_info_size += 12;
2705 s->prev_mb_info =
s->last_mb_info;
2717 if (!
s->mb_info_size)
2718 s->mb_info_size += 12;
2725 &&
s->slice_context_count == 1
2726 &&
s->pb.buf ==
s->avctx->internal->byte_buffer) {
2727 int lastgob_pos =
s->ptr_lastgob -
s->pb.buf;
2729 uint8_t *new_buffer =
NULL;
2730 int new_buffer_size = 0;
2732 if ((
s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2740 s->avctx->internal->byte_buffer_size + size_increase);
2744 memcpy(new_buffer,
s->avctx->internal->byte_buffer,
s->avctx->internal->byte_buffer_size);
2745 av_free(
s->avctx->internal->byte_buffer);
2746 s->avctx->internal->byte_buffer = new_buffer;
2747 s->avctx->internal->byte_buffer_size = new_buffer_size;
2749 s->ptr_lastgob =
s->pb.buf + lastgob_pos;
2759 int chr_h= 16>>
s->chroma_y_shift;
2784 s->last_dc[
i] = 128 <<
s->intra_dc_precision;
2786 s->current_picture.encoding_error[
i] = 0;
2789 s->last_dc[0] = 128*8/13;
2790 s->last_dc[1] = 128*8/14;
2791 s->last_dc[2] = 128*8/14;
2794 memset(
s->last_mv, 0,
sizeof(
s->last_mv));
2798 switch(
s->codec_id){
2802 if (CONFIG_H263_ENCODER)
2806 if(CONFIG_MPEG4_ENCODER &&
s->partitioned_frame)
2813 s->first_slice_line = 1;
2814 s->ptr_lastgob =
s->pb.buf;
2815 for (mb_y_order =
s->start_mb_y; mb_y_order < s->
end_mb_y; mb_y_order++) {
2819 if (first_in_slice && mb_y_order !=
s->start_mb_y)
2821 s->last_dc[0] =
s->last_dc[1] =
s->last_dc[2] = 1024 <<
s->intra_dc_precision;
2837 int size_increase =
s->avctx->internal->byte_buffer_size/4
2845 if(
s->data_partitioning){
2859 xy=
s->mb_y*
s->mb_stride +
s->mb_x;
2865 int current_packet_size, is_gob_start;
2868 - (
s->ptr_lastgob -
s->pb.buf);
2870 is_gob_start =
s->rtp_payload_size &&
2871 current_packet_size >=
s->rtp_payload_size &&
2874 if(
s->start_mb_y ==
mb_y &&
mb_y > 0 &&
mb_x==0) is_gob_start=1;
2876 switch(
s->codec_id){
2879 if(!
s->h263_slice_structured)
2880 if(
s->mb_x ||
s->mb_y%
s->gob_index) is_gob_start=0;
2883 if(
s->mb_x==0 &&
s->mb_y!=0) is_gob_start=1;
2885 if(
s->mb_skip_run) is_gob_start=0;
2888 if(
s->mb_x==0 &&
s->mb_y!=0) is_gob_start=1;
2904 if (
s->error_rate &&
s->resync_mb_x +
s->resync_mb_y > 0) {
2906 int d = 100 /
s->error_rate;
2908 current_packet_size=0;
2909 s->pb.buf_ptr=
s->ptr_lastgob;
2914 switch(
s->codec_id){
2916 if (CONFIG_MPEG4_ENCODER) {
2923 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2930 if (CONFIG_H263_ENCODER) {
2939 s->misc_bits+=
bits -
s->last_bits;
2943 s->ptr_lastgob += current_packet_size;
2944 s->first_slice_line=1;
2945 s->resync_mb_x=
mb_x;
2946 s->resync_mb_y=
mb_y;
2950 if( (
s->resync_mb_x ==
s->mb_x)
2951 &&
s->resync_mb_y+1 ==
s->mb_y){
2952 s->first_slice_line=0;
2962 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2968 if(
s->data_partitioning){
2969 backup_s.pb2=
s->pb2;
2970 backup_s.tex_pb=
s->tex_pb;
2977 s->mv[0][0][0] =
s->p_mv_table[xy][0];
2978 s->mv[0][0][1] =
s->p_mv_table[xy][1];
2980 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
2987 j=
s->field_select[0][
i] =
s->p_field_select_table[
i][xy];
2988 s->mv[0][
i][0] =
s->p_field_mv_table[
i][j][xy][0];
2989 s->mv[0][
i][1] =
s->p_field_mv_table[
i][j][xy][1];
2992 &dmin, &next_block, 0, 0);
3001 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
3008 s->mv[0][
i][0] =
s->current_picture.motion_val[0][
s->block_index[
i]][0];
3009 s->mv[0][
i][1] =
s->current_picture.motion_val[0][
s->block_index[
i]][1];
3012 &dmin, &next_block, 0, 0);
3018 s->mv[0][0][0] =
s->b_forw_mv_table[xy][0];
3019 s->mv[0][0][1] =
s->b_forw_mv_table[xy][1];
3021 &dmin, &next_block,
s->mv[0][0][0],
s->mv[0][0][1]);
3027 s->mv[1][0][0] =
s->b_back_mv_table[xy][0];
3028 s->mv[1][0][1] =
s->b_back_mv_table[xy][1];
3030 &dmin, &next_block,
s->mv[1][0][0],
s->mv[1][0][1]);
3036 s->mv[0][0][0] =
s->b_bidir_forw_mv_table[xy][0];
3037 s->mv[0][0][1] =
s->b_bidir_forw_mv_table[xy][1];
3038 s->mv[1][0][0] =
s->b_bidir_back_mv_table[xy][0];
3039 s->mv[1][0][1] =
s->b_bidir_back_mv_table[xy][1];
3041 &dmin, &next_block, 0, 0);
3048 j=
s->field_select[0][
i] =
s->b_field_select_table[0][
i][xy];
3049 s->mv[0][
i][0] =
s->b_field_mv_table[0][
i][j][xy][0];
3050 s->mv[0][
i][1] =
s->b_field_mv_table[0][
i][j][xy][1];
3053 &dmin, &next_block, 0, 0);
3060 j=
s->field_select[1][
i] =
s->b_field_select_table[1][
i][xy];
3061 s->mv[1][
i][0] =
s->b_field_mv_table[1][
i][j][xy][0];
3062 s->mv[1][
i][1] =
s->b_field_mv_table[1][
i][j][xy][1];
3065 &dmin, &next_block, 0, 0);
3071 for(dir=0; dir<2; dir++){
3073 j=
s->field_select[dir][
i] =
s->b_field_select_table[dir][
i][xy];
3074 s->mv[dir][
i][0] =
s->b_field_mv_table[dir][
i][j][xy][0];
3075 s->mv[dir][
i][1] =
s->b_field_mv_table[dir][
i][j][xy][1];
3079 &dmin, &next_block, 0, 0);
3088 &dmin, &next_block, 0, 0);
3089 if(
s->h263_pred ||
s->h263_aic){
3091 s->mbintra_table[
mb_x +
mb_y*
s->mb_stride]=1;
3099 const int last_qp= backup_s.qscale;
3103 static const int dquant_tab[4]={-1,1,-2,2};
3104 int storecoefs =
s->mb_intra &&
s->dc_val[0];
3112 s->mv[0][0][0] = best_s.
mv[0][0][0];
3113 s->mv[0][0][1] = best_s.
mv[0][0][1];
3114 s->mv[1][0][0] = best_s.
mv[1][0][0];
3115 s->mv[1][0][1] = best_s.
mv[1][0][1];
3118 for(; qpi<4; qpi++){
3119 int dquant= dquant_tab[qpi];
3121 if(qp < s->
avctx->
qmin || qp >
s->avctx->qmax)
3126 dc[
i]=
s->dc_val[0][
s->block_index[
i] ];
3127 memcpy(ac[
i],
s->ac_val[0][
s->block_index[
i]],
sizeof(int16_t)*16);
3132 &dmin, &next_block,
s->mv[mvdir][0][0],
s->mv[mvdir][0][1]);
3136 s->dc_val[0][
s->block_index[
i] ]=
dc[
i];
3137 memcpy(
s->ac_val[0][
s->block_index[
i]], ac[
i],
sizeof(int16_t)*16);
3145 int mx=
s->b_direct_mv_table[xy][0];
3146 int my=
s->b_direct_mv_table[xy][1];
3148 backup_s.dquant = 0;
3153 &dmin, &next_block, mx, my);
3156 backup_s.dquant = 0;
3161 &dmin, &next_block, 0, 0);
3166 coded |=
s->block_last_index[
i];
3169 memcpy(
s->mv, best_s.
mv,
sizeof(
s->mv));
3191 &dmin, &next_block, mx, my);
3196 s->current_picture.qscale_table[xy] = best_s.
qscale;
3202 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3205 if(
s->data_partitioning){
3208 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3209 s->pb2= backup_s.pb2;
3213 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3214 s->tex_pb= backup_s.tex_pb;
3218 if (CONFIG_H263_ENCODER &&
3223 s->hdsp.put_pixels_tab[0][0](
s->dest[0],
s->sc.rd_scratchpad ,
s->linesize ,16);
3224 s->hdsp.put_pixels_tab[1][0](
s->dest[1],
s->sc.rd_scratchpad + 16*
s->linesize ,
s->uvlinesize, 8);
3225 s->hdsp.put_pixels_tab[1][0](
s->dest[2],
s->sc.rd_scratchpad + 16*
s->linesize + 8,
s->uvlinesize, 8);
3231 int motion_x = 0, motion_y = 0;
3239 motion_x=
s->mv[0][0][0] = 0;
3240 motion_y=
s->mv[0][0][1] = 0;
3245 motion_x=
s->mv[0][0][0] =
s->p_mv_table[xy][0];
3246 motion_y=
s->mv[0][0][1] =
s->p_mv_table[xy][1];
3253 j=
s->field_select[0][
i] =
s->p_field_select_table[
i][xy];
3254 s->mv[0][
i][0] =
s->p_field_mv_table[
i][j][xy][0];
3255 s->mv[0][
i][1] =
s->p_field_mv_table[
i][j][xy][1];
3263 s->mv[0][
i][0] =
s->current_picture.motion_val[0][
s->block_index[
i]][0];
3264 s->mv[0][
i][1] =
s->current_picture.motion_val[0][
s->block_index[
i]][1];
3268 if (CONFIG_MPEG4_ENCODER) {
3271 motion_x=
s->b_direct_mv_table[xy][0];
3272 motion_y=
s->b_direct_mv_table[xy][1];
3277 if (CONFIG_MPEG4_ENCODER) {
3286 s->mv[0][0][0] =
s->b_bidir_forw_mv_table[xy][0];
3287 s->mv[0][0][1] =
s->b_bidir_forw_mv_table[xy][1];
3288 s->mv[1][0][0] =
s->b_bidir_back_mv_table[xy][0];
3289 s->mv[1][0][1] =
s->b_bidir_back_mv_table[xy][1];
3294 motion_x=
s->mv[1][0][0] =
s->b_back_mv_table[xy][0];
3295 motion_y=
s->mv[1][0][1] =
s->b_back_mv_table[xy][1];
3300 motion_x=
s->mv[0][0][0] =
s->b_forw_mv_table[xy][0];
3301 motion_y=
s->mv[0][0][1] =
s->b_forw_mv_table[xy][1];
3308 j=
s->field_select[0][
i] =
s->b_field_select_table[0][
i][xy];
3309 s->mv[0][
i][0] =
s->b_field_mv_table[0][
i][j][xy][0];
3310 s->mv[0][
i][1] =
s->b_field_mv_table[0][
i][j][xy][1];
3318 j=
s->field_select[1][
i] =
s->b_field_select_table[1][
i][xy];
3319 s->mv[1][
i][0] =
s->b_field_mv_table[1][
i][j][xy][0];
3320 s->mv[1][
i][1] =
s->b_field_mv_table[1][
i][j][xy][1];
3327 for(dir=0; dir<2; dir++){
3329 j=
s->field_select[dir][
i] =
s->b_field_select_table[dir][
i][xy];
3330 s->mv[dir][
i][0] =
s->b_field_mv_table[dir][
i][j][xy][0];
3331 s->mv[dir][
i][1] =
s->b_field_mv_table[dir][
i][j][xy][1];
3342 s->last_mv_dir =
s->mv_dir;
3344 if (CONFIG_H263_ENCODER &&
3353 s->p_mv_table[xy][0]=0;
3354 s->p_mv_table[xy][1]=0;
3361 if(
s->mb_x*16 + 16 >
s->width )
w=
s->width -
s->mb_x*16;
3362 if(
s->mb_y*16 + 16 >
s->height)
h=
s->height-
s->mb_y*16;
3364 s->current_picture.encoding_error[0] +=
sse(
3365 s,
s->new_picture->data[0] +
s->mb_x*16 +
s->mb_y*
s->linesize*16,
3366 s->dest[0],
w,
h,
s->linesize);
3367 s->current_picture.encoding_error[1] +=
sse(
3368 s,
s->new_picture->data[1] +
s->mb_x*8 +
s->mb_y*
s->uvlinesize*chr_h,
3369 s->dest[1],
w>>1,
h>>
s->chroma_y_shift,
s->uvlinesize);
3370 s->current_picture.encoding_error[2] +=
sse(
3371 s,
s->new_picture->data[2] +
s->mb_x*8 +
s->mb_y*
s->uvlinesize*chr_h,
3372 s->dest[2],
w>>1,
h>>
s->chroma_y_shift,
s->uvlinesize);
3375 if(CONFIG_H263_ENCODER &&
s->out_format ==
FMT_H263)
3378 ff_dlog(
s->avctx,
"MB %d %d bits\n",
3392 #define MERGE(field) dst->field += src->field; src->field=0
3415 for(
i=0;
i<64;
i++){
3428 if (
s->next_lambda){
3429 s->current_picture_ptr->f->quality =
3430 s->current_picture.f->quality =
s->next_lambda;
3431 if(!dry_run)
s->next_lambda= 0;
3432 }
else if (!
s->fixed_qscale) {
3434 s->current_picture_ptr->f->quality =
3435 s->current_picture.f->quality =
quality;
3436 if (
s->current_picture.f->quality < 0)
3440 if(
s->adaptive_quant){
3441 switch(
s->codec_id){
3443 if (CONFIG_MPEG4_ENCODER)
3449 if (CONFIG_H263_ENCODER)
3456 s->lambda=
s->lambda_table[0];
3459 s->lambda =
s->current_picture.f->quality;
3467 s->time =
s->current_picture_ptr->f->pts *
s->avctx->time_base.num;
3470 s->pb_time=
s->pp_time - (
s->last_non_b_time -
s->time);
3473 s->pp_time=
s->time -
s->last_non_b_time;
3474 s->last_non_b_time=
s->time;
3483 int context_count =
s->slice_context_count;
3488 s->me.mb_var_sum_temp =
3489 s->me.mc_mb_var_sum_temp = 0;
3493 if (
s->out_format ==
FMT_MPEG1 || (
s->h263_pred && !
s->msmpeg4_version))
3498 s->me.scene_change_score=0;
3503 if(
s->msmpeg4_version >= 3)
s->no_rounding=1;
3504 else s->no_rounding=0;
3507 s->no_rounding ^= 1;
3516 s->lambda=
s->last_lambda_for[
s->pict_type];
3518 s->lambda=
s->last_lambda_for[
s->last_non_b_pict_type];
3523 if(
s->q_chroma_intra_matrix !=
s->q_intra_matrix )
av_freep(&
s->q_chroma_intra_matrix);
3524 if(
s->q_chroma_intra_matrix16 !=
s->q_intra_matrix16)
av_freep(&
s->q_chroma_intra_matrix16);
3525 s->q_chroma_intra_matrix =
s->q_intra_matrix;
3526 s->q_chroma_intra_matrix16 =
s->q_intra_matrix16;
3530 for(
i=1;
i<context_count;
i++){
3541 s->lambda = (
s->lambda *
s->me_penalty_compensation + 128) >> 8;
3542 s->lambda2 = (
s->lambda2 * (int64_t)
s->me_penalty_compensation + 128) >> 8;
3553 for(
i=0;
i<
s->mb_stride*
s->mb_height;
i++)
3556 if(!
s->fixed_qscale){
3558 s->avctx->execute(
s->avctx,
mb_var_thread, &
s->thread_context[0],
NULL, context_count,
sizeof(
void*));
3561 for(
i=1;
i<context_count;
i++){
3564 s->current_picture.mc_mb_var_sum=
s->current_picture_ptr->mc_mb_var_sum=
s->me.mc_mb_var_sum_temp;
3565 s->current_picture. mb_var_sum=
s->current_picture_ptr-> mb_var_sum=
s->me. mb_var_sum_temp;
3568 if (
s->me.scene_change_score >
s->scenechange_threshold &&
3571 for(
i=0;
i<
s->mb_stride*
s->mb_height;
i++)
3573 if(
s->msmpeg4_version >= 3)
3575 ff_dlog(
s,
"Scene change detected, encoding as I Frame %"PRId64
" %"PRId64
"\n",
3576 s->current_picture.mb_var_sum,
s->current_picture.mc_mb_var_sum);
3617 for(dir=0; dir<2; dir++){
3623 s->b_field_mv_table[dir][
i][j], dir ?
s->b_code :
s->f_code,
type, 1);
3634 if (
s->qscale < 3 &&
s->max_qcoeff <= 128 &&
3643 if (
s->avctx->intra_matrix) {
3645 luma_matrix =
s->avctx->intra_matrix;
3647 if (
s->avctx->chroma_intra_matrix)
3648 chroma_matrix =
s->avctx->chroma_intra_matrix;
3652 int j =
s->idsp.idct_permutation[
i];
3654 s->chroma_intra_matrix[j] =
av_clip_uint8((chroma_matrix[
i] *
s->qscale) >> 3);
3657 s->y_dc_scale_table=
3659 s->chroma_intra_matrix[0] =
3662 s->intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3664 s->chroma_intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3668 static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3669 static const uint8_t
c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3670 for (
int i = 1;
i < 64;
i++) {
3676 s->y_dc_scale_table = y;
3677 s->c_dc_scale_table =
c;
3678 s->intra_matrix[0] = 13;
3679 s->chroma_intra_matrix[0] = 14;
3681 s->intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3683 s->chroma_intra_matrix,
s->intra_quant_bias, 8, 8, 1);
3687 s->y_dc_scale_table=
3692 s->current_picture_ptr->f->key_frame =
3694 s->current_picture_ptr->f->pict_type =
3695 s->current_picture.f->pict_type =
s->pict_type;
3697 if (
s->current_picture.f->key_frame)
3698 s->picture_in_gop_number=0;
3700 s->mb_x =
s->mb_y = 0;
3702 switch(
s->out_format) {
3703 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3709 if (CONFIG_SPEEDHQ_ENCODER)
3713 if (CONFIG_H261_ENCODER)
3721 else if (CONFIG_MPEG4_ENCODER &&
s->h263_pred) {
3734 else if (CONFIG_H263_ENCODER)
3738 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3745 s->header_bits=
bits -
s->last_bits;
3747 for(
i=1;
i<context_count;
i++){
3750 s->avctx->execute(
s->avctx,
encode_thread, &
s->thread_context[0],
NULL, context_count,
sizeof(
void*));
3751 for(
i=1;
i<context_count;
i++){
3752 if (
s->pb.buf_end ==
s->thread_context[
i]->pb.buf)
3761 const int intra=
s->mb_intra;
3764 s->dct_count[intra]++;
3766 for(
i=0;
i<64;
i++){
3771 s->dct_error_sum[intra][
i] +=
level;
3772 level -=
s->dct_offset[intra][
i];
3775 s->dct_error_sum[intra][
i] -=
level;
3776 level +=
s->dct_offset[intra][
i];
3785 int16_t *
block,
int n,
3789 const uint8_t *scantable;
3790 const uint8_t *perm_scantable;
3792 unsigned int threshold1, threshold2;
3804 int coeff_count[64];
3805 int qmul, qadd, start_i, last_non_zero,
i,
dc;
3806 const int esc_length=
s->ac_esc_length;
3808 uint8_t * last_length;
3814 if(
s->dct_error_sum)
3820 else mpeg2_qscale =
qscale << 1;
3824 scantable=
s->intra_scantable.scantable;
3825 perm_scantable=
s->intra_scantable.permutated;
3842 qmat = n < 4 ?
s->q_intra_matrix[
qscale] :
s->q_chroma_intra_matrix[
qscale];
3843 matrix = n < 4 ?
s->intra_matrix :
s->chroma_intra_matrix;
3847 if (n > 3 &&
s->intra_chroma_ac_vlc_length) {
3848 length =
s->intra_chroma_ac_vlc_length;
3849 last_length=
s->intra_chroma_ac_vlc_last_length;
3851 length =
s->intra_ac_vlc_length;
3852 last_length=
s->intra_ac_vlc_last_length;
3855 scantable=
s->inter_scantable.scantable;
3856 perm_scantable=
s->inter_scantable.permutated;
3859 qmat =
s->q_inter_matrix[
qscale];
3861 length =
s->inter_ac_vlc_length;
3862 last_length=
s->inter_ac_vlc_last_length;
3867 threshold2= (threshold1<<1);
3869 for(
i=63;
i>=start_i;
i--) {
3870 const int j = scantable[
i];
3873 if(((
unsigned)(
level+threshold1))>threshold2){
3879 for(
i=start_i;
i<=last_non_zero;
i++) {
3880 const int j = scantable[
i];
3885 if(((
unsigned)(
level+threshold1))>threshold2){
3908 if(last_non_zero < start_i){
3909 memset(
block + start_i, 0, (64-start_i)*
sizeof(int16_t));
3910 return last_non_zero;
3913 score_tab[start_i]= 0;
3914 survivor[0]= start_i;
3917 for(
i=start_i;
i<=last_non_zero;
i++){
3918 int level_index, j, zero_distortion;
3920 int best_score=256*256*256*120;
3924 zero_distortion= dct_coeff*dct_coeff;
3926 for(level_index=0; level_index < coeff_count[
i]; level_index++){
3935 unquant_coeff= alevel*qmul + qadd;
3937 j =
s->idsp.idct_permutation[scantable[
i]];
3938 unquant_coeff = alevel *
matrix[j] * 8;
3940 j =
s->idsp.idct_permutation[scantable[
i]];
3942 unquant_coeff = (
int)( alevel * mpeg2_qscale *
matrix[j]) >> 4;
3943 unquant_coeff = (unquant_coeff - 1) | 1;
3945 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((
int)
matrix[j])) >> 5;
3946 unquant_coeff = (unquant_coeff - 1) | 1;
3951 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3953 if((
level&(~127)) == 0){
3954 for(j=survivor_count-1; j>=0; j--){
3955 int run=
i - survivor[j];
3957 score += score_tab[
i-
run];
3959 if(score < best_score){
3962 level_tab[
i+1]=
level-64;
3967 for(j=survivor_count-1; j>=0; j--){
3968 int run=
i - survivor[j];
3970 score += score_tab[
i-
run];
3971 if(score < last_score){
3974 last_level=
level-64;
3980 distortion += esc_length*
lambda;
3981 for(j=survivor_count-1; j>=0; j--){
3982 int run=
i - survivor[j];
3983 int score= distortion + score_tab[
i-
run];
3985 if(score < best_score){
3988 level_tab[
i+1]=
level-64;
3993 for(j=survivor_count-1; j>=0; j--){
3994 int run=
i - survivor[j];
3995 int score= distortion + score_tab[
i-
run];
3996 if(score < last_score){
3999 last_level=
level-64;
4007 score_tab[
i+1]= best_score;
4010 if(last_non_zero <= 27){
4011 for(; survivor_count; survivor_count--){
4012 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4016 for(; survivor_count; survivor_count--){
4017 if(score_tab[ survivor[survivor_count-1] ] <= best_score +
lambda)
4022 survivor[ survivor_count++ ]=
i+1;
4026 last_score= 256*256*256*120;
4027 for(
i= survivor[0];
i<=last_non_zero + 1;
i++){
4028 int score= score_tab[
i];
4032 if(score < last_score){
4035 last_level= level_tab[
i];
4036 last_run= run_tab[
i];
4041 s->coded_score[n] = last_score;
4044 last_non_zero= last_i - 1;
4045 memset(
block + start_i, 0, (64-start_i)*
sizeof(int16_t));
4047 if(last_non_zero < start_i)
4048 return last_non_zero;
4050 if(last_non_zero == 0 && start_i == 0){
4052 int best_score=
dc *
dc;
4054 for(
i=0;
i<coeff_count[0];
i++){
4057 int unquant_coeff, score, distortion;
4060 unquant_coeff= (alevel*qmul + qadd)>>3;
4062 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((
int)
matrix[0])) >> 5;
4063 unquant_coeff = (unquant_coeff - 1) | 1;
4065 unquant_coeff = (unquant_coeff + 4) >> 3;
4066 unquant_coeff<<= 3 + 3;
4068 distortion= (unquant_coeff -
dc) * (unquant_coeff -
dc);
4071 else score= distortion + esc_length*
lambda;
4073 if(score < best_score){
4075 best_level=
level - 64;
4078 block[0]= best_level;
4079 s->coded_score[n] = best_score -
dc*
dc;
4080 if(best_level == 0)
return -1;
4081 else return last_non_zero;
4087 block[ perm_scantable[last_non_zero] ]= last_level;
4090 for(;
i>start_i;
i -= run_tab[
i] + 1){
4091 block[ perm_scantable[
i-1] ]= level_tab[
i];
4094 return last_non_zero;
4109 if(
i==0)
s*= sqrt(0.5);
4110 if(j==0)
s*= sqrt(0.5);
4123 const uint8_t *scantable;
4124 const uint8_t *perm_scantable;
4130 int qmul, qadd, start_i, last_non_zero,
i,
dc;
4132 uint8_t * last_length;
4134 int rle_index,
run, q = 1, sum;
4136 if(
basis[0][0] == 0)
4142 scantable=
s->intra_scantable.scantable;
4143 perm_scantable=
s->intra_scantable.permutated;
4161 if (n > 3 &&
s->intra_chroma_ac_vlc_length) {
4162 length =
s->intra_chroma_ac_vlc_length;
4163 last_length=
s->intra_chroma_ac_vlc_last_length;
4165 length =
s->intra_ac_vlc_length;
4166 last_length=
s->intra_ac_vlc_last_length;
4169 scantable=
s->inter_scantable.scantable;
4170 perm_scantable=
s->inter_scantable.permutated;
4173 length =
s->inter_ac_vlc_length;
4174 last_length=
s->inter_ac_vlc_last_length;
4176 last_non_zero =
s->block_last_index[n];
4179 for(
i=0;
i<64;
i++){
4184 for(
i=0;
i<64;
i++){
4190 w= 15 + (48*qns*one +
w/2)/
w;
4203 for(
i=start_i;
i<=last_non_zero;
i++){
4204 int j= perm_scantable[
i];
4211 run_tab[rle_index++]=
run;
4221 int best_score =
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[0], 0);
4224 int run2, best_unquant_change=0, analyze_gradient;
4225 analyze_gradient = last_non_zero > 2 ||
s->quantizer_noise_shaping >= 3;
4227 if(analyze_gradient){
4228 for(
i=0;
i<64;
i++){
4238 int change, old_coeff;
4244 for(change=-1; change<=1; change+=2){
4245 int new_level=
level + change;
4246 int score, new_coeff;
4248 new_coeff= q*new_level;
4249 if(new_coeff >= 2048 || new_coeff < 0)
4252 score =
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[0],
4253 new_coeff - old_coeff);
4254 if(score<best_score){
4257 best_change= change;
4258 best_unquant_change= new_coeff - old_coeff;
4265 run2= run_tab[rle_index++];
4269 for(
i=start_i;
i<64;
i++){
4270 int j= perm_scantable[
i];
4272 int change, old_coeff;
4274 if(
s->quantizer_noise_shaping < 3 &&
i > last_non_zero + 1)
4279 else old_coeff= qmul*
level + qadd;
4280 run2= run_tab[rle_index++];
4287 for(change=-1; change<=1; change+=2){
4288 int new_level=
level + change;
4289 int score, new_coeff, unquant_change;
4296 if(new_level<0) new_coeff= qmul*new_level - qadd;
4297 else new_coeff= qmul*new_level + qadd;
4298 if(new_coeff >= 2048 || new_coeff <= -2048)
4303 if(level < 63 && level > -63){
4304 if(
i < last_non_zero)
4314 if(analyze_gradient){
4315 int g= d1[ scantable[
i] ];
4316 if(
g && (
g^new_level) >= 0)
4320 if(
i < last_non_zero){
4321 int next_i=
i + run2 + 1;
4322 int next_level=
block[ perm_scantable[next_i] ] + 64;
4324 if(next_level&(~127))
4327 if(next_i < last_non_zero)
4347 if(
i < last_non_zero){
4348 int next_i=
i + run2 + 1;
4349 int next_level=
block[ perm_scantable[next_i] ] + 64;
4351 if(next_level&(~127))
4354 if(next_i < last_non_zero)
4373 unquant_change= new_coeff - old_coeff;
4376 score +=
s->mpvencdsp.try_8x8basis(rem,
weight,
basis[j],
4378 if(score<best_score){
4381 best_change= change;
4382 best_unquant_change= unquant_change;
4386 prev_level=
level + 64;
4387 if(prev_level&(~127))
4397 int j= perm_scantable[ best_coeff ];
4399 block[j] += best_change;
4401 if(best_coeff > last_non_zero){
4402 last_non_zero= best_coeff;
4405 for(; last_non_zero>=start_i; last_non_zero--){
4406 if(
block[perm_scantable[last_non_zero]])
4413 for(
i=start_i;
i<=last_non_zero;
i++){
4414 int j= perm_scantable[
i];
4418 run_tab[rle_index++]=
run;
4425 s->mpvencdsp.add_8x8basis(rem,
basis[j], best_unquant_change);
4431 return last_non_zero;
4446 const uint8_t *scantable,
int last)
4457 for (
i = 0;
i <= last;
i++) {
4458 const int j = scantable[
i];
4463 for (
i = 0;
i <= last;
i++) {
4464 const int j = scantable[
i];
4465 const int perm_j = permutation[j];
4471 int16_t *
block,
int n,
4474 int i, j,
level, last_non_zero, q, start_i;
4476 const uint8_t *scantable;
4479 unsigned int threshold1, threshold2;
4483 if(
s->dct_error_sum)
4487 scantable=
s->intra_scantable.scantable;
4502 qmat = n < 4 ?
s->q_intra_matrix[
qscale] :
s->q_chroma_intra_matrix[
qscale];
4505 scantable=
s->inter_scantable.scantable;
4508 qmat =
s->q_inter_matrix[
qscale];
4512 threshold2= (threshold1<<1);
4513 for(
i=63;
i>=start_i;
i--) {
4517 if(((
unsigned)(
level+threshold1))>threshold2){
4524 for(
i=start_i;
i<=last_non_zero;
i++) {
4530 if(((
unsigned)(
level+threshold1))>threshold2){
4548 scantable, last_non_zero);
4550 return last_non_zero;
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
void ff_speedhq_end_slice(MpegEncContext *s)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
#define MV_TYPE_16X16
1 vector for the whole mb
#define AV_LOG_WARNING
Something somehow does not look correct.
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
const AVClass ff_mpv_enc_class
int data_partitioning
data partitioning flag from header
static void set_frame_distances(MpegEncContext *s)
static int get_bits_diff(MpegEncContext *s)
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
unsigned int lambda
Lagrange multiplier used in rate distortion.
#define H263_GOB_HEIGHT(h)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
int b_code
backward MV resolution for B-frames (MPEG-4)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
int64_t rc_min_rate
minimum bitrate
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
void ff_speedhq_encode_picture_header(MpegEncContext *s)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
static int sse_mb(MpegEncContext *s)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
static int16_t basis[64][64]
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Picture current_picture
copy of the current picture structure.
static const uint8_t mv_bits[2][16][10]
static int estimate_motion_thread(AVCodecContext *c, void *arg)
float lumi_masking
luminance masking (0-> disabled)
static void update_noise_reduction(MpegEncContext *s)
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
int partitioned_frame
is current frame partitioned
uint16_t(* dct_offset)[64]
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define INTERLACED_DCT(s)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
#define CANDIDATE_MB_TYPE_INTER_I
int capabilities
Codec capabilities.
static const int BUF_BITS
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
static int put_bytes_count(const PutBitContext *s, int round_up)
#define CANDIDATE_MB_TYPE_BACKWARD_I
const uint8_t ff_mpeg2_non_linear_qscale[32]
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
#define AV_LOG_VERBOSE
Detailed information.
#define PICT_BOTTOM_FIELD
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
void ff_init_block_index(MpegEncContext *s)
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
#define FF_MPV_FLAG_SKIP_RD
struct AVCodecContext * avctx
av_cold int ff_rate_control_init(MpegEncContext *s)
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
#define CANDIDATE_MB_TYPE_SKIPPED
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
PutBitContext pb
bit output
int mb_decision
macroblock decision mode
#define CANDIDATE_MB_TYPE_INTER
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
int qmax
maximum quantizer
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
#define MPEGVIDEO_MAX_PLANES
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
#define CANDIDATE_MB_TYPE_FORWARD_I
#define FF_MPV_COMMON_MOTION_EST_OPTS
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
#define FF_MPV_COMMON_OPTS
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
static int estimate_qp(MpegEncContext *s, int dry_run)
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
const struct AVCodec * codec
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
#define CANDIDATE_MB_TYPE_BIDIR
static const struct twinvq_data tab
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
int flags
AV_CODEC_FLAG_*.
#define CONFIG_MSMPEG4_ENCODER
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
static av_always_inline float scale(float x, float s)
#define FF_MPV_FLAG_CBP_RD
void ff_h263_update_motion_val(MpegEncContext *s)
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static void mpv_encode_init_static(void)
#define CANDIDATE_MB_TYPE_INTER4V
static int put_bytes_left(const PutBitContext *s, int round_up)
#define MAX_PICTURE_COUNT
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
void ff_mpv_common_end(MpegEncContext *s)
static int frame_start(MpegEncContext *s)
static int ff_thread_once(char *control, void(*routine)(void))
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static void update_mb_info(MpegEncContext *s, int startcode)
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static uint8_t default_fcode_tab[MAX_MV *2+1]
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
static void build_basis(uint8_t *perm)
int has_b_frames
Size of the frame reordering buffer in the decoder.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
const uint8_t ff_h263_chroma_qscale_table[32]
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
const uint8_t *const ff_mpeg2_dc_scale_table[4]
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
static void fn() direct(const ftype *in, const ctype *ir, int len, ftype *out)
#define LOCAL_ALIGNED_16(t, v,...)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int64_t rc_max_rate
maximum bitrate
uint64_t error[AV_NUM_DATA_POINTERS]
error
This structure describes the bitrate properties of an encoded bitstream.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
float p_masking
p block masking (0-> disabled)
static int mb_var_thread(AVCodecContext *c, void *arg)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
int rc_buffer_size
decoder bitstream buffer size
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
PutBitContext pb2
used for data partitioned VOPs
static double sqr(double x)
#define LIBAVUTIL_VERSION_INT
void ff_write_pass1_stats(MpegEncContext *s)
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Describe the class of an AVClass context structure.
#define PTRDIFF_SPECIFIER
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static void write_mb_info(MpegEncContext *s)
int f_code
forward MV resolution
av_cold void ff_mpv_idct_init(MpegEncContext *s)
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
struct AVCodecInternal * internal
Private context used for internal data.
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
int64_t bit_rate
the average bitrate
#define ROUNDED_DIV(a, b)
void ff_faandct(int16_t *data)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void ff_fdct_ifast(int16_t *data)
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
int ildct_cmp
interlaced DCT comparison function
int ff_vbv_update(MpegEncContext *s, int frame_size)
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
void ff_jpeg_fdct_islow_8(int16_t *data)
int trellis
trellis RD quantization
void ff_mpeg4_init_partitions(MpegEncContext *s)
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
void ff_jpeg_fdct_islow_10(int16_t *data)
void ff_mpeg1_encode_init(MpegEncContext *s)
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static int weight(int i, int blen, int offset)
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
void ff_mpeg4_merge_partitions(MpegEncContext *s)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static int get_sae(uint8_t *src, int ref, int stride)
void ff_dct_encode_init_x86(MpegEncContext *s)
char * stats_out
pass1 encoding statistics output buffer
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
void ff_mpeg4_clean_buffers(MpegEncContext *s)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
void ff_mpeg1_clean_buffers(MpegEncContext *s)
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
const int16_t ff_mpeg4_default_intra_matrix[64]
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
#define AV_NOPTS_VALUE
Undefined timestamp value.
static const AVOption mpv_generic_options[]
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
#define FF_MPV_FLAG_QP_RD
#define CANDIDATE_MB_TYPE_DIRECT0
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
const uint16_t ff_mpeg1_default_intra_matrix[256]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
The reader does not expect b to be semantically unsigned here, and if the code is changed, by maybe adding a cast, a division, or other operations, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
const int16_t ff_mpeg4_default_non_intra_matrix[64]
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
#define MV_TYPE_FIELD
2 vectors, one per field
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int flags
A combination of AV_PKT_FLAG values.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
unsigned int byte_buffer_size
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
#define FF_COMPLIANCE_NORMAL
#define UNI_AC_ENC_INDEX(run, level)
#define AV_LOG_INFO
Standard information.
static void update_qscale(MpegEncContext *s)
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
static void ff_update_block_index(MpegEncContext *s)
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int put_bits_count(PutBitContext *s)
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static int encode_thread(AVCodecContext *c, void *arg)
const uint32_t ff_square_tab[512]
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
PutBitContext tex_pb
used for data partitioned VOPs
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
static const int32_t qmat16[MAT_SIZE]
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
void ff_h261_reorder_mb_index(MpegEncContext *s)
void ff_h263_encode_init(MpegEncContext *s)
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
const uint16_t ff_h263_format[8][2]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
int ff_h261_get_picture_format(int width, int height)
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
int ff_init_me(MpegEncContext *s)
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
av_cold void ff_rate_control_uninit(MpegEncContext *s)
#define CANDIDATE_MB_TYPE_DIRECT
double buffer_index
amount of bits in the video/audio buffer
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
const uint8_t ff_zigzag_direct[64]
#define AV_CODEC_FLAG_CLOSED_GOP
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed. Return 0, or at least make progress towards producing a frame.
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
#define CANDIDATE_MB_TYPE_BIDIR_I
const uint16_t ff_inv_aanscales[64]
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
void ff_h263_loop_filter(MpegEncContext *s)
#define AV_INPUT_BUFFER_PADDING_SIZE
void ff_set_mpeg4_time(MpegEncContext *s)
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
int dquant
qscale difference to prev qscale
float dark_masking
darkness masking (0-> disabled)
main external API structure.
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
#define CANDIDATE_MB_TYPE_INTRA
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
int qmin
minimum quantizer
static int select_input_picture(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
static void frame_end(MpegEncContext *s)
static int ref[MAX_W *MAX_W]
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static float mean(const float *input, int size)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define CANDIDATE_MB_TYPE_FORWARD
#define FF_MB_DECISION_RD
rate distortion
static int shift(int a, int b)
@ AV_PICTURE_TYPE_P
Predicted.
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow.
void(* fdct)(int16_t *block)
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
int slices
Number of slices.
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
This structure stores compressed data.
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
static int encode_picture(MpegEncContext *s, int picture_number)
int width
picture width / height.
av_cold void ff_h261_encode_init(MpegEncContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
static const double coeff[2][5]
The exact code depends on how similar the blocks are and how related they are to the block
int misc_bits
cbp, mb_type
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
void ff_get_2pass_fcode(MpegEncContext *s)
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
#define FF_MPV_FLAG_STRICT_GOP
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static const uint8_t sp5x_qscale_five_quant_table[][64]
int display_picture_number
picture number in display order
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define CANDIDATE_MB_TYPE_BACKWARD
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
const uint16_t ff_aanscales[64]
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
static void write_slice_end(MpegEncContext *s)