00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/mathematics.h"
00032 #include "libavutil/opt.h"
00033 #include "avcodec.h"
00034 #include "dsputil.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "h263.h"
00038 #include "mjpegenc.h"
00039 #include "msmpeg4.h"
00040 #include "faandct.h"
00041 #include "thread.h"
00042 #include "aandcttab.h"
00043 #include "flv.h"
00044 #include "mpeg4video.h"
00045 #include "internal.h"
00046 #include <limits.h>
00047 #include "sp5x.h"
00048
00049
00050
00051
00052 static int encode_picture(MpegEncContext *s, int picture_number);
00053 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00054 static int sse_mb(MpegEncContext *s);
00055 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
00056 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
00057
00058
00059
00060
00061
00062
00063 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
00064 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
00065
/**
 * Build fixed-point quantization multiplier tables for one quant matrix.
 *
 * For every qscale in [qmin, qmax], 64 reciprocal factors are computed so
 * that quantization can be performed with a multiply + shift instead of a
 * division.  The fixed-point formula depends on the FDCT in use, because
 * fdct_ifast (and faandct without FAAN_POSTSCALE) leaves the AAN
 * post-scale factors in its output, which must be folded into the table.
 *
 * @param dsp          DSP context; fdct pointer selects the formula and
 *                     idct_permutation maps natural to permuted order
 * @param qmat         output: QMAT_SHIFT fixed-point factors (C quantizer)
 * @param qmat16       output: 16-bit factor/bias pairs (MMX quantizer);
 *                     only filled in the generic (non-jpeg/non-ifast) path
 * @param quant_matrix input quant matrix, natural (zigzag source) order
 * @param bias         rounding bias, QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        1 to skip coefficient 0 (DC) in the overflow scan
 */
void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
            dsp->fdct == ff_jpeg_fdct_islow_10
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
#endif
            ) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                /* 1 / (qscale * quant_matrix[j]) in QMAT_SHIFT fixed point */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
#endif
                   ) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                /* ff_aanscales is a <<14 fixed-point table, hence the
                 * extra 14 bits of headroom in the numerator */
                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
                                        (ff_aanscales[i] * qscale * quant_matrix[j]));
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));

                /* 16-bit factor for the MMX quantizer */
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
                                       (qscale * quant_matrix[j]);

                /* 0 would disable the coefficient and 128*256 does not fit
                 * the 16-bit multiplier; clamp both to the largest value */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
            }
        }

        /* Scan for possible 32-bit overflow of max_coeff * qmat factor and
         * record how many extra shift bits would be needed to avoid it. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                || dsp->fdct == ff_faandct
#endif
                ) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
00153
00154 static inline void update_qscale(MpegEncContext *s)
00155 {
00156 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
00157 (FF_LAMBDA_SHIFT + 7);
00158 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00159
00160 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
00161 FF_LAMBDA_SHIFT;
00162 }
00163
00164 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
00165 {
00166 int i;
00167
00168 if (matrix) {
00169 put_bits(pb, 1, 1);
00170 for (i = 0; i < 64; i++) {
00171 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
00172 }
00173 } else
00174 put_bits(pb, 1, 0);
00175 }
00176
00180 void ff_init_qscale_tab(MpegEncContext *s)
00181 {
00182 int8_t * const qscale_table = s->current_picture.f.qscale_table;
00183 int i;
00184
00185 for (i = 0; i < s->mb_num; i++) {
00186 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
00187 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
00188 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
00189 s->avctx->qmax);
00190 }
00191 }
00192
/**
 * Copy frame-level metadata from the user-supplied frame to the internal
 * picture, and — when motion-estimation thresholds are enabled — also the
 * per-macroblock motion information needed to reuse previous-pass motion.
 */
static void copy_picture_attributes(MpegEncContext *s,
                                    AVFrame *dst,
                                    AVFrame *src)
{
    int i;

    dst->pict_type              = src->pict_type;
    dst->quality                = src->quality;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;

    dst->pts              = src->pts;
    dst->interlaced_frame = src->interlaced_frame;
    dst->top_field_first  = src->top_field_first;

    if (s->avctx->me_threshold) {
        /* Caller is expected to have supplied motion data; log (but do not
         * abort on) any missing field. */
        if (!src->motion_val[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
        if (!src->mb_type)
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
        if (!src->ref_index[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
        if (src->motion_subsample_log2 != dst->motion_subsample_log2)
            av_log(s->avctx, AV_LOG_ERROR,
                   "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
                   src->motion_subsample_log2, dst->motion_subsample_log2);

        memcpy(dst->mb_type, src->mb_type,
               s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));

        /* Two lists (forward/backward). Stride/height are derived from the
         * motion vector subsampling; the copies assume src and dst use the
         * same subsampling (checked above, only logged on mismatch). */
        for (i = 0; i < 2; i++) {
            int stride = ((16 * s->mb_width ) >>
                          src->motion_subsample_log2) + 1;
            int height = ((16 * s->mb_height) >> src->motion_subsample_log2);

            if (src->motion_val[i] &&
                src->motion_val[i] != dst->motion_val[i]) {
                memcpy(dst->motion_val[i], src->motion_val[i],
                       2 * stride * height * sizeof(int16_t));
            }
            if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
                memcpy(dst->ref_index[i], src->ref_index[i],
                       s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
            }
        }
    }
}
00240
00241 static void update_duplicate_context_after_me(MpegEncContext *dst,
00242 MpegEncContext *src)
00243 {
00244 #define COPY(a) dst->a= src->a
00245 COPY(pict_type);
00246 COPY(current_picture);
00247 COPY(f_code);
00248 COPY(b_code);
00249 COPY(qscale);
00250 COPY(lambda);
00251 COPY(lambda2);
00252 COPY(picture_in_gop_number);
00253 COPY(gop_picture_number);
00254 COPY(frame_pred_frame_dct);
00255 COPY(progressive_frame);
00256 COPY(partitioned_frame);
00257 #undef COPY
00258 }
00259
00264 static void MPV_encode_defaults(MpegEncContext *s)
00265 {
00266 int i;
00267 MPV_common_defaults(s);
00268
00269 for (i = -16; i < 16; i++) {
00270 default_fcode_tab[i + MAX_MV] = 1;
00271 }
00272 s->me.mv_penalty = default_mv_penalty;
00273 s->fcode_tab = default_fcode_tab;
00274 }
00275
00276
/**
 * Initialize the MPEG-video family encoder.
 *
 * Validates the user settings against the capabilities of the selected
 * codec, derives all internal encoding parameters, selects per-codec
 * output formats, initializes the quant matrices and starts rate control.
 *
 * @return 0 on success, -1 on invalid settings or initialization failure
 */
av_cold int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    /* Reject pixel formats the selected codec cannot encode. */
    switch (avctx->codec_id) {
    case CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != PIX_FMT_YUV420P &&
            avctx->pix_fmt != PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case CODEC_ID_LJPEG:
        /* non-J (limited range) variants are allowed only when the user
         * lowers strictness to "unofficial" */
        if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
            avctx->pix_fmt != PIX_FMT_YUVJ422P &&
            avctx->pix_fmt != PIX_FMT_YUVJ444P &&
            avctx->pix_fmt != PIX_FMT_BGRA &&
            ((avctx->pix_fmt != PIX_FMT_YUV420P &&
              avctx->pix_fmt != PIX_FMT_YUV422P &&
              avctx->pix_fmt != PIX_FMT_YUV444P) ||
             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
            return -1;
        }
        break;
    case CODEC_ID_MJPEG:
    case CODEC_ID_AMV:
        if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
            avctx->pix_fmt != PIX_FMT_YUVJ422P &&
            ((avctx->pix_fmt != PIX_FMT_YUV420P &&
              avctx->pix_fmt != PIX_FMT_YUV422P) ||
             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if (avctx->pix_fmt != PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    /* Derive the internal chroma format from the pixel format. */
    switch (avctx->pix_fmt) {
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    /* Mirror the user settings into the encoder context. */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    }
    s->gop_size     = avctx->gop_size;
    s->avctx        = avctx;
    s->flags        = avctx->flags;
    s->flags2       = avctx->flags2;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->luma_elim_threshold   = avctx->luma_elim_threshold;
    s->chroma_elim_threshold = avctx->chroma_elim_threshold;
    s->strict_std_compliance = avctx->strict_std_compliance;
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
    if (avctx->flags & CODEC_FLAG_PART)
        s->data_partitioning = 1;
#endif
    s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size   = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    /* Adaptive quantization is only meaningful when the quantizer is not
     * fixed and some masking/QP-RD feature requests it. */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         s->avctx->border_masking ||
                         (s->flags & CODEC_FLAG_QP_RD)) &&
                        !s->fixed_qscale;

    s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
    s->alternate_scan   = !!(s->flags & CODEC_FLAG_ALT_SCAN);
    s->intra_vlc_format = !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
    s->q_scale_type     = !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
    s->obmc             = !!(s->flags & CODEC_FLAG_OBMC);
#endif

    /* ---- rate control sanity checks ---- */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        av_log(avctx, AV_LOG_ERROR,
               "a vbv buffer size is needed, "
               "for encoding with a maximum bitrate\n");
        return -1;
    }

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    }

    /* VBV buffer must hold at least one frame worth of bits. */
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    /* The MPEG-1/2 vbv_delay field is 16 bit in 90 kHz units; a larger
     * buffer cannot be signalled and falls back to 0xFFFF (VBR). */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == CODEC_ID_MPEG1VIDEO ||
         s->codec_id == CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    }

    /* ---- per-feature / per-codec compatibility checks ---- */
    if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 &&
        s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
        s->codec_id != CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return -1;
    }

#if FF_API_MPEGVIDEO_GLOBAL_OPTS
    if (s->obmc && s->codec_id != CODEC_ID_H263 &&
        s->codec_id != CODEC_ID_H263P) {
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
        return -1;
    }
#endif

    if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

#if FF_API_MPEGVIDEO_GLOBAL_OPTS
    if (s->data_partitioning && s->codec_id != CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "data partitioning not supported by codec\n");
        return -1;
    }
#endif

    if (s->max_b_frames &&
        s->codec_id != CODEC_ID_MPEG4 &&
        s->codec_id != CODEC_ID_MPEG1VIDEO &&
        s->codec_id != CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    /* These codecs encode the pixel aspect ratio in 8 bits per component;
     * reduce over-range values instead of failing. */
    if ((s->codec_id == CODEC_ID_MPEG4 ||
         s->codec_id == CODEC_ID_H263 ||
         s->codec_id == CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
    }

    if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
                     | CODEC_FLAG_ALT_SCAN
#endif
                     )) &&
        s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if ((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if ((s->flags & CODEC_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->flags & CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return -1;
    }

#if FF_API_MPEGVIDEO_GLOBAL_OPTS
    if ((s->flags2 & CODEC_FLAG2_INTRA_VLC) &&
        s->codec_id != CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR,
               "intra vlc table not supported by codec\n");
        return -1;
    }
#endif

    if (s->flags & CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if (s->q_scale_type == 1) {
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
        if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant is only available for mpeg2\n");
            return -1;
        }
#endif
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    /* Slice threading is only implemented for a subset of the codecs. */
    if (s->avctx->thread_count > 1 &&
        s->codec_id != CODEC_ID_MPEG4 &&
        s->codec_id != CODEC_ID_MPEG1VIDEO &&
        s->codec_id != CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != CODEC_ID_H263P
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
         || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
#endif
        )) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
               "patch welcome\n");
        return -1;
    }

    /* Slices are required for threaded encoding. */
    if (s->avctx->thread_count > 1)
        s->rtp_mode = 1;

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    /* thresholds are later scaled by <<8; keep them out of overflow range */
    i = (INT_MAX / 2 + 128) >> 8;
    if (avctx->me_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }
    if (avctx->mb_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }

    if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    /* Normalize the time base (e.g. 50/2 -> 25/1). */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
    if (i > 1) {
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
    }

    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id==CODEC_ID_AMV) {
        /* (a + x*3/8) / x quantizer rounding */
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
    } else {
        s->intra_quant_bias = 0;
        /* (a - x/4) / x quantizer rounding */
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    }

    /* Explicit user biases override the codec defaults. */
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
                                  &chroma_v_shift);

    /* MPEG-4 stores the timebase denominator in a 16-bit field. */
    if (avctx->codec_id == CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
               (1 << 16) - 1);
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* ---- per-codec output format and delay setup ---- */
    switch (avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode   = 1;
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
    case CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1;
        if (avctx->codec->id == CODEC_ID_LJPEG && avctx->pix_fmt == PIX_FMT_BGRA) {
            s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
            s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
            s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
        } else {
            s->mjpeg_vsample[0] = 2;
            s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
            s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
            s->mjpeg_hsample[0] = 2;
            s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
            s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
        }
        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus  = 1;
        /* Fx */
#if FF_API_MPEGVIDEO_GLOBAL_OPTS
        if (avctx->flags & CODEC_FLAG_H263P_UMV)
            s->umvplus = 1;
        if (avctx->flags & CODEC_FLAG_H263P_AIV)
            s->alt_inter_vlc = 1;
        if (avctx->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
            s->h263_slice_structured = 1;
#endif
        s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode        = 0; /* don't allow GOB */
        avctx->delay       = 0;
        s->low_delay       = 1;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        avctx->delay       = 0;
        s->low_delay       = 1;
        s->modified_quant  = 1;
        s->h263_aic        = 1;
        s->h263_plus       = 1;
        s->loop_filter     = 1;
        s->unrestricted_mv = 0;
        break;
    case CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
        avctx->delay       = 0;
        s->low_delay       = 1;
        break;
    case CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames = !s->low_delay;

    s->encoding = 1;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
                                                CODEC_FLAG_INTERLACED_ME) ||
                                s->alternate_scan);

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    if (!s->dct_quantize)
        s->dct_quantize = dct_quantize_c;
    if (!s->denoise_dct)
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    s->quant_precision = 5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* Build the (idct-permuted) default or user quant matrices. */
    for (i = 0; i < 64; i++) {
        int j = s->dsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
            s->mpeg_quant) {
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else {
            /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix; MJPEG handles its matrices internally */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
                          31, 1);
        ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);
    }

    if (ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
00895
/**
 * Free all encoder resources allocated by MPV_encode_init().
 * @return 0 (always succeeds)
 */
av_cold int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    MPV_common_end(s);
    if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    return 0;
}
00911
/**
 * Sum of absolute differences between a 16x16 block and a constant
 * reference value (used as an intra-coding cost estimate).
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int total = 0;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            int d = line[col] - ref;
            total += (d >= 0) ? d : -d;
        }
    }

    return total;
}
00925
00926 static int get_intra_count(MpegEncContext *s, uint8_t *src,
00927 uint8_t *ref, int stride)
00928 {
00929 int x, y, w, h;
00930 int acc = 0;
00931
00932 w = s->width & ~15;
00933 h = s->height & ~15;
00934
00935 for (y = 0; y < h; y += 16) {
00936 for (x = 0; x < w; x += 16) {
00937 int offset = x + y * stride;
00938 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
00939 16);
00940 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
00941 int sae = get_sae(src + offset, mean, stride);
00942
00943 acc += sae + 500 < sad;
00944 }
00945 }
00946 return acc;
00947 }
00948
00949
/**
 * Take a user input frame into the encoder's input picture FIFO.
 *
 * Validates/derives the timestamp, then either references the user's
 * buffers directly (when strides match and the user guarantees the data
 * stays valid) or copies the frame into an internal picture.
 *
 * @param pic_arg user frame, or NULL to signal end of stream (flush)
 * @return 0 on success, negative on error (bad pts, no free picture,
 *         allocation failure)
 */
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
{
    AVFrame *pic = NULL;
    int64_t pts;
    int i;
    const int encoding_delay = s->max_b_frames;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        pic_arg->display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t time = pts;
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly increasing */
                if (time <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, Invalid timestamp=%"PRId64", "
                           "last=%"PRId64"\n", pts, s->user_specified_pts);
                    return -1;
                }
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: guess by extrapolating from the last one,
             * or fall back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = pic_arg->display_picture_number;
            }
        }
    }

    if (pic_arg) {
        /* direct (zero-copy) mode only works when the user preserves the
         * input across the encoding delay and all strides match ours */
        if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
            direct = 0;
        if (pic_arg->linesize[0] != s->linesize)
            direct = 0;
        if (pic_arg->linesize[1] != s->uvlinesize)
            direct = 0;
        if (pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;

        if (direct) {
            i = ff_find_unused_picture(s, 1);
            if (i < 0)
                return i;

            pic = (AVFrame *) &s->picture[i];
            pic->reference = 3;

            /* reference the caller's planes instead of copying */
            for (i = 0; i < 4; i++) {
                pic->data[i]     = pic_arg->data[i];
                pic->linesize[i] = pic_arg->linesize[i];
            }
            if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
                return -1;
            }
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;

            pic = (AVFrame *) &s->picture[i];
            pic->reference = 3;

            if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
                return -1;
            }

            if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                /* already in our buffer, nothing to copy */
            } else {
                int h_chroma_shift, v_chroma_shift;
                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
                                              &v_chroma_shift);

                /* copy plane by plane, adapting to differing strides */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->data[i];

                    if(s->codec_id == CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)){
                        /* AMV pads to whole macroblock rows */
                        h= ((s->height+15)/16*16)>>v_shift;
                    }

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        while (h--) {
                            memcpy(dst, src, w);
                            dst += dst_stride;
                            src += src_stride;
                        }
                    }
                }
            }
        }
        copy_picture_attributes(s, pic, pic_arg);
        pic->pts = pts;
    }

    /* shift the input FIFO; on flush (pic == NULL) this drains one slot */
    for (i = 1; i < MAX_PICTURE_COUNT ; i++)
        s->input_picture[i - 1] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
01078
01079 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
01080 {
01081 int x, y, plane;
01082 int score = 0;
01083 int64_t score64 = 0;
01084
01085 for (plane = 0; plane < 3; plane++) {
01086 const int stride = p->f.linesize[plane];
01087 const int bw = plane ? 1 : 2;
01088 for (y = 0; y < s->mb_height * bw; y++) {
01089 for (x = 0; x < s->mb_width * bw; x++) {
01090 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
01091 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
01092 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
01093 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
01094
01095 switch (s->avctx->frame_skip_exp) {
01096 case 0: score = FFMAX(score, v); break;
01097 case 1: score += FFABS(v); break;
01098 case 2: score += v * v; break;
01099 case 3: score64 += FFABS(v * v * (int64_t)v); break;
01100 case 4: score64 += v * v * (int64_t)(v * v); break;
01101 }
01102 }
01103 }
01104 }
01105
01106 if (score)
01107 score64 = score;
01108
01109 if (score64 < s->avctx->frame_skip_threshold)
01110 return 1;
01111 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
01112 return 1;
01113 return 0;
01114 }
01115
01116 static int estimate_best_b_count(MpegEncContext *s)
01117 {
01118 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
01119 AVCodecContext *c = avcodec_alloc_context3(NULL);
01120 AVFrame input[FF_MAX_B_FRAMES + 2];
01121 const int scale = s->avctx->brd_scale;
01122 int i, j, out_size, p_lambda, b_lambda, lambda2;
01123 int outbuf_size = s->width * s->height;
01124 uint8_t *outbuf = av_malloc(outbuf_size);
01125 int64_t best_rd = INT64_MAX;
01126 int best_b_count = -1;
01127
01128 assert(scale >= 0 && scale <= 3);
01129
01130
01131
01132 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
01133
01134 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
01135 if (!b_lambda)
01136 b_lambda = p_lambda;
01137 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
01138 FF_LAMBDA_SHIFT;
01139
01140 c->width = s->width >> scale;
01141 c->height = s->height >> scale;
01142 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
01143 CODEC_FLAG_INPUT_PRESERVED ;
01144 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
01145 c->mb_decision = s->avctx->mb_decision;
01146 c->me_cmp = s->avctx->me_cmp;
01147 c->mb_cmp = s->avctx->mb_cmp;
01148 c->me_sub_cmp = s->avctx->me_sub_cmp;
01149 c->pix_fmt = PIX_FMT_YUV420P;
01150 c->time_base = s->avctx->time_base;
01151 c->max_b_frames = s->max_b_frames;
01152
01153 if (avcodec_open2(c, codec, NULL) < 0)
01154 return -1;
01155
01156 for (i = 0; i < s->max_b_frames + 2; i++) {
01157 int ysize = c->width * c->height;
01158 int csize = (c->width / 2) * (c->height / 2);
01159 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
01160 s->next_picture_ptr;
01161
01162 avcodec_get_frame_defaults(&input[i]);
01163 input[i].data[0] = av_malloc(ysize + 2 * csize);
01164 input[i].data[1] = input[i].data[0] + ysize;
01165 input[i].data[2] = input[i].data[1] + csize;
01166 input[i].linesize[0] = c->width;
01167 input[i].linesize[1] =
01168 input[i].linesize[2] = c->width / 2;
01169
01170 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
01171 pre_input = *pre_input_ptr;
01172
01173 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
01174 pre_input.f.data[0] += INPLACE_OFFSET;
01175 pre_input.f.data[1] += INPLACE_OFFSET;
01176 pre_input.f.data[2] += INPLACE_OFFSET;
01177 }
01178
01179 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
01180 pre_input.f.data[0], pre_input.f.linesize[0],
01181 c->width, c->height);
01182 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
01183 pre_input.f.data[1], pre_input.f.linesize[1],
01184 c->width >> 1, c->height >> 1);
01185 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
01186 pre_input.f.data[2], pre_input.f.linesize[2],
01187 c->width >> 1, c->height >> 1);
01188 }
01189 }
01190
01191 for (j = 0; j < s->max_b_frames + 1; j++) {
01192 int64_t rd = 0;
01193
01194 if (!s->input_picture[j])
01195 break;
01196
01197 c->error[0] = c->error[1] = c->error[2] = 0;
01198
01199 input[0].pict_type = AV_PICTURE_TYPE_I;
01200 input[0].quality = 1 * FF_QP2LAMBDA;
01201 out_size = avcodec_encode_video(c, outbuf,
01202 outbuf_size, &input[0]);
01203
01204
01205 for (i = 0; i < s->max_b_frames + 1; i++) {
01206 int is_p = i % (j + 1) == j || i == s->max_b_frames;
01207
01208 input[i + 1].pict_type = is_p ?
01209 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
01210 input[i + 1].quality = is_p ? p_lambda : b_lambda;
01211 out_size = avcodec_encode_video(c, outbuf, outbuf_size,
01212 &input[i + 1]);
01213 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01214 }
01215
01216
01217 while (out_size) {
01218 out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
01219 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01220 }
01221
01222 rd += c->error[0] + c->error[1] + c->error[2];
01223
01224 if (rd < best_rd) {
01225 best_rd = rd;
01226 best_b_count = j;
01227 }
01228 }
01229
01230 av_freep(&outbuf);
01231 avcodec_close(c);
01232 av_freep(&c);
01233
01234 for (i = 0; i < s->max_b_frames + 2; i++) {
01235 av_freep(&input[i].data[0]);
01236 }
01237
01238 return best_b_count;
01239 }
01240
/**
 * Pick the next picture to encode from the input FIFO: runs frame-skip
 * detection, decides the B-frame run length, handles GOP boundaries and
 * fills the reordered_input_picture[] queue in coding order.
 * On success s->new_picture/s->current_picture describe the frame to
 * encode (new_picture.f.data[0] == NULL means "nothing to encode").
 * @return 0 on success, a negative error code otherwise
 */
static int select_input_picture(MpegEncContext *s)
{
    int i;

    /* shift the reorder FIFO by one slot */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
        if (
            s->next_picture_ptr == NULL || s->intra_only) {
            /* no reference yet (or intra-only codec): force an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            /* frame skipping: drop the input when it is close enough to
             * the previous reference */
            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    /* detach/release the skipped input picture */
                    if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
                        for (i = 0; i < 4; i++)
                            s->input_picture[0]->f.data[i] = NULL;
                        s->input_picture[0]->f.type = 0;
                    } else {
                        assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
                               s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);

                        s->avctx->release_buffer(s->avctx,
                                                 (AVFrame *) s->input_picture[0]);
                    }

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            /* two-pass mode: take picture types from the first-pass log */
            if (s->flags & CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f.display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        /* input FIFO ran dry: end the B-run with a P-frame */
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f.pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            /* choose how many B-frames precede the next anchor frame */
            if (s->avctx->b_frame_strategy == 0) {
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* score each candidate by intra-MB count vs its predecessor */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i    ]->f.data[0],
                                            s->input_picture[i - 1]->f.data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] == NULL ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores so they are recomputed next time */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* honor explicitly requested non-B picture types inside the
             * run: they shorten the B-run */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f.pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* GOP boundary handling */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->flags2 & CODEC_FLAG2_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* coding order: anchor frame first, then its B-frames */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f.pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f.coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if (s->reordered_input_picture[0]) {
        /* anchor frames become references, B-frames do not */
        s->reordered_input_picture[0]->f.reference =
            s->reordered_input_picture[0]->f.pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
            s->avctx->rc_buffer_size) {
            /* input is shared, or may be needed again by the VBV retry
             * path -> encode from an internal copy */
            Picture *pic;
            int i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->f.reference = s->reordered_input_picture[0]->f.reference;
            if (ff_alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            /* mark the original picture unused / release its buffer */
            if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
                s->avctx->release_buffer(s->avctx,
                                         (AVFrame *) s->reordered_input_picture[0]);
            for (i = 0; i < 4; i++)
                s->reordered_input_picture[0]->f.data[i] = NULL;
            s->reordered_input_picture[0]->f.type = 0;

            copy_picture_attributes(s, (AVFrame *) pic,
                                    (AVFrame *) s->reordered_input_picture[0]);

            s->current_picture_ptr = pic;
        } else {
            /* input is not shared -> encode it in place */
            assert(s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_USER ||
                   s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                s->new_picture.f.data[i] += INPLACE_OFFSET;
            }
        }
        ff_copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number = s->new_picture.f.display_picture_number;

    } else {
        /* nothing to encode this call (e.g. frame was skipped) */
        memset(&s->new_picture, 0, sizeof(Picture));
    }
    return 0;
}
01441
/**
 * Encode one video frame: loads and reorders the input picture, encodes
 * it (retrying with a larger lambda on VBV overflow), pads the stream
 * with stuffing bits when rate control asks for it, and patches the
 * MPEG-1/2 vbv_delay header field in CBR mode.
 * @return number of bytes written to buf, or -1 on error
 */
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic_arg = data;
    int i, stuffing_count;
    int context_count = s->slice_context_count;

    /* split the output buffer proportionally between slice threads */
    for (i = 0; i < context_count; i++) {
        int start_y = s->thread_context[i]->start_mb_y;
        int end_y = s->thread_context[i]-> end_mb_y;
        int h = s->mb_height;
        uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
        uint8_t *end = buf + (size_t)(((int64_t) buf_size) * end_y / h);

        init_put_bits(&s->thread_context[i]->pb, start, end - start);
    }

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* anything selected to encode this call? */
    if (s->new_picture.f.data[0]) {
        s->pict_type = s->new_picture.f.pict_type;

        MPV_frame_start(s, avctx);
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame statistics to the user-visible context */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits = s->mv_bits;
        avctx->misc_bits = s->misc_bits;
        avctx->i_tex_bits = s->i_tex_bits;
        avctx->p_tex_bits = s->p_tex_bits;
        avctx->i_count = s->i_count;

        avctx->p_count = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count = s->skip_count;

        MPV_frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(s);

        /* VBV compliance: if the coded frame is too large, raise lambda
         * and re-encode until it fits or lmax is reached */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->avctx->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;

                /* undo state changes the first encode pass made */
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == CODEC_ID_H263P ||
                        s->codec_id == CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind all slice-thread bitstream writers */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->flags & CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
            avctx->error[i] += s->current_picture_ptr->f.error[i];
        }

        if (s->flags & CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                       put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* pad the frame with stuffing bytes if rate control requires it */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case CODEC_ID_MPEG1VIDEO:
            case CODEC_ID_MPEG2VIDEO:
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case CODEC_ID_MPEG4:
                /* start code of a stuffing VOP, then 0xFF filler bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* CBR MPEG-1: recompute and patch the vbv_delay field that was
         * written into the picture header earlier */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits = s->avctx->rc_max_rate *
                            av_q2d(s->avctx->time_base);
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            assert(vbv_delay < 0xFFFF);

            /* splice the 16-bit value across the three header bytes */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1] = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay = vbv_delay * 300;
        }
        s->total_bits += s->frame_bits;
        avctx->frame_bits = s->frame_bits;
    } else {
        /* no picture selected: nothing must have been written */
        assert((put_bits_ptr(&s->pb) == s->pb.buf));
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    return s->frame_bits / 8;
}
01623
01624 static inline void dct_single_coeff_elimination(MpegEncContext *s,
01625 int n, int threshold)
01626 {
01627 static const char tab[64] = {
01628 3, 2, 2, 1, 1, 1, 1, 1,
01629 1, 1, 1, 1, 1, 1, 1, 1,
01630 1, 1, 1, 1, 1, 1, 1, 1,
01631 0, 0, 0, 0, 0, 0, 0, 0,
01632 0, 0, 0, 0, 0, 0, 0, 0,
01633 0, 0, 0, 0, 0, 0, 0, 0,
01634 0, 0, 0, 0, 0, 0, 0, 0,
01635 0, 0, 0, 0, 0, 0, 0, 0
01636 };
01637 int score = 0;
01638 int run = 0;
01639 int i;
01640 DCTELEM *block = s->block[n];
01641 const int last_index = s->block_last_index[n];
01642 int skip_dc;
01643
01644 if (threshold < 0) {
01645 skip_dc = 0;
01646 threshold = -threshold;
01647 } else
01648 skip_dc = 1;
01649
01650
01651 if (last_index <= skip_dc - 1)
01652 return;
01653
01654 for (i = 0; i <= last_index; i++) {
01655 const int j = s->intra_scantable.permutated[i];
01656 const int level = FFABS(block[j]);
01657 if (level == 1) {
01658 if (skip_dc && i == 0)
01659 continue;
01660 score += tab[run];
01661 run = 0;
01662 } else if (level > 1) {
01663 return;
01664 } else {
01665 run++;
01666 }
01667 }
01668 if (score >= threshold)
01669 return;
01670 for (i = skip_dc; i <= last_index; i++) {
01671 const int j = s->intra_scantable.permutated[i];
01672 block[j] = 0;
01673 }
01674 if (block[0])
01675 s->block_last_index[n] = 0;
01676 else
01677 s->block_last_index[n] = -1;
01678 }
01679
01680 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
01681 int last_index)
01682 {
01683 int i;
01684 const int maxlevel = s->max_qcoeff;
01685 const int minlevel = s->min_qcoeff;
01686 int overflow = 0;
01687
01688 if (s->mb_intra) {
01689 i = 1;
01690 } else
01691 i = 0;
01692
01693 for (; i <= last_index; i++) {
01694 const int j = s->intra_scantable.permutated[i];
01695 int level = block[j];
01696
01697 if (level > maxlevel) {
01698 level = maxlevel;
01699 overflow++;
01700 } else if (level < minlevel) {
01701 level = minlevel;
01702 overflow++;
01703 }
01704
01705 block[j] = level;
01706 }
01707
01708 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01709 av_log(s->avctx, AV_LOG_INFO,
01710 "warning, clipping %d dct coefficients to %d..%d\n",
01711 overflow, minlevel, maxlevel);
01712 }
01713
/* Compute a per-coefficient visual weight for an 8x8 pixel block, used
 * by the noise-shaping quantizer: each weight is derived from the local
 * activity (a variance-like measure over the pixel's 3x3 neighbourhood,
 * clipped to the block borders). */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            int xx, yy;

            /* 3x3 neighbourhood clipped to the 8x8 block */
            for (yy = FFMAX(y - 1, 0); yy < FFMIN(8, y + 2); yy++) {
                for (xx = FFMAX(x - 1, 0); xx < FFMIN(8, x + 2); xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
01737
/**
 * Encode one macroblock at (s->mb_x, s->mb_y): update the per-MB qscale,
 * fetch the source pixels (intra) or compute the motion-compensated
 * residual (inter), forward-DCT + quantize each 8x8 block, and emit the
 * codec-specific bitstream for the macroblock.
 *
 * @param motion_x        horizontal MV passed through to the bitstream writer
 * @param motion_y        vertical MV passed through to the bitstream writer
 * @param mb_block_height chroma block height in lines (8 for 4:2:0, 16 for 4:2:2)
 * @param mb_block_count  number of 8x8 blocks per MB (6 for 4:2:0, 8 for 4:2:2)
 */
static av_always_inline void encode_mb_internal(MpegEncContext *s,
                                                int motion_x, int motion_y,
                                                int mb_block_height,
                                                int mb_block_count)
{
    int16_t weight[8][64];
    DCTELEM orig[8][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int i;
    int skip_dct[8];
    int dct_offset = s->linesize * 8; /* bottom-half offset; halved stride for field DCT */
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    /* per-MB quantizer update when adaptive quantization is enabled */
    if (s->adaptive_quant) {
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;

        s->lambda = s->lambda_table[mb_xy];
        update_qscale(s);

        if (!(s->flags & CODEC_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                /* H.263-family syntax only allows dquant in [-2, 2] */
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == CODEC_ID_MPEG4) {
                    if (!s->mb_intra) {
                        if (s->pict_type == AV_PICTURE_TYPE_B) {
                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        if (s->mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->flags & CODEC_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y = s->new_picture.f.data[0] +
            (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_picture.f.data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
    ptr_cr = s->new_picture.f.data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;

    /* MB partially outside the frame: read through the edge-emulation buffer */
    if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != CODEC_ID_AMV){
        uint8_t *ebuf = s->edge_emu_buffer + 32;
        s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
                                mb_y * 16, s->width, s->height);
        ptr_y = ebuf;
        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
                                mb_block_height, mb_x * 8, mb_y * 8,
                                s->width >> 1, s->height >> 1);
        ptr_cb = ebuf + 18 * wrap_y;
        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
                                mb_block_height, mb_x * 8, mb_y * 8,
                                s->width >> 1, s->height >> 1);
        ptr_cr = ebuf + 18 * wrap_y + 8;
    }

    if (s->mb_intra) {
        /* choose frame vs field DCT ordering for interlaced content */
        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                    NULL, wrap_y, 8) +
                                s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                    NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                       NULL, wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
                                                       NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    /* switch luma addressing to field (interleaved) order */
                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
        s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
        s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
        s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift) {
                /* 4:2:2: two extra chroma blocks per component */
                s->dsp.get_pixels(s->block[6],
                                  ptr_cb + (dct_offset >> 1), wrap_c);
                s->dsp.get_pixels(s->block[7],
                                  ptr_cr + (dct_offset >> 1), wrap_c);
            }
        }
    } else {
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix = s->dsp.put_pixels_tab;
            op_qpix = s->dsp.put_qpel_pixels_tab;
        } else {
            op_pix = s->dsp.put_no_rnd_pixels_tab;
            op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
        }

        /* motion compensation: build the prediction in dest_* */
        if (s->mv_dir & MV_DIR_FORWARD) {
            MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
                       op_pix, op_qpix);
            /* a following backward pass must average into the prediction */
            op_pix = s->dsp.avg_pixels_tab;
            op_qpix = s->dsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
                       op_pix, op_qpix);
        }

        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                    ptr_y, wrap_y,
                                                    8) +
                                s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                    ptr_y + wrap_y * 8, wrap_y,
                                                    8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                       ptr_y,
                                                       wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
                                                       ptr_y + wrap_y,
                                                       wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        /* residual = source - prediction */
        s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                           dest_y + dct_offset, wrap_y);
        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                           dest_y + dct_offset + 8, wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) {
                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
                                   dest_cb + (dct_offset >> 1), wrap_c);
                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
                                   dest_cr + (dct_offset >> 1), wrap_c);
            }
        }

        /* low-variance MB: mark 8x8 blocks with negligible residual so
         * their DCT can be skipped entirely */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
                2 * s->qscale * s->qscale) {

            if (s->dsp.sad[1](NULL, ptr_y , dest_y,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + 8,
                              dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
                              dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
                              dest_y + dct_offset + 8,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) {
                if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
                                  dest_cb + (dct_offset >> 1),
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
                                  dest_cr + (dct_offset >> 1),
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    /* noise shaping: compute visual weights and keep the pre-quantize
     * coefficients so the refine pass can measure its error against them */
    if (s->avctx->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr , wrap_c);
        if (!s->chroma_y_shift) {
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
    }

    /* DCT & quantize all non-skipped blocks */
    assert(s->out_format != FMT_MJPEG || s->qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
                /* clipping is only needed when the quantizer overflowed */
                if (overflow)
                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
            } else
                s->block_last_index[i] = -1;
        }
        if (s->avctx->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->qscale);
                }
            }
        }

        /* drop inter blocks that are too cheap to be worth coding */
        if (s->luma_elim_threshold && !s->mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->flags & CODEC_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    /* gray-only encoding: emit flat mid-gray chroma DC blocks */
    if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
    }

    /* NOTE(review): re-derives block_last_index by scanning from the end;
     * presumably compensates for non-C dct_quantize implementations not
     * accounting for the alternate scan order — confirm against the SIMD
     * quantizer */
    if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* finally, emit the macroblock with the codec-specific writer */
    switch(s->codec_id){
    case CODEC_ID_MPEG1VIDEO:
    case CODEC_ID_MPEG2VIDEO:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_MPEG4:
        if (CONFIG_MPEG4_ENCODER)
            mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_MSMPEG4V2:
    case CODEC_ID_MSMPEG4V3:
    case CODEC_ID_WMV1:
        if (CONFIG_MSMPEG4_ENCODER)
            msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_H263:
    case CODEC_ID_H263P:
    case CODEC_ID_FLV1:
    case CODEC_ID_RV10:
    case CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case CODEC_ID_MJPEG:
    case CODEC_ID_AMV:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_mb(s, s->block);
        break;
    default:
        assert(0);
    }
}
02100
02101 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
02102 {
02103 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
02104 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
02105 }
02106
02107 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
02108 int i;
02109
02110 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02111
02112
02113 d->mb_skip_run= s->mb_skip_run;
02114 for(i=0; i<3; i++)
02115 d->last_dc[i] = s->last_dc[i];
02116
02117
02118 d->mv_bits= s->mv_bits;
02119 d->i_tex_bits= s->i_tex_bits;
02120 d->p_tex_bits= s->p_tex_bits;
02121 d->i_count= s->i_count;
02122 d->f_count= s->f_count;
02123 d->b_count= s->b_count;
02124 d->skip_count= s->skip_count;
02125 d->misc_bits= s->misc_bits;
02126 d->last_bits= 0;
02127
02128 d->mb_skipped= 0;
02129 d->qscale= s->qscale;
02130 d->dquant= s->dquant;
02131
02132 d->esc3_level_length= s->esc3_level_length;
02133 }
02134
02135 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
02136 int i;
02137
02138 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
02139 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02140
02141
02142 d->mb_skip_run= s->mb_skip_run;
02143 for(i=0; i<3; i++)
02144 d->last_dc[i] = s->last_dc[i];
02145
02146
02147 d->mv_bits= s->mv_bits;
02148 d->i_tex_bits= s->i_tex_bits;
02149 d->p_tex_bits= s->p_tex_bits;
02150 d->i_count= s->i_count;
02151 d->f_count= s->f_count;
02152 d->b_count= s->b_count;
02153 d->skip_count= s->skip_count;
02154 d->misc_bits= s->misc_bits;
02155
02156 d->mb_intra= s->mb_intra;
02157 d->mb_skipped= s->mb_skipped;
02158 d->mv_type= s->mv_type;
02159 d->mv_dir= s->mv_dir;
02160 d->pb= s->pb;
02161 if(s->data_partitioning){
02162 d->pb2= s->pb2;
02163 d->tex_pb= s->tex_pb;
02164 }
02165 d->block= s->block;
02166 for(i=0; i<8; i++)
02167 d->block_last_index[i]= s->block_last_index[i];
02168 d->interlaced_dct= s->interlaced_dct;
02169 d->qscale= s->qscale;
02170
02171 d->esc3_level_length= s->esc3_level_length;
02172 }
02173
/**
 * Trial-encode one macroblock with the given prediction type/motion and
 * keep it if it is cheaper than the best candidate so far.
 *
 * Candidates are written alternately into one of two scratch bitstream
 * buffer sets (indexed by *next_block); when a candidate wins, *dmin is
 * updated, the buffers are flipped, and the winning context is saved
 * into *best via copy_context_after_encode().
 *
 * The cost is the bit count, or — in FF_MB_DECISION_RD mode — the full
 * rate-distortion score lambda2*bits + SSE<<FF_LAMBDA_SHIFT.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                           int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* restore the pre-MB encoder state so every candidate starts equal */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* reconstruct into the scratchpad so the current best candidate's
           pixels in s->dest are not overwritten */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->rd_scratchpad;
        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32);
    }

    encode_mb(s, motion_x, motion_y);

    /* base cost: bits written across all active bitstreams */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full RD: decode the just-encoded MB and add distortion */
        MPV_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
02224
02225 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
02226 uint32_t *sq = ff_squareTbl + 256;
02227 int acc=0;
02228 int x,y;
02229
02230 if(w==16 && h==16)
02231 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
02232 else if(w==8 && h==8)
02233 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
02234
02235 for(y=0; y<h; y++){
02236 for(x=0; x<w; x++){
02237 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
02238 }
02239 }
02240
02241 assert(acc>=0);
02242
02243 return acc;
02244 }
02245
02246 static int sse_mb(MpegEncContext *s){
02247 int w= 16;
02248 int h= 16;
02249
02250 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02251 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02252
02253 if(w==16 && h==16)
02254 if(s->avctx->mb_cmp == FF_CMP_NSSE){
02255 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02256 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02257 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02258 }else{
02259 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02260 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02261 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02262 }
02263 else
02264 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
02265 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
02266 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
02267 }
02268
02269 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
02270 MpegEncContext *s= *(void**)arg;
02271
02272
02273 s->me.pre_pass=1;
02274 s->me.dia_size= s->avctx->pre_dia_size;
02275 s->first_slice_line=1;
02276 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
02277 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
02278 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02279 }
02280 s->first_slice_line=0;
02281 }
02282
02283 s->me.pre_pass=0;
02284
02285 return 0;
02286 }
02287
02288 static int estimate_motion_thread(AVCodecContext *c, void *arg){
02289 MpegEncContext *s= *(void**)arg;
02290
02291 ff_check_alignment();
02292
02293 s->me.dia_size= s->avctx->dia_size;
02294 s->first_slice_line=1;
02295 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
02296 s->mb_x=0;
02297 ff_init_block_index(s);
02298 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
02299 s->block_index[0]+=2;
02300 s->block_index[1]+=2;
02301 s->block_index[2]+=2;
02302 s->block_index[3]+=2;
02303
02304
02305 if(s->pict_type==AV_PICTURE_TYPE_B)
02306 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
02307 else
02308 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02309 }
02310 s->first_slice_line=0;
02311 }
02312 return 0;
02313 }
02314
02315 static int mb_var_thread(AVCodecContext *c, void *arg){
02316 MpegEncContext *s= *(void**)arg;
02317 int mb_x, mb_y;
02318
02319 ff_check_alignment();
02320
02321 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02322 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02323 int xx = mb_x * 16;
02324 int yy = mb_y * 16;
02325 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
02326 int varc;
02327 int sum = s->dsp.pix_sum(pix, s->linesize);
02328
02329 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
02330
02331 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
02332 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
02333 s->me.mb_var_sum_temp += varc;
02334 }
02335 }
02336 return 0;
02337 }
02338
02339 static void write_slice_end(MpegEncContext *s){
02340 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
02341 if(s->partitioned_frame){
02342 ff_mpeg4_merge_partitions(s);
02343 }
02344
02345 ff_mpeg4_stuffing(&s->pb);
02346 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
02347 ff_mjpeg_encode_stuffing(&s->pb);
02348 }
02349
02350 avpriv_align_put_bits(&s->pb);
02351 flush_put_bits(&s->pb);
02352
02353 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
02354 s->misc_bits+= get_bits_diff(s);
02355 }
02356
/**
 * Encode one slice — the macroblock rows [start_mb_y, end_mb_y) of this
 * thread context.  Resets per-slice state, optionally emits RTP/slice/
 * GOB headers, performs the per-MB mode decision (either a simple switch
 * on the single candidate type, or a full try-them-all RD search via
 * encode_mb_hq()), accumulates PSNR errors and runs the in-loop filter.
 *
 * Runs as an AVCodecContext.execute() worker; arg is MpegEncContext**.
 * @return 0 on success, -1 if a macroblock might not fit in the output.
 */
static int encode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y, pdif = 0;
    int chr_h= 16>>s->chroma_y_shift;
    int i, j;
    /* best_s/backup_s plus the double-buffered PutBitContexts below
       implement the save/try/restore scheme used by encode_mb_hq() */
    MpegEncContext best_s, backup_s;
    uint8_t bit_buf[2][MAX_MB_BYTES];
    uint8_t bit_buf2[2][MAX_MB_BYTES];
    uint8_t bit_buf_tex[2][MAX_MB_BYTES];
    PutBitContext pb[2], pb2[2], tex_pb[2];

    ff_check_alignment();

    for(i=0; i<2; i++){
        init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
        init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
        init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
    }

    /* reset per-slice statistics */
    s->last_bits= put_bits_count(&s->pb);
    s->mv_bits=0;
    s->misc_bits=0;
    s->i_tex_bits=0;
    s->p_tex_bits=0;
    s->i_count=0;
    s->f_count=0;
    s->b_count=0;
    s->skip_count=0;

    for(i=0; i<3; i++){
        /* DC predictor reset: mid-level at the current intra DC precision */
        s->last_dc[i] = 128 << s->intra_dc_precision;

        s->current_picture.f.error[i] = 0;
    }
    if(s->codec_id==CODEC_ID_AMV){
        /* AMV uses different DC predictor start values per plane */
        s->last_dc[0] = 128*8/13;
        s->last_dc[1] = 128*8/14;
        s->last_dc[2] = 128*8/14;
    }
    s->mb_skip_run = 0;
    memset(s->last_mv, 0, sizeof(s->last_mv));

    s->last_mv_dir = 0;

    switch(s->codec_id){
    case CODEC_ID_H263:
    case CODEC_ID_H263P:
    case CODEC_ID_FLV1:
        if (CONFIG_H263_ENCODER)
            s->gob_index = ff_h263_get_gob_height(s);
        break;
    case CODEC_ID_MPEG4:
        if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
            ff_mpeg4_init_partitions(s);
        break;
    }

    s->resync_mb_x=0;
    s->resync_mb_y=0;
    s->first_slice_line = 1;
    s->ptr_lastgob = s->pb.buf;
    for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        s->mb_x=0;
        s->mb_y= mb_y;

        ff_set_qscale(s, s->qscale);
        ff_init_block_index(s);

        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xy= mb_y*s->mb_stride + mb_x;
            int mb_type= s->mb_type[xy];

            int dmin= INT_MAX;
            int dir;

            /* bail out if the worst-case macroblock no longer fits */
            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }
            if(s->data_partitioning){
                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
                    av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                    return -1;
                }
            }

            s->mb_x = mb_x;
            s->mb_y = mb_y;
            ff_update_block_index(s);

            if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
                /* H.261 transmits MBs in a reordered pattern; refresh xy/type */
                ff_h261_reorder_mb_index(s);
                xy= s->mb_y*s->mb_stride + s->mb_x;
                mb_type= s->mb_type[xy];
            }

            /* write GOB / slice / video packet headers when a new packet starts */
            if(s->rtp_mode){
                int current_packet_size, is_gob_start;

                current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);

                is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;

                if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;

                switch(s->codec_id){
                case CODEC_ID_H263:
                case CODEC_ID_H263P:
                    if(!s->h263_slice_structured)
                        if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
                    break;
                case CODEC_ID_MPEG2VIDEO:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
                    /* fallthrough: MPEG-1/2 may not start a slice inside a skip run */
                case CODEC_ID_MPEG1VIDEO:
                    if(s->mb_skip_run) is_gob_start=0;
                    break;
                }

                if(is_gob_start){
                    if(s->start_mb_y != mb_y || mb_x!=0){
                        write_slice_end(s);

                        if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
                            ff_mpeg4_init_partitions(s);
                        }
                    }

                    assert((put_bits_count(&s->pb)&7) == 0);
                    current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;

                    /* optional deterministic error injection for testing */
                    if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
                        int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
                        int d= 100 / s->avctx->error_rate;
                        if(r % d == 0){
                            /* drop the previous packet by rewinding the writer */
                            current_packet_size=0;
                            s->pb.buf_ptr= s->ptr_lastgob;
                            assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
                        }
                    }

                    if (s->avctx->rtp_callback){
                        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
                        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
                    }

                    switch(s->codec_id){
                    case CODEC_ID_MPEG4:
                        if (CONFIG_MPEG4_ENCODER) {
                            ff_mpeg4_encode_video_packet_header(s);
                            ff_mpeg4_clean_buffers(s);
                        }
                    break;
                    case CODEC_ID_MPEG1VIDEO:
                    case CODEC_ID_MPEG2VIDEO:
                        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
                            ff_mpeg1_encode_slice_header(s);
                            ff_mpeg1_clean_buffers(s);
                        }
                    break;
                    case CODEC_ID_H263:
                    case CODEC_ID_H263P:
                        if (CONFIG_H263_ENCODER)
                            ff_h263_encode_gob_header(s, mb_y);
                    break;
                    }

                    if(s->flags&CODEC_FLAG_PASS1){
                        int bits= put_bits_count(&s->pb);
                        s->misc_bits+= bits - s->last_bits;
                        s->last_bits= bits;
                    }

                    s->ptr_lastgob += current_packet_size;
                    s->first_slice_line=1;
                    s->resync_mb_x=mb_x;
                    s->resync_mb_y=mb_y;
                }
            }

            if(  (s->resync_mb_x   == s->mb_x)
               && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;
            }

            s->mb_skipped=0;
            s->dquant=0;

            /* more than one candidate MB type, or QP trellis requested:
               trial-encode each candidate via encode_mb_hq() and keep the
               cheapest; otherwise take the single candidate directly */
            if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){
                int next_block=0;
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;

                copy_context_before_encode(&backup_s, s, -1);
                backup_s.pb= s->pb;
                best_s.data_partitioning= s->data_partitioning;
                best_s.partitioned_frame= s->partitioned_frame;
                if(s->data_partitioning){
                    backup_s.pb2= s->pb2;
                    backup_s.tex_pb= s->tex_pb;
                }

                if(mb_type&CANDIDATE_MB_TYPE_INTER){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra= 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(dir=0; dir<2; dir++){
                        for(i=0; i<2; i++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                        }
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTRA){
                    s->mv_dir = 0;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 1;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    if(s->h263_pred || s->h263_aic){
                        if(best_s.mb_intra)
                            s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
                        else
                            ff_clean_intra_table_entries(s);
                    }
                }

                /* QP trellis: retry the best candidate at neighbouring qscales */
                if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
                    if(best_s.mv_type==MV_TYPE_16X16){
                        const int last_qp= backup_s.qscale;
                        int qpi, qp, dc[6];
                        DCTELEM ac[6][16];
                        const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
                        static const int dquant_tab[4]={-1,1,-2,2};

                        assert(backup_s.dquant == 0);

                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = MV_TYPE_16X16;
                        s->mb_intra= best_s.mb_intra;
                        s->mv[0][0][0] = best_s.mv[0][0][0];
                        s->mv[0][0][1] = best_s.mv[0][0][1];
                        s->mv[1][0][0] = best_s.mv[1][0][0];
                        s->mv[1][0][1] = best_s.mv[1][0][1];

                        /* B frames may only use |dquant| == 2 here */
                        qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
                        for(; qpi<4; qpi++){
                            int dquant= dquant_tab[qpi];
                            qp= last_qp + dquant;
                            if(qp < s->avctx->qmin || qp > s->avctx->qmax)
                                continue;
                            backup_s.dquant= dquant;
                            if(s->mb_intra && s->dc_val[0]){
                                /* save DC/AC prediction state; the trial encode clobbers it */
                                for(i=0; i<6; i++){
                                    dc[i]= s->dc_val[0][ s->block_index[i] ];
                                    memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
                                }
                            }

                            encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
                                         &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
                            if(best_s.qscale != qp){
                                /* trial rejected: restore the prediction state */
                                if(s->mb_intra && s->dc_val[0]){
                                    for(i=0; i<6; i++){
                                        s->dc_val[0][ s->block_index[i] ]= dc[i];
                                        memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
                                    }
                                }
                            }
                        }
                    }
                }
                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
                    int mx= s->b_direct_mv_table[xy][0];
                    int my= s->b_direct_mv_table[xy][1];

                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    s->mb_intra= 0;
                    ff_mpeg4_set_direct_mv(s, mx, my);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, mx, my);
                }
                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    s->mb_intra= 0;
                    ff_mpeg4_set_direct_mv(s, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                /* try coding the best inter candidate with all-zero coefficients */
                if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
                    int coded=0;
                    for(i=0; i<6; i++)
                        coded |= s->block_last_index[i];
                    if(coded){
                        int mx,my;
                        memcpy(s->mv, best_s.mv, sizeof(s->mv));
                        if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
                            mx=my=0;
                            ff_mpeg4_set_direct_mv(s, mx, my);
                        }else if(best_s.mv_dir&MV_DIR_BACKWARD){
                            mx= s->mv[1][0][0];
                            my= s->mv[1][0][1];
                        }else{
                            mx= s->mv[0][0][0];
                            my= s->mv[0][0][1];
                        }

                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = best_s.mv_type;
                        s->mb_intra= 0;

                        backup_s.dquant= 0;
                        s->skipdct=1;
                        encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
                                     &dmin, &next_block, mx, my);
                        s->skipdct=0;
                    }
                }

                s->current_picture.f.qscale_table[xy] = best_s.qscale;

                /* adopt the winning candidate and copy its bits into the
                   real bitstream(s) */
                copy_context_after_encode(s, &best_s, -1);

                pb_bits_count= put_bits_count(&s->pb);
                flush_put_bits(&s->pb);
                avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
                s->pb= backup_s.pb;

                if(s->data_partitioning){
                    pb2_bits_count= put_bits_count(&s->pb2);
                    flush_put_bits(&s->pb2);
                    avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
                    s->pb2= backup_s.pb2;

                    tex_pb_bits_count= put_bits_count(&s->tex_pb);
                    flush_put_bits(&s->tex_pb);
                    avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
                    s->tex_pb= backup_s.tex_pb;
                }
                s->last_bits= put_bits_count(&s->pb);

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                if(next_block==0){
                    /* the winning reconstruction lives in the scratchpad;
                       copy it back to the picture */
                    s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
                    s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
                    s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
                }

                if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
                    MPV_decode_mb(s, s->block);
            } else {
                int motion_x = 0, motion_y = 0;
                s->mv_type=MV_TYPE_16X16;

                /* single candidate: set up prediction directly and encode */
                switch(mb_type){
                case CANDIDATE_MB_TYPE_INTRA:
                    s->mv_dir = 0;
                    s->mb_intra= 1;
                    motion_x= s->mv[0][0][0] = 0;
                    motion_y= s->mv[0][0][1] = 0;
                    break;
                case CANDIDATE_MB_TYPE_INTER:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_INTER_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_INTER4V:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra= 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_DIRECT:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        s->mb_intra= 0;
                        motion_x=s->b_direct_mv_table[xy][0];
                        motion_y=s->b_direct_mv_table[xy][1];
                        ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
                    }
                    break;
                case CANDIDATE_MB_TYPE_DIRECT0:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        s->mb_intra= 0;
                        ff_mpeg4_set_direct_mv(s, 0, 0);
                    }
                    break;
                case CANDIDATE_MB_TYPE_BIDIR:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_BACKWARD:
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_FORWARD:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];

                    break;
                case CANDIDATE_MB_TYPE_FORWARD_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_BACKWARD_I:
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_BIDIR_I:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(dir=0; dir<2; dir++){
                        for(i=0; i<2; i++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                        }
                    }
                    break;
                default:
                    av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
                }

                encode_mb(s, motion_x, motion_y);

                s->last_mv_dir = s->mv_dir;

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                MPV_decode_mb(s, s->block);
            }

            /* clean the MV table entry so intra MBs predict from (0,0) */
            if(s->mb_intra ){
                s->p_mv_table[xy][0]=0;
                s->p_mv_table[xy][1]=0;
            }

            if(s->flags&CODEC_FLAG_PSNR){
                int w= 16;
                int h= 16;

                if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
                if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

                s->current_picture.f.error[0] += sse(
                    s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                    s->dest[0], w, h, s->linesize);
                s->current_picture.f.error[1] += sse(
                    s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
                s->current_picture.f.error[2] += sse(
                    s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
            }
            if(s->loop_filter){
                if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
                    ff_h263_loop_filter(s);
            }

        }
    }

    /* MSMPEG4 V1-V3 write an extension header on keyframes */
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
        msmpeg4_encode_ext_header(s);

    write_slice_end(s);

    /* report the final (possibly partial) packet */
    if (s->avctx->rtp_callback) {
        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
        pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;

        emms_c();
        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
    }

    return 0;
}
02989
/* Accumulate a field from src into dst and zero it in src, so repeated
 * merging never counts a value twice.
 * NOTE(review): expands to TWO statements — only safe when used as a
 * full statement (as all callers here do), never in an unbraced `if`. */
#define MERGE(field) dst->field += src->field; src->field=0
/* Fold a slice thread's motion-estimation statistics into the main context. */
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
    MERGE(me.scene_change_score);
    MERGE(me.mc_mb_var_sum_temp);
    MERGE(me.mb_var_sum_temp);
}
02996
/**
 * Fold the per-slice-thread encoding statistics of src into dst and
 * append src's bitstream to dst's.  MERGE() also zeroes the source
 * field so nothing is double-counted on later calls.
 */
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
    int i;

    /* rate-control / statistics counters */
    MERGE(dct_count[0]);
    MERGE(dct_count[1]);
    MERGE(mv_bits);
    MERGE(i_tex_bits);
    MERGE(p_tex_bits);
    MERGE(i_count);
    MERGE(f_count);
    MERGE(b_count);
    MERGE(skip_count);
    MERGE(misc_bits);
    MERGE(error_count);
    MERGE(padding_bug_score);
    /* PSNR error accumulators */
    MERGE(current_picture.f.error[0]);
    MERGE(current_picture.f.error[1]);
    MERGE(current_picture.f.error[2]);

    if(dst->avctx->noise_reduction){
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* both streams are expected to be byte-aligned here (presumably via
       write_slice_end() — confirm against the callers) before appending */
    assert(put_bits_count(&src->pb) % 8 ==0);
    assert(put_bits_count(&dst->pb) % 8 ==0);
    avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
03028
03029 static int estimate_qp(MpegEncContext *s, int dry_run){
03030 if (s->next_lambda){
03031 s->current_picture_ptr->f.quality =
03032 s->current_picture.f.quality = s->next_lambda;
03033 if(!dry_run) s->next_lambda= 0;
03034 } else if (!s->fixed_qscale) {
03035 s->current_picture_ptr->f.quality =
03036 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
03037 if (s->current_picture.f.quality < 0)
03038 return -1;
03039 }
03040
03041 if(s->adaptive_quant){
03042 switch(s->codec_id){
03043 case CODEC_ID_MPEG4:
03044 if (CONFIG_MPEG4_ENCODER)
03045 ff_clean_mpeg4_qscales(s);
03046 break;
03047 case CODEC_ID_H263:
03048 case CODEC_ID_H263P:
03049 case CODEC_ID_FLV1:
03050 if (CONFIG_H263_ENCODER)
03051 ff_clean_h263_qscales(s);
03052 break;
03053 default:
03054 ff_init_qscale_tab(s);
03055 }
03056
03057 s->lambda= s->lambda_table[0];
03058
03059 }else
03060 s->lambda = s->current_picture.f.quality;
03061
03062 update_qscale(s);
03063 return 0;
03064 }
03065
03066
/**
 * Update the temporal distances used for MV scaling and direct-mode
 * prediction: pp_time (distance between the two most recent reference
 * frames) and, for B frames, pb_time (distance from the previous
 * reference to this B frame).
 */
static void set_frame_distances(MpegEncContext * s){
    assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
    /* picture time expressed in time_base.num units */
    s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;

    if(s->pict_type==AV_PICTURE_TYPE_B){
        s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
        /* a B frame must lie strictly between its two references */
        assert(s->pb_time > 0 && s->pb_time < s->pp_time);
    }else{
        s->pp_time= s->time - s->last_non_b_time;
        s->last_non_b_time= s->time;
        assert(s->picture_number==0 || s->pp_time > 0);
    }
}
03080
/**
 * Encode one picture: run motion estimation, pick frame/field codes and
 * qscale, write the picture header, then run the per-slice encode threads.
 *
 * @param s              main encoder context (slice contexts hang off it)
 * @param picture_number display/coding number of this picture
 * @return 0 on success, -1 on rate-control failure
 */
static int encode_picture(MpegEncContext *s, int picture_number)
{
    int i;
    int bits;
    int context_count = s->slice_context_count;

    s->picture_number = picture_number;

    /* Reset the per-frame variance accumulators that the ME threads fill. */
    s->me.mb_var_sum_temp =
    s->me.mc_mb_var_sum_temp = 0;

    /* Update pts-derived frame distances for codecs that need them
     * (MPEG-1/2 and the MPEG-4-style h263_pred codecs). */
    if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
        set_frame_distances(s);
    if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
        ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

    /* Select the rounding mode for motion compensation; codecs with
     * flip-flop rounding alternate it on every non-B reference frame. */
    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else                        s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
            s->no_rounding ^= 1;
    }

    /* Initial lambda/qscale: from the 2-pass log, or carried over from the
     * previous frame of the same type when no fixed qscale is requested. */
    if(s->flags & CODEC_FLAG_PASS2){
        if (estimate_qp(s,1) < 0)
            return -1;
        ff_get_2pass_fcode(s);
    }else if(!(s->flags & CODEC_FLAG_QSCALE)){
        if(s->pict_type==AV_PICTURE_TYPE_B)
            s->lambda= s->last_lambda_for[s->pict_type];
        else
            s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
        update_qscale(s);
    }

    /* All codecs except AMV share the luma intra matrix for chroma;
     * free any separate chroma matrices before aliasing the pointers. */
    if(s->codec_id != CODEC_ID_AMV){
        if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
        if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
        s->q_chroma_intra_matrix   = s->q_intra_matrix;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
    }

    s->mb_intra=0; //for the rate distortion & bit compare functions
    for(i=1; i<context_count; i++){
        ff_update_duplicate_context(s->thread_context[i], s);
    }

    if(ff_init_me(s)<0)
        return -1;

    /* Motion estimation (inter frames) or per-MB variance (intra frames),
     * run across the slice-thread contexts. */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
        /* Intra frame: mark every macroblock intra up front. */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if(!s->fixed_qscale){
            /* Adaptive quant still needs the per-MB variances. */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->thread_context[i]);
    }
    s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
    s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
    emms_c();

    /* Scene change: force the P frame to be coded as I. */
    if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
        s->pict_type= AV_PICTURE_TYPE_I;
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

    }

    /* Choose f_code/b_code from the estimated MVs and clamp MVs that the
     * chosen fcode range cannot represent (skipped for unlimited MVs). */
    if(!s->umvplus){
        if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
            s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int a,b;
                a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
                b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
                s->f_code= FFMAX3(s->f_code, a, b);
            }

            ff_fix_long_p_mvs(s);
            ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int j;
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++)
                        ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                        s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
                }
            }
        }

        if(s->pict_type==AV_PICTURE_TYPE_B){
            int a, b;

            /* Forward fcode covers both forward and bidir-forward MVs. */
            a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
            b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->f_code = FFMAX(a, b);

            /* Backward fcode covers backward and bidir-backward MVs. */
            a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
            b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->b_code = FFMAX(a, b);

            ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int dir, j;
                for(dir=0; dir<2; dir++){
                    for(i=0; i<2; i++){
                        for(j=0; j<2; j++){
                            int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
                                          : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
                            ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                            s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                        }
                    }
                }
            }
        }
    }

    if (estimate_qp(s, 0) < 0)
        return -1;

    /* Very small qscales overflow the small max_qcoeff codecs on I frames. */
    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
        s->qscale= 3;

    if (s->out_format == FMT_MJPEG) {
        /* MJPEG folds qscale into the quantization matrix instead. */
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[i];

            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
        }
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
        s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }
    if(s->codec_id == CODEC_ID_AMV){
        /* AMV uses fixed SP5X quantization tables and DC scales. */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        }
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                       s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }

    /* Mirror the final picture type into both picture structs. */
    s->current_picture_ptr->f.key_frame =
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
    s->current_picture_ptr->f.pict_type =
    s->current_picture.f.pict_type = s->pict_type;

    if (s->current_picture.f.key_frame)
        s->picture_in_gop_number=0;

    /* Write the codec-specific picture header and account its bits. */
    s->last_bits= put_bits_count(&s->pb);
    switch(s->out_format) {
    case FMT_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_picture_header(s);
        break;
    case FMT_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        break;
    case FMT_H263:
        if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
            mpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
            rv10_encode_picture_header(s, picture_number);
        else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
            rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            mpeg1_encode_picture_header(s, picture_number);
        break;
    case FMT_H264:
        break;
    default:
        assert(0);
    }
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    /* Encode the slices in parallel, then merge the thread contexts. */
    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->thread_context[i], s);
    }
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        merge_context_after_encode(s, s->thread_context[i]);
    }
    emms_c();
    return 0;
}
03325
03326 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
03327 const int intra= s->mb_intra;
03328 int i;
03329
03330 s->dct_count[intra]++;
03331
03332 for(i=0; i<64; i++){
03333 int level= block[i];
03334
03335 if(level){
03336 if(level>0){
03337 s->dct_error_sum[intra][i] += level;
03338 level -= s->dct_offset[intra][i];
03339 if(level<0) level=0;
03340 }else{
03341 s->dct_error_sum[intra][i] -= level;
03342 level += s->dct_offset[intra][i];
03343 if(level>0) level=0;
03344 }
03345 block[i]= level;
03346 }
03347 }
03348 }
03349
/**
 * Trellis (rate-distortion optimal) quantization of one 8x8 block.
 *
 * Runs the forward DCT, then searches over quantized coefficient choices
 * with a Viterbi-style pass: for each scan position it keeps the best
 * (distortion + lambda*rate) path ending there, pruning dominated
 * "survivor" states.
 *
 * @param s        encoder context (quant matrices, VLC length tables, lambda)
 * @param block    spatial-domain block on input, quantized coefficients on output
 * @param n        block index (0-3 luma, 4-5 chroma)
 * @param qscale   quantizer scale to use
 * @param overflow set to nonzero if a coefficient exceeded s->max_qcoeff
 * @return index of the last nonzero coefficient, or -1 if the block is empty
 */
static int dct_quantize_trellis_c(MpegEncContext *s,
                                  DCTELEM *block, int n,
                                  int qscale, int *overflow){
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];       // up to two candidate levels per position
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    /* DC handling and table selection differ between intra and inter. */
    if (s->mb_intra) {
        int q;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* AIC: the DC is quantized like an AC coefficient. */
            q = 1 << 3;
            qadd=0;
        }

        /* DC is quantized directly; the trellis only covers the ACs. */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
            bias= 1<<(QMAT_SHIFT-1);
        length     = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* Find the last coefficient that survives quantization at all. */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* Build, per position, the candidate levels (L and L-1 towards zero). */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
            }
            coeff_count[i]= FFMIN(level, 2);
            assert(coeff_count[i]);
            max |=level;
        }else{
            /* Below threshold: only +/-1 (sign of level) is a candidate. */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //FIXME do not clip to max_qcoeff here?

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* Dynamic-programming pass over the scan positions. */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        /* fdct_ifast (and unscaled faandct) leave AAN prescale factors in
         * the output; undo them before measuring distortion. */
        if (   s->dsp.fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
            || s->dsp.fdct == ff_faandct
#endif
           )
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            assert(level);

            /* Reconstruct the dequantized value the decoder would see. */
            if(s->out_format == FMT_H263){
                unquant_coeff= alevel*qmul + qadd;
            }else{ //MPEG1
                j= s->dsp.idct_permutation[ scantable[i] ];
                if(s->mb_intra){
                        unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }else{
                        unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* Level fits the VLC tables: extend each survivor path. */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                /* H.263 "last" coefficients use a different VLC table. */
                if(s->out_format == FMT_H263){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* Out-of-range level: would be coded with the escape code. */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        /* Prune dominated survivors; short blocks are pruned harder. */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    /* Non-H.263 codecs signal "last" via EOB instead of a VLC table; pick
     * the best truncation point from the score table. */
    if(s->out_format != FMT_H263){
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2; //FIXME exacter?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));

    if(last_non_zero < start_i)
        return last_non_zero;

    /* Special case: only the first coefficient survived — also consider
     * dropping it entirely (rate vs. distortion). */
    if(last_non_zero == 0 && start_i == 0){
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263){
                    unquant_coeff= (alevel*qmul + qadd)>>3;
            }else{ //MPEG1
                    unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }

    /* Backtrack the winning path into the block. */
    i= last_i;
    assert(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
03645

/* 8x8 DCT basis images in fixed point (scaled by 1<<BASIS_SHIFT), indexed
 * by [permuted coefficient index][8*x + y]; filled lazily by build_basis()
 * and used by dct_quantize_refine(). */
static int16_t basis[64][64];

/**
 * Fill the global basis[][] table with the 2-D DCT basis functions,
 * stored in the given IDCT coefficient permutation order.
 *
 * @param perm IDCT coefficient permutation table (s->dsp.idct_permutation)
 */
static void build_basis(uint8_t *perm){
    int i, j, x, y;
    emms_c();
    for(i=0; i<8; i++){
        for(j=0; j<8; j++){
            for(y=0; y<8; y++){
                for(x=0; x<8; x++){
                    double s= 0.25*(1<<BASIS_SHIFT);
                    int index= 8*i + j;
                    int perm_index= perm[index];
                    /* DC rows/columns get the 1/sqrt(2) normalization. */
                    if(i==0) s*= sqrt(0.5);
                    if(j==0) s*= sqrt(0.5);
                    basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
                }
            }
        }
    }
}
03667
/**
 * Iteratively refine an already-quantized block for better
 * rate-distortion: repeatedly try changing each coefficient by +/-1
 * (including creating/removing coefficients), keep the single change
 * with the best (distortion + lambda*rate) gain, and stop when no
 * change improves the score.
 *
 * @param s      encoder context
 * @param block  quantized coefficients (input and output)
 * @param weight per-coefficient perceptual weights (rewritten in place)
 * @param orig   original spatial-domain block (before DCT/quantization)
 * @param n      block index (0-3 luma, 4-5 chroma)
 * @param qscale quantizer scale used for the block
 * @return index of the last nonzero coefficient after refinement
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        DCTELEM *block, int16_t *weight, DCTELEM *orig,
                        int n, int qscale){
    int16_t rem[64];          // residual in the spatial domain (RECON_SHIFT fixed point)
    LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;

    int run_tab[65];          // zero-run lengths preceding each nonzero coeff
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum;
#ifdef REFINE_STATS
static int count=0;
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int raise=0;
static int lower=0;
static int messed_sign=0;
#endif

    /* Lazily build the DCT basis table on first use. */
    if(basis[0][0] == 0)
        build_basis(s->dsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* AIC: DC quantized like an AC coefficient. */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* DC is not part of the AC refinement loop. */
        dc= block[0]*q;

        start_i = 1;

        length     = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

#ifdef REFINE_STATS
{START_TIMER
#endif
    /* rem[] starts as (rounded DC reconstruction) - original, in
     * RECON_SHIFT fixed point; coefficients are added to it below. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i]= dc - (orig[i]<<RECON_SHIFT);
    }
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
#endif
    /* Remap the raw weights into a bounded range and derive lambda
     * scaled to match the fixed-point score arithmetic. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;

        assert(w>0);
        assert(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
#ifdef REFINE_STATS
{START_TIMER
#endif
    /* Accumulate the dequantized coefficients into rem[] and record the
     * run-length structure of the current block. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->dsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("init rem[]")
}
}

{START_TIMER
#endif
    /* Greedy hill-climbing: each iteration applies the single best +/-1
     * coefficient change, until none improves the score. */
    for(;;){
        int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
#ifdef REFINE_STATS
{START_TIMER
#endif
        analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3;

        /* Optional gradient analysis: a weighted DCT of the residual
         * tells which sign of change could help at each position. */
        if(analyze_gradient){
#ifdef REFINE_STATS
{START_TIMER
#endif
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
#ifdef REFINE_STATS
STOP_TIMER("rem*w*w")}
{START_TIMER
#endif
            s->dsp.fdct(d1);
#ifdef REFINE_STATS
STOP_TIMER("dct")}
#endif
        }

        /* Intra: also try nudging the DC coefficient. */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            assert(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* Try +/-1 on every AC position, scoring the VLC bit cost delta
         * (including run merges/splits) plus the distortion delta. */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->avctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                assert(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing an existing coefficient: VLC length delta. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a coefficient (0 -> +/-1): splits a run. */
                        assert(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a coefficient (+/-1 -> 0): merges two runs. */
                    new_coeff=0;
                    assert(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                assert((score < 100*lambda && score > -100*lambda) || lambda==0);

                score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }
#ifdef REFINE_STATS
STOP_TIMER("iterative step")}
#endif

        if(best_change){
            /* Apply the winning change and update bookkeeping. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                assert(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
            }else{
#ifdef REFINE_STATS
if(block[j]){
if(block[j] - best_change){
if(FFABS(block[j]) > FFABS(block[j] - best_change)){
raise++;
}else{
lower++;
}
}else{
from_zero++;
}
}else{
to_zero++;
}
#endif
                /* The last nonzero index may shrink if a tail coeff hit 0. */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
            /* Rebuild run_tab[] to reflect the new coefficient layout. */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                 if(level){
                     run_tab[rle_index++]=run;
                     run=0;
                 }else{
                     run++;
                 }
            }

            /* Fold the applied change into the residual. */
            s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("iterative search")
}
}
#endif

    return last_non_zero;
}
04040
04041 int dct_quantize_c(MpegEncContext *s,
04042 DCTELEM *block, int n,
04043 int qscale, int *overflow)
04044 {
04045 int i, j, level, last_non_zero, q, start_i;
04046 const int *qmat;
04047 const uint8_t *scantable= s->intra_scantable.scantable;
04048 int bias;
04049 int max=0;
04050 unsigned int threshold1, threshold2;
04051
04052 s->dsp.fdct (block);
04053
04054 if(s->dct_error_sum)
04055 s->denoise_dct(s, block);
04056
04057 if (s->mb_intra) {
04058 if (!s->h263_aic) {
04059 if (n < 4)
04060 q = s->y_dc_scale;
04061 else
04062 q = s->c_dc_scale;
04063 q = q << 3;
04064 } else
04065
04066 q = 1 << 3;
04067
04068
04069 block[0] = (block[0] + (q >> 1)) / q;
04070 start_i = 1;
04071 last_non_zero = 0;
04072 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
04073 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04074 } else {
04075 start_i = 0;
04076 last_non_zero = -1;
04077 qmat = s->q_inter_matrix[qscale];
04078 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04079 }
04080 threshold1= (1<<QMAT_SHIFT) - bias - 1;
04081 threshold2= (threshold1<<1);
04082 for(i=63;i>=start_i;i--) {
04083 j = scantable[i];
04084 level = block[j] * qmat[j];
04085
04086 if(((unsigned)(level+threshold1))>threshold2){
04087 last_non_zero = i;
04088 break;
04089 }else{
04090 block[j]=0;
04091 }
04092 }
04093 for(i=start_i; i<=last_non_zero; i++) {
04094 j = scantable[i];
04095 level = block[j] * qmat[j];
04096
04097
04098
04099 if(((unsigned)(level+threshold1))>threshold2){
04100 if(level>0){
04101 level= (bias + level)>>QMAT_SHIFT;
04102 block[j]= level;
04103 }else{
04104 level= (bias - level)>>QMAT_SHIFT;
04105 block[j]= -level;
04106 }
04107 max |=level;
04108 }else{
04109 block[j]=0;
04110 }
04111 }
04112 *overflow= s->max_qcoeff < max;
04113
04114
04115 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
04116 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
04117
04118 return last_non_zero;
04119 }
04120
/* Helpers for declaring encoder-private AVOptions on MpegEncContext. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the plain H.263 encoder. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
    { NULL },
};

static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* H.263 baseline encoder registration. */
AVCodec ff_h263_encoder = {
    .name           = "h263",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = MPV_encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
    .priv_class = &h263_class,
};
04148
/* Private options of the H.263+ (H.263-1998) encoder. */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
    { NULL },
};
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* H.263+ encoder registration; supports slice threading. */
AVCodec ff_h263p_encoder = {
    .name           = "h263p",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = MPV_encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .priv_class = &h263p_class,
};
04176
/* MS-MPEG4 v2 encoder registration. */
AVCodec ff_msmpeg4v2_encoder = {
    .name           = "msmpeg4v2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = MPV_encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
};

/* MS-MPEG4 v3 ("DivX 3") encoder registration. */
AVCodec ff_msmpeg4v3_encoder = {
    .name           = "msmpeg4",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = MPV_encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
};

/* WMV1 (Windows Media Video 7) encoder registration. */
AVCodec ff_wmv1_encoder = {
    .name           = "wmv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = MPV_encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
};