00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043
00044
00045
00046
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048 DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050 DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052 DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054 DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056 DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058 DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060 DCTELEM *block, int n, int qscale);
00061
00062
00063
00064
00065
00066
00067
00068
/* Default luma->chroma qscale mapping: the identity (chroma uses the same
 * quantizer scale as luma). Codecs with a nonlinear mapping install their
 * own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {

     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
00074
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value
 * (also doubles as entry 0 of ff_mpeg2_dc_scale_table below). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {

    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00086
/* MPEG-2 DC scale for intra_dc_precision == 1 (constant 4). */
static const uint8_t mpeg2_dc_scale_table1[128] = {

    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
00098
/* MPEG-2 DC scale for intra_dc_precision == 2 (constant 2). */
static const uint8_t mpeg2_dc_scale_table2[128] = {

    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
00110
/* MPEG-2 DC scale for intra_dc_precision == 3 (constant 1, i.e. full precision). */
static const uint8_t mpeg2_dc_scale_table3[128] = {

    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
00122
/* DC scale tables indexed by MPEG-2 intra_dc_precision (0..3);
 * precision 0 reuses the MPEG-1 table (constant 8). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00129
/* PIX_FMT_NONE-terminated list offered by decoders that only output YUV 4:2:0. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00134
/* 4:2:0 format list with hardware-acceleration entries first; get_format()
 * picks the earliest entry it supports, so software YUV420P is the fallback. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00142
/**
 * Scan [p, end) for an MPEG start code (0x000001xx), resumable across calls.
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state sliding window of the last 4 bytes read; carried between
 *              calls so start codes split across buffers are still found
 * @return pointer just past the byte that completed a start code, or @p end
 *         if none was found
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Prime the window: consume up to 3 bytes one at a time. tmp == 0x100
     * means the previous *state was 0x000001, i.e. a start code carried
     * over from the previous call just completed. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan: after the loop above p-1, p-2, p-3 are always readable.
     * A start code needs the byte pattern 00 00 01, so the values just
     * behind p tell us how far ahead we can safely skip. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* p[-1] can't be part of 00 00 01 */
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            /* p[-3] == 0, p[-2] == 0, p[-1] == 1: start code found */
            p++;
            break;
        }
    }

    /* Reload the 4-byte window from the bytes just consumed so the next
     * call resumes correctly (also handles the hit-the-end case). */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
00175
00176
/**
 * Initialize DSP functions, the per-codec dequantizers and the scan tables
 * for the given context.
 * @return 0 (cannot currently fail)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    /* Install the C reference dequantizers first ... */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* ... then let the architecture-specific init override them. */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* Scan tables must be built after dsputil_init() because they are
     * permuted with the IDCT's coefficient permutation. */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00221
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224 *dst = *src;
00225 dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233
00234
00235
00236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238 else
00239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240 av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242
/**
 * Allocate a frame buffer for @p pic (plus hwaccel private data if needed)
 * and sanity-check the strides returned by get_buffer().
 * @return 0 on success, -1 on failure (any partial allocation is released)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE bypass the thread-aware get_buffer path;
     * this mirrors the branch in free_frame_buffer(). */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* Once s->linesize is set, every subsequent buffer must match it:
     * the rest of the decoder caches strides in the context. */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* The code assumes both chroma planes share one stride. */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00290
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298
00299
00300
00301 const int mb_array_size = s->mb_stride * s->mb_height;
00302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304 int i;
00305 int r = -1;
00306
00307 if (shared) {
00308 assert(pic->f.data[0]);
00309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310 pic->f.type = FF_BUFFER_TYPE_SHARED;
00311 } else {
00312 assert(!pic->f.data[0]);
00313
00314 if (alloc_frame_buffer(s, pic) < 0)
00315 return -1;
00316
00317 s->linesize = pic->f.linesize[0];
00318 s->uvlinesize = pic->f.linesize[1];
00319 }
00320
00321 if (pic->f.qscale_table == NULL) {
00322 if (s->encoding) {
00323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324 mb_array_size * sizeof(int16_t), fail)
00325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326 mb_array_size * sizeof(int16_t), fail)
00327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328 mb_array_size * sizeof(int8_t ), fail)
00329 }
00330
00331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332 mb_array_size * sizeof(uint8_t) + 2, fail)
00333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335 fail)
00336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338 fail)
00339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341 if (s->out_format == FMT_H264) {
00342 for (i = 0; i < 2; i++) {
00343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344 2 * (b4_array_size + 4) * sizeof(int16_t),
00345 fail)
00346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348 4 * mb_array_size * sizeof(uint8_t), fail)
00349 }
00350 pic->f.motion_subsample_log2 = 2;
00351 } else if (s->out_format == FMT_H263 || s->encoding ||
00352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353 for (i = 0; i < 2; i++) {
00354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355 2 * (b8_array_size + 4) * sizeof(int16_t),
00356 fail)
00357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359 4 * mb_array_size * sizeof(uint8_t), fail)
00360 }
00361 pic->f.motion_subsample_log2 = 3;
00362 }
00363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366 }
00367 pic->f.qstride = s->mb_stride;
00368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369 1 * sizeof(AVPanScan), fail)
00370 }
00371
00372 pic->owner2 = s;
00373
00374 return 0;
00375 fail:
00376 if (r >= 0)
00377 free_frame_buffer(s, pic);
00378 return -1;
00379 }
00380
00384 static void free_picture(MpegEncContext *s, Picture *pic)
00385 {
00386 int i;
00387
00388 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00389 free_frame_buffer(s, pic);
00390 }
00391
00392 av_freep(&pic->mb_var);
00393 av_freep(&pic->mc_mb_var);
00394 av_freep(&pic->mb_mean);
00395 av_freep(&pic->f.mbskip_table);
00396 av_freep(&pic->qscale_table_base);
00397 av_freep(&pic->mb_type_base);
00398 av_freep(&pic->f.dct_coeff);
00399 av_freep(&pic->f.pan_scan);
00400 pic->f.mb_type = NULL;
00401 for (i = 0; i < 2; i++) {
00402 av_freep(&pic->motion_val_base[i]);
00403 av_freep(&pic->f.ref_index[i]);
00404 }
00405
00406 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00407 for (i = 0; i < 4; i++) {
00408 pic->f.base[i] =
00409 pic->f.data[i] = NULL;
00410 }
00411 pic->f.type = 0;
00412 }
00413 }
00414
/**
 * Allocate the per-slice-thread scratch buffers of @p s.
 * @param base unused here; kept for symmetry with the thread setup code
 * @return 0 on success, -1 on allocation failure (partial allocations are
 *         cleaned up later by free_duplicate_context())
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* Edge emulation buffer, sized from the frame width.
     * NOTE(review): the magic sizing constants (95, 21, 4) presumably cover
     * the worst-case interpolation edge — confirm before changing. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 95) * 2 * 21 * 4, fail);

    /* One shared scratchpad, aliased for several uses; obmc gets a
     * 16-byte offset view into the same allocation. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coeffs, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC prediction values: one padded Y plane followed by two C planes */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1;
}
00465
00466 static void free_duplicate_context(MpegEncContext *s)
00467 {
00468 if (s == NULL)
00469 return;
00470
00471 av_freep(&s->edge_emu_buffer);
00472 av_freep(&s->me.scratchpad);
00473 s->me.temp =
00474 s->rd_scratchpad =
00475 s->b_scratchpad =
00476 s->obmc_scratchpad = NULL;
00477
00478 av_freep(&s->dct_error_sum);
00479 av_freep(&s->me.map);
00480 av_freep(&s->me.score_map);
00481 av_freep(&s->blocks);
00482 av_freep(&s->ac_val_base);
00483 s->block = NULL;
00484 }
00485
/**
 * Copy only the per-thread fields from @p src into @p bak.
 * Used by ff_update_duplicate_context() to preserve a slice thread's private
 * state across a wholesale memcpy of the context.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00512
/**
 * Refresh a slice-thread context from the master context: copy everything,
 * then restore dst's own per-thread buffers and re-point pblocks into
 * dst's block array (they would otherwise still reference src's).
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;

    /* Save dst's per-thread state, overwrite dst wholesale, restore. */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
}
00528
/**
 * Transfer decoding state from one frame-thread context to the next
 * (frame-multithreading support).
 * @return 0 on success
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    /* First call for this thread: clone the whole context, then fix up the
     * per-thread members and give it its own slice of the picture array. */
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        /* NOTE(review): return value not checked — a failed init leaves this
         * thread's context half-built; confirm callers can tolerate that. */
        MPV_common_init(s);
    }

    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width  = s1->avctx->coded_width;
    s->avctx->width        = s1->avctx->width;
    s->avctx->height       = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* Copy the picture array and the struct range between last_picture and
     * last_picture_ptr (relies on MpegEncContext field layout). */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    /* Re-point the picture pointers into this thread's own array. */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    /* MPEG-4: copy the field range [time_increment_bits, shape). */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->dropable     = s1->dropable;

    /* DivX packed-bitstream handling */
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        /* NOTE(review): av_fast_malloc result is not checked and the grow
         * size is s1's *allocated* size — assumed always >= used size +
         * padding; confirm. */
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    /* MPEG-2: copy the field range [progressive_sequence, rtp_mode). */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
00614
00621 void MPV_common_defaults(MpegEncContext *s)
00622 {
00623 s->y_dc_scale_table =
00624 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
00625 s->chroma_qscale_table = ff_default_chroma_qscale_table;
00626 s->progressive_frame = 1;
00627 s->progressive_sequence = 1;
00628 s->picture_structure = PICT_FRAME;
00629
00630 s->coded_picture_number = 0;
00631 s->picture_number = 0;
00632 s->input_picture_number = 0;
00633
00634 s->picture_in_gop_number = 0;
00635
00636 s->f_code = 1;
00637 s->b_code = 1;
00638
00639 s->picture_range_start = 0;
00640 s->picture_range_end = MAX_PICTURE_COUNT;
00641
00642 s->slice_context_count = 1;
00643 }
00644
/**
 * Set context defaults for decoding; currently identical to the
 * common defaults.
 */
void MPV_decode_defaults(MpegEncContext *s)
{
    MPV_common_defaults(s);
}
00654
/**
 * Initialize the MpegEncContext: compute macroblock geometry, allocate all
 * shared tables (and encoder-only tables when s->encoding), create the
 * picture array and the per-slice thread contexts.
 * @return 0 on success, -1 on failure (MPV_common_end() is called to clean up)
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* Interlaced MPEG-2 needs mb_height rounded to a multiple of two
     * 16-line macroblock rows; H.264 manages its own mb_height. */
    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* Clamp the slice count to what the frame and MAX_THREADS allow. */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* Macroblock geometry; strides include one extra column of padding. */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    s->h_edge_pos = s->mb_width  * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* Per-block-index stride used for DC/AC prediction neighbours. */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame = (AVFrame*)&s->current_picture;

    /* Map from raster mb index to padded-stride xy index. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width;

    if (s->encoding) {
        /* Motion vector tables; the +mb_stride+1 offsets skip the padding row/col. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type     , mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* Quantization matrices: 64 coeffs x 32 qscale values. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix         , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix         , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16       , 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16       , 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture*), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }

    /* One picture pool slice per thread (frame threading). */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* Interlaced (field) motion estimation tables: [direction][field][list]. */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* Coded-block pattern prediction (at 8x8 block resolution). */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC prediction values, initialized to 1024 (the "no prediction" value). */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* All macroblocks start out marked intra so prediction state is reset. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    /* Create slice-thread contexts and split the mb rows between them.
     * NOTE(review): av_malloc result is used unchecked here — confirm. */
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i], s) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s, s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;

    return 0;
fail:
    MPV_common_end(s);
    return -1;
}
00868
00869
/**
 * Free everything allocated by MPV_common_init() and reset the context to an
 * uninitialized state. Safe to call on a partially initialized context
 * (av_freep tolerates NULL members).
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Tear down slice-thread contexts; thread_context[0] is s itself. */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    /* The chroma matrices may alias the luma ones; only free them when
     * they are separate allocations, then clear the (possibly aliasing)
     * pointers before freeing the luma matrices. */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* A copied (frame-thread) context does not own the picture buffers. */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
00958
/**
 * Compute the derived run/level tables (max_level, max_run, index_run) of an
 * RLTable, separately for "not last" and "last" coefficient codes.
 *
 * @param static_store if non-NULL, a caller-provided buffer laid out as
 *        [max_level (MAX_RUN+1) | max_run (MAX_LEVEL+1) | index_run (MAX_RUN+1)]
 *        per 'last' value — matching the 2*MAX_RUN+MAX_LEVEL+3 row size;
 *        otherwise the tables are av_malloc()ed (never freed here).
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int     last, run, level, start, end, i;

    /* Already initialized (static tables are shared between codecs). */
    if (static_store && rl->max_level[0])
        return;

    /* Pass 0: codes before rl->last ("not last"); pass 1: the rest ("last"). */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run,   0, MAX_LEVEL + 1);
        /* rl->n acts as the "no code for this run" sentinel. */
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
01010
/**
 * Precompute the per-qscale RL-VLC tables: for each of 32 qscale values,
 * store the already-dequantized level alongside run and code length so the
 * decoder's inner loop avoids a multiply per coefficient.
 */
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        /* H.263-style dequant: level * 2q +/- (q-1)|1; q == 0 passes
         * levels through unchanged. */
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { /* illegal code */
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { /* more bits needed (sub-table) */
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { /* esc */
                    run   = 66;
                    level =  0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192; /* marks a "last" coefficient */
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}
01050
01051 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01052 {
01053 int i;
01054
01055
01056 for (i = 0; i < s->picture_count; i++) {
01057 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01058 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01059 (remove_current || &s->picture[i] != s->current_picture_ptr)
01060 ) {
01061 free_frame_buffer(s, &s->picture[i]);
01062 }
01063 }
01064 }
01065
01066 int ff_find_unused_picture(MpegEncContext *s, int shared)
01067 {
01068 int i;
01069
01070 if (shared) {
01071 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01072 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01073 return i;
01074 }
01075 } else {
01076 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01077 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01078 return i;
01079 }
01080 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01081 if (s->picture[i].f.data[0] == NULL)
01082 return i;
01083 }
01084 }
01085
01086 av_log(s->avctx, AV_LOG_FATAL,
01087 "Internal error, picture buffer overflow\n");
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099 abort();
01100 return -1;
01101 }
01102
01103 static void update_noise_reduction(MpegEncContext *s)
01104 {
01105 int intra, i;
01106
01107 for (intra = 0; intra < 2; intra++) {
01108 if (s->dct_count[intra] > (1 << 16)) {
01109 for (i = 0; i < 64; i++) {
01110 s->dct_error_sum[intra][i] >>= 1;
01111 }
01112 s->dct_count[intra] >>= 1;
01113 }
01114
01115 for (i = 0; i < 64; i++) {
01116 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01117 s->dct_count[intra] +
01118 s->dct_error_sum[intra][i] / 2) /
01119 (s->dct_error_sum[intra][i] + 1);
01120 }
01121 }
01122 }
01123
/**
 * Generic per-frame setup, called after the header has been parsed and
 * before a frame is decoded/encoded: releases stale reference frames,
 * selects/allocates the current picture, creates dummy reference frames
 * where the stream lacks them, and picks the dequantizer functions.
 *
 * @return 0 on success, a negative value on failure
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
           s->codec_id == CODEC_ID_SVQ3);

    /* mark & release old frames (H.264 manages its own references) */
    if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            /* only the owning thread context may free the buffer */
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release forgotten ("zombie") pictures: allocated, referenced, but
         * no longer reachable via last/next pointers */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference) {
                    /* expected with frame threading; an error otherwise */
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            /* we already have an unused picture slot
             * (it may have been set before reading the header) */
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];
        }

        pic->f.reference = 0;
        if (!s->dropable) {
            if (s->codec_id == CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;   /* both fields are references */
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        /* default: frame flag; MPEG-1/2 field pictures derive it from the
         * field order below */
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
            s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* advance the reference chain: last <- next, next <- current */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->dropable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->codec_id != CODEC_ID_H264) {
        /* streams starting with a non-keyframe, or field-based keyframes,
         * need a dummy last picture so motion compensation has something
         * to reference */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* allocate dummy last_picture */
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            s->last_picture_ptr = &s->picture[i];
            s->last_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;

            /* fill the luma plane with black (16 in limited range) so the
             * dummy reference looks neutral */
            if (s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263) {
                for (i = 0; i < avctx->height; i++)
                    memset(s->last_picture_ptr->f.data[0] +
                           s->last_picture_ptr->f.linesize[0] * i,
                           16, avctx->width);
            }

            /* mark both fields of the dummy as fully decoded for threading */
            ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
                                      INT_MAX, 0);
            ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
                                      INT_MAX, 1);
        }
        /* a B-frame without a backward reference also needs a dummy */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* allocate dummy next_picture */
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            s->next_picture_ptr = &s->picture[i];
            s->next_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
                                      INT_MAX, 0);
            ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
                                      INT_MAX, 1);
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* with frame threading this context takes (shared) ownership of the
     * reference pictures it uses */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
        (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: address the selected field by offsetting the data
     * pointers and doubling the strides */
    if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* select the dequantizers: MPEG-2 style for mpeg_quant/MPEG-2,
     * H.263 style for H.263/H.261, MPEG-1 style otherwise */
    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    /* dct_error_sum is only allocated when noise reduction is enabled
     * while encoding */
    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01328
01329
01330
/**
 * Generic per-frame teardown, called after a frame has been coded/decoded:
 * draws replicated edges around reference pictures, updates per-frame
 * statistics, and reports decoding progress to waiting frame threads.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        /* the XvMC hardware path does its own field handling */
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding || !(s->avctx->codec->capabilities & CODEC_CAP_DRAW_HORIZ_BAND)) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        /* replicate the picture borders into the edge area so that
         * unrestricted motion vectors can reference outside the frame;
         * chroma edges are scaled by the pixel format's subsampling */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    /* clear the FPU/MMX state after possible SIMD dsp calls */
    emms_c();

    s->last_pict_type = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables (disabled legacy code) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
               ) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    /* disabled legacy code */
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;

    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        /* tell waiting frame threads this picture is fully decoded */
        ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
    }
}
01400
/**
 * Draw an anti-aliased line between (sx, sy) and (ex, ey), adding the
 * given color value into the buffer. Steps along the major axis and uses
 * 16.16 fixed point along the minor axis, splitting the color between the
 * two neighbouring pixels according to the fractional position.
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color value to add (additive blend; may wrap at 255)
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
                      int w, int h, int stride, int color)
{
    int x, y, fr, f;

    /* clip both endpoints to the visible area */
    sx = av_clip(sx, 0, w - 1);
    sy = av_clip(sy, 0, h - 1);
    ex = av_clip(ex, 0, w - 1);
    ey = av_clip(ey, 0, h - 1);

    buf[sy * stride + sx] += color;

    if (FFABS(ex - sx) > FFABS(ey - sy)) {
        /* mostly horizontal: iterate over x */
        if (sx > ex) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ex -= sx;
        f = ((ey - sy) << 16) / ex;    /* slope in 16.16 fixed point */
        for (x = 0; x <= ex; x++) {
            y  = (x * f) >> 16;        /* integer part of the y position */
            fr = (x * f) & 0xFFFF;     /* fractional part: blend weight */
            buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
            if (fr)
                buf[(y + 1) * stride + x] += (color * fr) >> 16;
        }
    } else {
        /* mostly vertical: iterate over y */
        if (sy > ey) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ey -= sy;
        if (ey)
            f = ((ex - sx) << 16) / ey;
        else
            f = 0;                     /* degenerate case: single point */
        for (y = 0; y <= ey; y++) {
            x  = (y * f) >> 16;
            fr = (y * f) & 0xFFFF;
            buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
            if (fr)
                buf[y * stride + x + 1] += (color * fr) >> 16;
        }
    }
}
01453
/**
 * Draw an arrow from (sx, sy) to (ex, ey) into the buffer: the line itself
 * plus, for sufficiently long vectors, two short head strokes.
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color value passed through to draw_line()
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* endpoints may lie a little outside the frame; draw_line() clips
     * to the visible area anyway */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    /* draw the head only when the vector is long enough to see it */
    if (dx * dx + dy * dy > 3 * 3) {
        int rx     = dx + dy;   /* direction rotated by 45 degrees */
        int ry     = -dx + dy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        /* normalize the rotated direction to a fixed head length
         * (fixed-point: the << 8 / << 4 factors cancel against length) */
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01488
/**
 * Print debug info about the frame to the log (textual maps of skip
 * counts, qscale and macroblock types) and/or draw visualizations
 * (motion vectors, qp, mb types) directly into a private copy of the
 * picture, depending on avctx->debug and avctx->debug_mv.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
    if (s->avctx->hwaccel || !pict || !pict->mb_type)
        return;

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x, y;

        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* skip count, capped at 9 so it prints as one digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    /* first char: prediction type of the macroblock */
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: segmentation/partitioning */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlacing */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }

            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;  /* mv units -> pixels */
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        /* motion_val sampling density and its row stride; H.264 has no
         * extra guard column */
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; /* needed so the visualized frames are output */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                      &h_chroma_shift, &v_chroma_shift);
        /* copy the planes into private buffers so we can draw into them
         * without corrupting the decoder's reference frames */
        for (i = 0; i < 3; i++) {
            size_t size = (i == 0) ? pict->linesize[i] * height :
                                     pict->linesize[i] * height >> v_chroma_shift;
            s->visualization_buffer[i] = av_realloc(s->visualization_buffer[i], size);
            memcpy(s->visualization_buffer[i], pict->data[i], size);
            pict->data[i] = s->visualization_buffer[i];
        }
        pict->type = FF_BUFFER_TYPE_COPY;
        pict->opaque = NULL;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;  /* chroma rows per MB row */

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            int mb_x;
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                /* draw motion vector arrows */
                if ((s->avctx->debug_mv) && pict->motion_val[0]) {
                    int type;
                    /* type 0: P forward, 1: B forward, 2: B backward */
                    for (type = 0; type < 3; type++) {
                        int direction = 0;
                        switch (type) {
                        case 0:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                                (pict->pict_type != AV_PICTURE_TYPE_P))
                                continue;
                            direction = 0;
                            break;
                        case 1:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                                (pict->pict_type != AV_PICTURE_TYPE_B))
                                continue;
                            direction = 0;
                            break;
                        case 2:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                                (pict->pict_type != AV_PICTURE_TYPE_B))
                                continue;
                            direction = 1;
                            break;
                        }
                        if (!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        if (IS_8X8(pict->mb_type[mb_index])) {
                            /* 4 vectors, one arrow per 8x8 block */
                            int i;
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            /* 2 vectors, one per 16x8 half */
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;  /* field vectors are in field units */

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            /* 2 vectors, one per 8x16 half */
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else {
                            /* single vector for the whole macroblock */
                            int sx = mb_x * 16 + 8;
                            int sy = mb_y * 16 + 8;
                            int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                            int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* visualize qscale as a gray level in the chroma planes */
                if ((s->avctx->debug & FF_DEBUG_VIS_QP)) {
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    int y;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                    }
                }
                /* visualize the macroblock type as a chroma tint */
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    pict->motion_val[0]) {
                    int mb_type = pict->mb_type[mb_index];
                    uint64_t u, v;
                    int y;
                    /* map a hue angle (degrees) and saturation to U/V */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    u = v = 128;  /* default: neutral gray */
                    if (IS_PCM(mb_type)) {
                        COLOR(120, 48)
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                        COLOR(30, 48)
                    } else if (IS_INTRA4x4(mb_type)) {
                        COLOR(90, 48)
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                        /* direct+skip: left neutral */
                    } else if (IS_DIRECT(mb_type)) {
                        COLOR(150, 48)
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                        COLOR(170, 48)
                    } else if (IS_GMC(mb_type)) {
                        COLOR(190, 48)
                    } else if (IS_SKIP(mb_type)) {
                        /* skip: left neutral */
                    } else if (!USES_LIST(mb_type, 1)) {
                        COLOR(240, 48)
                    } else if (!USES_LIST(mb_type, 0)) {
                        COLOR(0, 48)
                    } else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300, 48)
                    }

                    u *= 0x0101010101010101ULL;  /* replicate byte to 8 lanes */
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                    }

                    /* mark partitioning by XORing segmentation lines into luma */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    }
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    }
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            /* mark sub-8x8 splits only where the vectors
                             * actually differ */
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                        }
                    }

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == CODEC_ID_H264) {
                        /* no visualization for interlaced H.264 MBs */
                    }
                }
                /* NOTE(review): resets the skip table here, presumably so
                 * stale counts do not leak into the next frame — confirm */
                s->mbskip_table[mb_index] = 0;
            }
        }
    }
}
01779
/**
 * Half-pel motion compensation for a single block at reduced (lowres)
 * resolution, with edge emulation when the source area sticks out of
 * the reference frame.
 *
 * @param w,h    block width/height at lowres
 * @param pix_op interpolation function table (indexed by block size)
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);   /* pix_op entry for this scale */
    const int s_mask = (2 << lowres) - 1;    /* sub-pel fraction mask */
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        /* reduce qpel vectors to hpel precision */
        motion_x /= 2;
        motion_y /= 2;
    }

    /* split the vector into sub-pel fraction (sx/sy) and integer offset.
     * NOTE: '>>' binds less tightly than '+', so "motion_x >> lowres + 1"
     * is motion_x >> (lowres + 1) — intentional here */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* the referenced area overlaps the frame border: copy it into the
         * edge emulation buffer with replicated edges */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y << field_based,
                                h_edge_pos,
                                v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel fraction to the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;   /* select the bottom field of the reference */
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
01825
01826
/**
 * Motion compensation of one macroblock (luma + both chroma planes) at
 * reduced (lowres) resolution, optionally field based.
 *
 * Chroma vector derivation depends on the output format: H.263 style
 * (luma vector halved with rounding via the low bit), H.261 (quarter),
 * or MPEG-1/2 style depending on the chroma subsampling.
 *
 * NOTE on precedence used throughout: "x >> lowres + 1" parses as
 * x >> (lowres + 1).
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 2); /* chroma pix_op entry */
    const int block_s = 8 >> lowres;          /* 8x8 block size at lowres */
    const int s_mask = (2 << lowres) - 1;     /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    if (s->quarter_sample) {
        /* reduce qpel vectors to hpel precision */
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        /* compensate for the vertical position of the selected field */
        motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        /* H.263 chroma: half the luma vector, low luma bit kept as rounding */
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* H.261: chroma vector is a quarter of the luma vector */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) {
            /* 4:2:0: chroma subsampled in both dimensions */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {
                /* 4:2:2: chroma subsampled horizontally only */
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
            } else {
                /* 4:4:4: chroma uses the luma vector unchanged */
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* referenced area overlaps the frame border: use edge emulation
         * (17x17 luma, 9x9 chroma — one extra row/column for interpolation) */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* write into the bottom field lines of the destination */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    /* read from the bottom field of the reference */
    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale the sub-pel fractions to the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }

}
01958
/**
 * Chroma motion compensation at lowres for 4MV (four 8x8 luma vectors)
 * macroblocks; mx/my are the summed luma vectors, reduced to a single
 * chroma vector via ff_h263_round_chroma().
 *
 * NOTE: "x >> lowres + 1" parses as x >> (lowres + 1) throughout.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);      /* pix_op entry for this scale */
    const int block_s = 8 >> lowres;            /* chroma block size at lowres */
    const int s_mask = (2 << lowres) - 1;       /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        /* reduce qpel vectors to hpel precision */
        mx /= 2;
        my /= 2;
    }

    /* H.263-style rounding of the averaged luma vector to a chroma vector */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            /* area overlaps the frame border: emulate the missing edge */
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale the sub-pel fraction to the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        /* Cb needed emulation, so Cr (same offset) does too */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
02012
/**
 * Motion compensation of one macroblock at reduced ("lowres") resolution.
 *
 * Dispatches on s->mv_type and applies the motion vectors stored in
 * s->mv[dir][] to the destination planes, delegating the per-block work to
 * mpeg_motion_lowres() / hpel_motion_lowres() / chroma_4mv_motion_lowres().
 *
 * @param s           encoder/decoder context
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma Cb destination pointer
 * @param dest_cr     chroma Cr destination pointer
 * @param dir         prediction direction: 0 = forward, 1 = backward
 * @param ref_picture array of plane pointers of the reference picture
 * @param pix_op      halfpel motion-compensation function table (put or avg)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    /* block edge shrinks with each lowres level: 8, 4, 2, ... pixels */
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one MV for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four MVs, one per 8x8 luma block; chroma uses their sum
         * (averaged inside chroma_4mv_motion_lowres) */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: predict top and bottom fields separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a same-parity reference in a P field refers
             * to the current (partially decoded) picture, not the previous
             * one — switch the reference planes accordingly */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two MVs, each covering a 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            /* same-parity reference in a P field picture comes from the
             * current picture (see MV_TYPE_FIELD case above) */
            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: average the same-parity and opposite-parity
         * predictions (pix_op switches to "avg" after the first pass) */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after put we have a partial image, so do averaging next */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
02154
02158 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02159 {
02160 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02161 int my, off, i, mvs;
02162
02163 if (s->picture_structure != PICT_FRAME) goto unhandled;
02164
02165 switch (s->mv_type) {
02166 case MV_TYPE_16X16:
02167 mvs = 1;
02168 break;
02169 case MV_TYPE_16X8:
02170 mvs = 2;
02171 break;
02172 case MV_TYPE_8X8:
02173 mvs = 4;
02174 break;
02175 default:
02176 goto unhandled;
02177 }
02178
02179 for (i = 0; i < mvs; i++) {
02180 my = s->mv[dir][i][1]<<qpel_shift;
02181 my_max = FFMAX(my_max, my);
02182 my_min = FFMIN(my_min, my);
02183 }
02184
02185 off = (FFMAX(-my_min, my_max) + 63) >> 6;
02186
02187 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02188 unhandled:
02189 return s->mb_height-1;
02190 }
02191
02192
/**
 * Dequantize an intra block and inverse-transform it directly into the
 * destination (overwrite, no add).
 *
 * @param i      block index (0..3 luma, 4+ chroma) — selects the DC scaler
 * @param qscale quantizer scale used for dequantization
 */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
02199
02200
02201 static inline void add_dct(MpegEncContext *s,
02202 DCTELEM *block, int i, uint8_t *dest, int line_size)
02203 {
02204 if (s->block_last_index[i] >= 0) {
02205 s->dsp.idct_add (dest, line_size, block);
02206 }
02207 }
02208
02209 static inline void add_dequant_dct(MpegEncContext *s,
02210 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02211 {
02212 if (s->block_last_index[i] >= 0) {
02213 s->dct_unquantize_inter(s, block, i, qscale);
02214
02215 s->dsp.idct_add (dest, line_size, block);
02216 }
02217 }
02218
02222 void ff_clean_intra_table_entries(MpegEncContext *s)
02223 {
02224 int wrap = s->b8_stride;
02225 int xy = s->block_index[0];
02226
02227 s->dc_val[0][xy ] =
02228 s->dc_val[0][xy + 1 ] =
02229 s->dc_val[0][xy + wrap] =
02230 s->dc_val[0][xy + 1 + wrap] = 1024;
02231
02232 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
02233 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02234 if (s->msmpeg4_version>=3) {
02235 s->coded_block[xy ] =
02236 s->coded_block[xy + 1 ] =
02237 s->coded_block[xy + wrap] =
02238 s->coded_block[xy + 1 + wrap] = 0;
02239 }
02240
02241 wrap = s->mb_stride;
02242 xy = s->mb_x + s->mb_y * wrap;
02243 s->dc_val[1][xy] =
02244 s->dc_val[2][xy] = 1024;
02245
02246 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02247 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02248
02249 s->mbintra_table[xy]= 0;
02250 }
02251
02252
02253
02254
02255
02256
02257
02258
02259
02260
02261
/**
 * Reconstruct one macroblock: motion-compensate (inter), dequantize and
 * inverse-transform the coefficient blocks, and write the result into the
 * current picture (or a scratchpad for unreadable B-frames).
 *
 * lowres_flag and is_mpeg12 are compile-time constants in the specialized
 * instantiations dispatched from MPV_decode_mb(), so dead branches are
 * removed by the compiler.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* hardware (XvMC) path: hand the MB off and do nothing else here */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* save + dump DCT coefficients of all 6 blocks of this MB */
        int i,j;
        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                *dct++ = block[i][s->dsp.idct_permutation[j]];
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictor state for the next macroblock */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* H.263-style prediction: clear stale intra tables */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* MPEG-style: reset last DC values to mid-level */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct pixels unless we are encoding and the picture is never
     * referenced/displayed (skip unless PSNR stats were requested) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0];
        const int uvlinesize = s->current_picture.f.linesize[1];
        /* B-frames may be reconstructed into a scratchpad when nobody
         * reads them back */
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* maintain the skipped-MB table used by the loop filter / error
         * concealment (decoding only) */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0;
            }
        }

        /* interlaced DCT interleaves the two fields: double the stride
         * and start the "bottom" blocks one line down */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the reference rows we need are decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        /* bidirectional prediction averages the second ref */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* honor the user's IDCT-skipping policy */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the residual; codecs whose bitstream already carries
             * dequantized coefficients take the plain add_dct() path */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* 4:2:0 — one chroma block per plane */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 — two chroma blocks stacked vertically */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* chroma of 4:2:2/4:4:4: recompute interlaced
                         * stride/offset with the chroma linesize */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){ /* 4:4:4 — blocks 8..11 */
                            add_dct(s, block[8],  8,  dest_cb+block_size,            dct_linesize);
                            add_dct(s, block[9],  9,  dest_cr+block_size,            dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra MB: the transform output is the pixels themselves.
             * dct_unquantize_intra must still run unless the bitstream
             * already carries dequantized coefficients (MPEG-1/2 decode) */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        /* chroma interlaced stride/offset, see above */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){ /* 4:4:4 — blocks 8..11 */
                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        /* copy the scratchpad result back for unreadable pictures */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02501
/**
 * Reconstruct one decoded macroblock into the current picture.
 * Dispatches to specialized instantiations of the always-inlined
 * MPV_decode_mb_internal() so the lowres and MPEG-1/2 flags become
 * compile-time constants; the MPEG-1/2 specialization is dropped in
 * CONFIG_SMALL builds to save code size.
 */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
        else                 MPV_decode_mb_internal(s, block, 0, 1);
    } else
#endif
    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
    else                 MPV_decode_mb_internal(s, block, 0, 0);
}
02512
/**
 * Draw the edge padding around a finished band of the current picture and,
 * if the caller registered one, invoke the draw_horiz_band() callback.
 *
 * @param y first line of the band (in field lines for field pictures)
 * @param h height of the band in lines
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* field lines -> frame lines */
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* pad the picture edges so unrestricted MVs can read outside */
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,           EDGE_WIDTH,           sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,   EDGE_WIDTH>>vshift,   sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,   EDGE_WIDTH>>vshift,   sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    /* without SLICE_FLAG_ALLOW_FIELD the callback only fires once per
     * frame pair, on the second field */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* pick the picture in display order: current for B/low-delay/
         * coded-order slices, otherwise the previous reference */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* leave a clean FPU state before entering user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02581
/**
 * Initialize s->block_index[] (indices of the 4 luma + 2 chroma blocks of
 * the current MB in the per-block tables) and s->dest[] (plane pointers of
 * the current MB in the current picture) for the current mb_x/mb_y.
 *
 * The indices/pointers are set one macroblock to the LEFT of the current
 * position; callers advance them per-MB.
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    /* log2 of the MB edge in pixels at the current lowres level */
    const int mb_size= 4 - s->avctx->lowres;

    /* 4 luma blocks: two per row in the b8 grid */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma blocks live after the luma area in the shared tables */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: half the MB rows, field parity must match */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02613
02614 void ff_mpeg_flush(AVCodecContext *avctx){
02615 int i;
02616 MpegEncContext *s = avctx->priv_data;
02617
02618 if(s==NULL || s->picture==NULL)
02619 return;
02620
02621 for(i=0; i<s->picture_count; i++){
02622 if (s->picture[i].f.data[0] &&
02623 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02624 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02625 free_frame_buffer(s, &s->picture[i]);
02626 }
02627 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02628
02629 s->mb_x= s->mb_y= 0;
02630 s->closed_gop= 0;
02631
02632 s->parse_context.state= -1;
02633 s->parse_context.frame_start_found= 0;
02634 s->parse_context.overread= 0;
02635 s->parse_context.overread_index= 0;
02636 s->parse_context.index= 0;
02637 s->parse_context.last_index= 0;
02638 s->bitstream_buffer_size=0;
02639 s->pp_time=0;
02640 }
02641
02642 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02643 DCTELEM *block, int n, int qscale)
02644 {
02645 int i, level, nCoeffs;
02646 const uint16_t *quant_matrix;
02647
02648 nCoeffs= s->block_last_index[n];
02649
02650 if (n < 4)
02651 block[0] = block[0] * s->y_dc_scale;
02652 else
02653 block[0] = block[0] * s->c_dc_scale;
02654
02655 quant_matrix = s->intra_matrix;
02656 for(i=1;i<=nCoeffs;i++) {
02657 int j= s->intra_scantable.permutated[i];
02658 level = block[j];
02659 if (level) {
02660 if (level < 0) {
02661 level = -level;
02662 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02663 level = (level - 1) | 1;
02664 level = -level;
02665 } else {
02666 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02667 level = (level - 1) | 1;
02668 }
02669 block[j] = level;
02670 }
02671 }
02672 }
02673
02674 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02675 DCTELEM *block, int n, int qscale)
02676 {
02677 int i, level, nCoeffs;
02678 const uint16_t *quant_matrix;
02679
02680 nCoeffs= s->block_last_index[n];
02681
02682 quant_matrix = s->inter_matrix;
02683 for(i=0; i<=nCoeffs; i++) {
02684 int j= s->intra_scantable.permutated[i];
02685 level = block[j];
02686 if (level) {
02687 if (level < 0) {
02688 level = -level;
02689 level = (((level << 1) + 1) * qscale *
02690 ((int) (quant_matrix[j]))) >> 4;
02691 level = (level - 1) | 1;
02692 level = -level;
02693 } else {
02694 level = (((level << 1) + 1) * qscale *
02695 ((int) (quant_matrix[j]))) >> 4;
02696 level = (level - 1) | 1;
02697 }
02698 block[j] = level;
02699 }
02700 }
02701 }
02702
02703 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02704 DCTELEM *block, int n, int qscale)
02705 {
02706 int i, level, nCoeffs;
02707 const uint16_t *quant_matrix;
02708
02709 if(s->alternate_scan) nCoeffs= 63;
02710 else nCoeffs= s->block_last_index[n];
02711
02712 if (n < 4)
02713 block[0] = block[0] * s->y_dc_scale;
02714 else
02715 block[0] = block[0] * s->c_dc_scale;
02716 quant_matrix = s->intra_matrix;
02717 for(i=1;i<=nCoeffs;i++) {
02718 int j= s->intra_scantable.permutated[i];
02719 level = block[j];
02720 if (level) {
02721 if (level < 0) {
02722 level = -level;
02723 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02724 level = -level;
02725 } else {
02726 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02727 }
02728 block[j] = level;
02729 }
02730 }
02731 }
02732
02733 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02734 DCTELEM *block, int n, int qscale)
02735 {
02736 int i, level, nCoeffs;
02737 const uint16_t *quant_matrix;
02738 int sum=-1;
02739
02740 if(s->alternate_scan) nCoeffs= 63;
02741 else nCoeffs= s->block_last_index[n];
02742
02743 if (n < 4)
02744 block[0] = block[0] * s->y_dc_scale;
02745 else
02746 block[0] = block[0] * s->c_dc_scale;
02747 quant_matrix = s->intra_matrix;
02748 for(i=1;i<=nCoeffs;i++) {
02749 int j= s->intra_scantable.permutated[i];
02750 level = block[j];
02751 if (level) {
02752 if (level < 0) {
02753 level = -level;
02754 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02755 level = -level;
02756 } else {
02757 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02758 }
02759 block[j] = level;
02760 sum+=level;
02761 }
02762 }
02763 block[63]^=sum&1;
02764 }
02765
02766 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02767 DCTELEM *block, int n, int qscale)
02768 {
02769 int i, level, nCoeffs;
02770 const uint16_t *quant_matrix;
02771 int sum=-1;
02772
02773 if(s->alternate_scan) nCoeffs= 63;
02774 else nCoeffs= s->block_last_index[n];
02775
02776 quant_matrix = s->inter_matrix;
02777 for(i=0; i<=nCoeffs; i++) {
02778 int j= s->intra_scantable.permutated[i];
02779 level = block[j];
02780 if (level) {
02781 if (level < 0) {
02782 level = -level;
02783 level = (((level << 1) + 1) * qscale *
02784 ((int) (quant_matrix[j]))) >> 4;
02785 level = -level;
02786 } else {
02787 level = (((level << 1) + 1) * qscale *
02788 ((int) (quant_matrix[j]))) >> 4;
02789 }
02790 block[j] = level;
02791 sum+=level;
02792 }
02793 }
02794 block[63]^=sum&1;
02795 }
02796
02797 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02798 DCTELEM *block, int n, int qscale)
02799 {
02800 int i, level, qmul, qadd;
02801 int nCoeffs;
02802
02803 assert(s->block_last_index[n]>=0);
02804
02805 qmul = qscale << 1;
02806
02807 if (!s->h263_aic) {
02808 if (n < 4)
02809 block[0] = block[0] * s->y_dc_scale;
02810 else
02811 block[0] = block[0] * s->c_dc_scale;
02812 qadd = (qscale - 1) | 1;
02813 }else{
02814 qadd = 0;
02815 }
02816 if(s->ac_pred)
02817 nCoeffs=63;
02818 else
02819 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02820
02821 for(i=1; i<=nCoeffs; i++) {
02822 level = block[i];
02823 if (level) {
02824 if (level < 0) {
02825 level = level * qmul - qadd;
02826 } else {
02827 level = level * qmul + qadd;
02828 }
02829 block[i] = level;
02830 }
02831 }
02832 }
02833
02834 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02835 DCTELEM *block, int n, int qscale)
02836 {
02837 int i, level, qmul, qadd;
02838 int nCoeffs;
02839
02840 assert(s->block_last_index[n]>=0);
02841
02842 qadd = (qscale - 1) | 1;
02843 qmul = qscale << 1;
02844
02845 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02846
02847 for(i=0; i<=nCoeffs; i++) {
02848 level = block[i];
02849 if (level) {
02850 if (level < 0) {
02851 level = level * qmul - qadd;
02852 } else {
02853 level = level * qmul + qadd;
02854 }
02855 block[i] = level;
02856 }
02857 }
02858 }
02859
02863 void ff_set_qscale(MpegEncContext * s, int qscale)
02864 {
02865 if (qscale < 1)
02866 qscale = 1;
02867 else if (qscale > 31)
02868 qscale = 31;
02869
02870 s->qscale = qscale;
02871 s->chroma_qscale= s->chroma_qscale_table[qscale];
02872
02873 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02874 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02875 }
02876
02877 void MPV_report_decode_progress(MpegEncContext *s)
02878 {
02879 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02880 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02881 }