00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043
00044
00045
00046
/* Forward declarations for the per-standard inverse-quantisation routines;
 * ff_dct_common_init() installs these into the MpegEncContext function
 * pointers (possibly replaced by arch-optimised versions afterwards). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
00061
00062
00063
00064
00065
00066
00067
00068
/* Default luma-to-chroma qscale mapping: the identity (chroma uses the
 * same quantiser as luma). Codecs with a non-linear chroma quantiser
 * (e.g. MPEG-4) install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
    /* 0 ... 31 map to themselves */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
00074
/* MPEG-1 intra DC quantiser scale, indexed by qscale: always 8
 * (MPEG-1 has a fixed DC precision, independent of the quantiser). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    /* 0 ... 127 all map to 8 */
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00086
/* MPEG-2 intra DC scale tables for intra_dc_precision = 1, 2 and 3
 * (9, 10 and 11 bit DC): the divisor is 4, 2 and 1 respectively.
 * Together with ff_mpeg1_dc_scale_table (precision 0, divisor 8) they
 * form ff_mpeg2_dc_scale_table[] below, indexed by intra_dc_precision. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    /* 9-bit DC: divisor 4 for every qscale */
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
    /* 10-bit DC: divisor 2 for every qscale */
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
    /* 11-bit DC: divisor 1 for every qscale */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* DC scale table lookup, indexed by MPEG-2 intra_dc_precision (0-3). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00129
/* PIX_FMT_NONE-terminated pixel-format lists for AVCodec.pix_fmts /
 * get_format(): software-only YUV 4:2:0, and the same with the
 * hardware-accelerated (VLD) formats offered first. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00142
/**
 * Scan [p, end) for an MPEG start code (00 00 01 xx), carrying partial
 * matches across calls in *state, which always holds the last 4 bytes
 * processed, MSB first.
 *
 * @return pointer just past the xx byte of the start code, or end if no
 *         complete start code was found in this buffer
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t *restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Finish a start code that may straddle the previous buffer: feed up
     * to 3 bytes through *state one at a time. tmp == 0x100 means the
     * previous 3 bytes (now shifted) were 00 00 01. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan: look at the 3 bytes behind p and skip as far ahead as
     * their values permit — only a 00 00 01 triplet can end the loop. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* p[-1] can be neither 00 nor 01 of a code */
        else if (p[-2]          ) p += 2; /* middle byte nonzero: no code ends at p   */
        else if (p[-3] | (p[-1] - 1)) p++;
        else {
            /* p[-3..-1] == 00 00 01: consume the xx byte and stop */
            p++;
            break;
        }
    }

    /* Reload *state from the last 4 bytes so a later call can resume. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
00175
00176
/**
 * Initialise the DSP context, the dequantisation function pointers and
 * the scan tables of an MpegEncContext. Architecture-specific init may
 * override the C function pointers installed here.
 *
 * @return 0 (cannot fail)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* per-architecture overrides (may replace the C pointers above) */
#if HAVE_MMX
    ff_MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif HAVE_MMI
    ff_MPV_common_init_mmi(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* Scan tables are permuted to match the IDCT's coefficient layout;
     * interlaced (alternate_scan) streams use the vertical scan for both
     * intra and inter blocks. */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00219
/**
 * Shallow-copy src into dst and tag dst as FF_BUFFER_TYPE_COPY so the
 * buffer-release code knows dst does not own the frame data.
 */
void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}
00225
/**
 * Release the frame buffer of a Picture along with any hwaccel private
 * data attached to it.
 * WMV3IMAGE/VC1IMAGE bypass the frame-threading buffer pool (they were
 * allocated with avcodec_default_get_buffer), so release symmetrically.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        ff_thread_release_buffer(s->avctx, &pic->f);
    else
        avcodec_default_release_buffer(s->avctx, &pic->f);
    av_freep(&pic->f.hwaccel_picture_private);
}
00240
/**
 * Allocate a frame buffer for a Picture (plus hwaccel private data when a
 * hardware accelerator is in use) and validate the strides returned by
 * get_buffer() against the context.
 *
 * @return 0 on success, -1 on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE do not go through the frame-threading pool;
     * free_frame_buffer() mirrors this choice on release. */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* All pictures in the context must share the same strides; a change
     * mid-stream would break motion compensation. */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00288
00293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00294 {
00295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00296
00297
00298
00299 const int mb_array_size = s->mb_stride * s->mb_height;
00300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
00301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
00302 int i;
00303 int r = -1;
00304
00305 if (shared) {
00306 assert(pic->f.data[0]);
00307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00308 pic->f.type = FF_BUFFER_TYPE_SHARED;
00309 } else {
00310 assert(!pic->f.data[0]);
00311
00312 if (alloc_frame_buffer(s, pic) < 0)
00313 return -1;
00314
00315 s->linesize = pic->f.linesize[0];
00316 s->uvlinesize = pic->f.linesize[1];
00317 }
00318
00319 if (pic->f.qscale_table == NULL) {
00320 if (s->encoding) {
00321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00322 mb_array_size * sizeof(int16_t), fail)
00323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00324 mb_array_size * sizeof(int16_t), fail)
00325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00326 mb_array_size * sizeof(int8_t ), fail)
00327 }
00328
00329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00330 mb_array_size * sizeof(uint8_t) + 2, fail)
00331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00333 fail)
00334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00336 fail)
00337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00339 if (s->out_format == FMT_H264) {
00340 for (i = 0; i < 2; i++) {
00341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00342 2 * (b4_array_size + 4) * sizeof(int16_t),
00343 fail)
00344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00346 4 * mb_array_size * sizeof(uint8_t), fail)
00347 }
00348 pic->f.motion_subsample_log2 = 2;
00349 } else if (s->out_format == FMT_H263 || s->encoding ||
00350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00351 for (i = 0; i < 2; i++) {
00352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00353 2 * (b8_array_size + 4) * sizeof(int16_t),
00354 fail)
00355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00357 4 * mb_array_size * sizeof(uint8_t), fail)
00358 }
00359 pic->f.motion_subsample_log2 = 3;
00360 }
00361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00364 }
00365 pic->f.qstride = s->mb_stride;
00366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00367 1 * sizeof(AVPanScan), fail)
00368 }
00369
00370 pic->owner2 = s;
00371
00372 return 0;
00373 fail:
00374 if (r >= 0)
00375 free_frame_buffer(s, pic);
00376 return -1;
00377 }
00378
/**
 * Free a Picture: its frame buffer (unless the data is caller-owned, i.e.
 * FF_BUFFER_TYPE_SHARED) and all side tables allocated by
 * ff_alloc_picture().
 */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    pic->f.mb_type = NULL; /* pointed into mb_type_base, now freed */
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
    }

    /* shared data is caller-owned: only drop our references */
    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}
00412
/**
 * Allocate the per-slice-context scratch buffers (edge emulation, motion
 * estimation scratchpads, DCT blocks, H.263 AC prediction values).
 * Counterpart of free_duplicate_context().
 *
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 95) * 2 * 21 * 4, fail);

    /* one allocation serves several scratch uses; obmc_scratchpad is an
     * offset alias into the same buffer */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC prediction values: 16 int16_t per 8x8 block, with a one-row
         * guard offset so index -1/-stride is valid */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; /* partially allocated buffers are freed by the caller's cleanup */
}
00463
00464 static void free_duplicate_context(MpegEncContext *s)
00465 {
00466 if (s == NULL)
00467 return;
00468
00469 av_freep(&s->edge_emu_buffer);
00470 av_freep(&s->me.scratchpad);
00471 s->me.temp =
00472 s->rd_scratchpad =
00473 s->b_scratchpad =
00474 s->obmc_scratchpad = NULL;
00475
00476 av_freep(&s->dct_error_sum);
00477 av_freep(&s->me.map);
00478 av_freep(&s->me.score_map);
00479 av_freep(&s->blocks);
00480 av_freep(&s->ac_val_base);
00481 s->block = NULL;
00482 }
00483
/**
 * Copy the per-slice fields (scratch pointers, MB row range, bit writer,
 * DCT statistics, AC prediction pointers) from src into bak.
 * Used by ff_update_duplicate_context() to preserve a slice context's
 * private state across a wholesale memcpy of the shared state.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00510
/**
 * Refresh a slice context from the main context: copy everything from
 * src, then restore dst's own per-slice state (scratch buffers, MB row
 * range, bit writer) and re-point pblocks[] into dst's own block array.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;

    /* save dst's private fields, overwrite with src, restore them */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i]; /* memcpy left these pointing at src */
    }
}
00526
00527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00528 const AVCodecContext *src)
00529 {
00530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00531
00532 if (dst == src)
00533 return 0;
00534
00535
00536
00537 if (!s->context_initialized) {
00538 memcpy(s, s1, sizeof(MpegEncContext));
00539
00540 s->avctx = dst;
00541 s->bitstream_buffer = NULL;
00542 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00543
00544 if (s1->context_initialized){
00545 s->picture_range_start += MAX_PICTURE_COUNT;
00546 s->picture_range_end += MAX_PICTURE_COUNT;
00547 ff_MPV_common_init(s);
00548 }
00549 }
00550
00551 s->avctx->coded_height = s1->avctx->coded_height;
00552 s->avctx->coded_width = s1->avctx->coded_width;
00553 s->avctx->width = s1->avctx->width;
00554 s->avctx->height = s1->avctx->height;
00555
00556 s->coded_picture_number = s1->coded_picture_number;
00557 s->picture_number = s1->picture_number;
00558 s->input_picture_number = s1->input_picture_number;
00559
00560 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00561 memcpy(&s->last_picture, &s1->last_picture,
00562 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00563
00564 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
00565 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00566 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
00567
00568
00569 s->next_p_frame_damaged = s1->next_p_frame_damaged;
00570 s->workaround_bugs = s1->workaround_bugs;
00571 s->padding_bug_score = s1->padding_bug_score;
00572
00573
00574 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00575 (char *) &s1->shape - (char *) &s1->time_increment_bits);
00576
00577
00578 s->max_b_frames = s1->max_b_frames;
00579 s->low_delay = s1->low_delay;
00580 s->dropable = s1->dropable;
00581
00582
00583 s->divx_packed = s1->divx_packed;
00584
00585 if (s1->bitstream_buffer) {
00586 if (s1->bitstream_buffer_size +
00587 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00588 av_fast_malloc(&s->bitstream_buffer,
00589 &s->allocated_bitstream_buffer_size,
00590 s1->allocated_bitstream_buffer_size);
00591 s->bitstream_buffer_size = s1->bitstream_buffer_size;
00592 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00593 s1->bitstream_buffer_size);
00594 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00595 FF_INPUT_BUFFER_PADDING_SIZE);
00596 }
00597
00598
00599 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00600 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00601
00602 if (!s1->first_field) {
00603 s->last_pict_type = s1->pict_type;
00604 if (s1->current_picture_ptr)
00605 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00606
00607 if (s1->pict_type != AV_PICTURE_TYPE_B) {
00608 s->last_non_b_pict_type = s1->pict_type;
00609 }
00610 }
00611
00612 return 0;
00613 }
00614
00621 void ff_MPV_common_defaults(MpegEncContext *s)
00622 {
00623 s->y_dc_scale_table =
00624 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
00625 s->chroma_qscale_table = ff_default_chroma_qscale_table;
00626 s->progressive_frame = 1;
00627 s->progressive_sequence = 1;
00628 s->picture_structure = PICT_FRAME;
00629
00630 s->coded_picture_number = 0;
00631 s->picture_number = 0;
00632 s->input_picture_number = 0;
00633
00634 s->picture_in_gop_number = 0;
00635
00636 s->f_code = 1;
00637 s->b_code = 1;
00638
00639 s->picture_range_start = 0;
00640 s->picture_range_end = MAX_PICTURE_COUNT;
00641
00642 s->slice_context_count = 1;
00643 }
00644
/**
 * Set decoder-side defaults; currently identical to the common defaults.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
00654
/**
 * Initialise an MpegEncContext: derive the macroblock geometry from
 * width/height, allocate the picture array, the per-MB tables and (for
 * the encoder) the motion-vector and quantiser tables, then set up one
 * duplicate context per slice thread.
 *
 * @return 0 on success, -1 on failure (context is torn down via
 *         ff_MPV_common_end())
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 needs the height rounded to a multiple of 32
     * (two interleaved 16-pixel field MB rows) */
    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* clamp the slice count to what the MB height / thread limit allows */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* macroblock geometry; strides carry one extra column for predictors */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    /* edge positions used by motion-vector clipping */
    s->h_edge_pos = s->mb_width  * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    /* DC/AC prediction table sizes: luma (8x8 blocks) + 2 chroma planes */
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame = &s->current_picture.f;

    /* map sequential MB index to (possibly strided) x+y*mb_stride */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; /* sentinel */

    if (s->encoding) {
        /* motion-vector tables, each offset by one row+col of guard entries */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if (s->msmpeg4_version) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* precomputed quantisation matrices for all 32 qscales */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix         , 64 * 32     * sizeof(int),      fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix  , 64 * 32     * sizeof(int),      fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix         , 64 * 32     * sizeof(int),      fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16       , 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16       , 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture *), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size * sizeof(uint8_t), fail)

    if (s->codec_id == CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* field-MV tables for interlaced motion estimation */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* coded-block pattern for AC prediction */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* CBP and prediction-direction tables for partitioned decoding */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,      mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC prediction values, initialised to the mid value 1024 */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which MBs are intra: 1 so the first decode resets predictors */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    /* slice threading: clone the context per slice and assign MB row ranges */
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i], s) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i)     + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s, s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;

    return 0;
fail:
    ff_MPV_common_end(s); /* frees everything allocated so far */
    return -1;
}
00868
00869
/**
 * Free everything allocated by ff_MPV_common_init(); safe to call on a
 * partially initialised context (all frees are NULL-tolerant).
 */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* slice thread contexts: context 0 is s itself, only 1..n-1 are freed */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers were offsets into the freed bases */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    /* the chroma matrices may alias the luma ones; free only if distinct */
    if (s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if (s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix   = NULL;
    s->q_chroma_intra_matrix16 = NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies share the picture buffers with the original */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
00958
/**
 * Build the derived run/level lookup tables of an RLTable: per-run
 * maximum level, per-level maximum run, and the index of the first code
 * with each run — once for "not last" codes and once for "last" codes.
 *
 * @param static_store optional static backing storage laid out as
 *        [max_level (MAX_RUN+1)][max_run (MAX_LEVEL+1)][index_run (MAX_RUN+1)]
 *        per last-flag; if already filled (rl->max_level[0] set), the
 *        tables are assumed initialised and nothing is done
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* already initialised into static storage? */
    if (static_store && rl->max_level[0])
        return;

    /* compute the tables for the two halves of the code list:
     * [0, rl->last) = "not last" codes, [rl->last, rl->n) = "last" codes */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run,   0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n marks "no code" */
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* point the RLTable at static storage when given, else heap copies */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
01010
/**
 * Fill rl->rl_vlc[q] for every quantiser q (0-31) with pre-dequantised
 * (len, level, run) entries derived from the RLTable's VLC, so the
 * decoder can skip the dequantisation multiply per coefficient.
 */
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        /* H.263-style dequantisation: level * 2q + (q-1 | 1) */
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            /* q == 0 is used for the raw (undequantised) table */
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) {           /* illegal code */
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) {     /* more bits needed (sub-table) */
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) {  /* escape code */
                    run   = 66;
                    level = 0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192; /* flag "last" codes */
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}
01050
01051 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01052 {
01053 int i;
01054
01055
01056 for (i = 0; i < s->picture_count; i++) {
01057 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01058 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01059 (remove_current || &s->picture[i] != s->current_picture_ptr)
01060 ) {
01061 free_frame_buffer(s, &s->picture[i]);
01062 }
01063 }
01064 }
01065
01066 int ff_find_unused_picture(MpegEncContext *s, int shared)
01067 {
01068 int i;
01069
01070 if (shared) {
01071 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01072 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01073 return i;
01074 }
01075 } else {
01076 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01077 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01078 return i;
01079 }
01080 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01081 if (s->picture[i].f.data[0] == NULL)
01082 return i;
01083 }
01084 }
01085
01086 av_log(s->avctx, AV_LOG_FATAL,
01087 "Internal error, picture buffer overflow\n");
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099 abort();
01100 return -1;
01101 }
01102
01103 static void update_noise_reduction(MpegEncContext *s)
01104 {
01105 int intra, i;
01106
01107 for (intra = 0; intra < 2; intra++) {
01108 if (s->dct_count[intra] > (1 << 16)) {
01109 for (i = 0; i < 64; i++) {
01110 s->dct_error_sum[intra][i] >>= 1;
01111 }
01112 s->dct_count[intra] >>= 1;
01113 }
01114
01115 for (i = 0; i < 64; i++) {
01116 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01117 s->dct_count[intra] +
01118 s->dct_error_sum[intra][i] / 2) /
01119 (s->dct_error_sum[intra][i] + 1);
01120 }
01121 }
01122 }
01123
/**
 * Set up the current/last/next picture pointers and buffers before a
 * frame is coded or decoded: releases stale reference frames, picks or
 * allocates the picture to work on, creates dummy reference pictures
 * when the stream starts on a non-keyframe, and selects the
 * dct_unquantize functions for the codec in use.
 *
 * @return 0 on success, a negative value on failure
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
           s->codec_id == CODEC_ID_SVQ3);

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* non-H.264 path (and SVQ3): drop reference frames that are no
     * longer needed and clean up leaked ("zombie") references */
    if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
        /* release the previous reference frame once it is no longer the
         * next reference and still has data allocated */
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* free any other allocated reference pictures owned by this
         * context that are neither last nor next — these should not
         * exist outside frame-threaded decoding */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            /* the current slot exists but has no buffer — reuse it
             * rather than searching for a free one */
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];
        }

        pic->f.reference = 0;
        if (!s->dropable) {
            if (s->codec_id == CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        /* mark interlacing/field properties of the new picture */
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
            s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;

    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* a non-B picture shifts the reference chain: old next becomes last,
     * and (unless dropable) the current picture becomes the new next */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->dropable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    /* allocate dummy reference pictures when decoding does not start on
     * an I frame (or starts field-based), so motion compensation has
     * something to read from */
    if (s->codec_id != CODEC_ID_H264) {
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* allocate a dummy "last" picture */
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            s->last_picture_ptr = &s->picture[i];
            s->last_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }

            /* for FLV1/H.263, prefill the dummy luma plane with 16
             * (black) instead of uninitialized memory */
            if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
            }

            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* allocate a dummy "next" picture for a leading B frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            s->next_picture_ptr = &s->picture[i];
            s->next_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* under frame threading, claim ownership of the reference pictures */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
        (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: offset to the bottom field if needed and double the
     * line strides so MC operates on one field */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* select the dequantizer pair matching the bitstream's quantizer:
     * MPEG-2 style, H.263 style, or MPEG-1 style */
    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01335
01336
01337
/**
 * Finish coding/decoding of the current frame: draw the edge padding
 * around reference pictures, update last-frame bookkeeping, release
 * non-reference pictures when encoding, and report decoding progress.
 */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
              !s->avctx->hwaccel &&
              !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
              s->unrestricted_mv &&
              s->current_picture.f.reference &&
              !s->intra_only &&
              !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        /* replicate the picture borders into the edge padding of all
         * three planes so unrestricted MVs can read outside the frame */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember type/lambda of this frame for rate control and decisions
     * on the following frames */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* disabled: copy back the current picture into its slot */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* the encoder frees all allocated, non-reference pictures here */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
               ) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* signal other frame threads that this reference frame is complete */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
01407
01415 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01416 int w, int h, int stride, int color)
01417 {
01418 int x, y, fr, f;
01419
01420 sx = av_clip(sx, 0, w - 1);
01421 sy = av_clip(sy, 0, h - 1);
01422 ex = av_clip(ex, 0, w - 1);
01423 ey = av_clip(ey, 0, h - 1);
01424
01425 buf[sy * stride + sx] += color;
01426
01427 if (FFABS(ex - sx) > FFABS(ey - sy)) {
01428 if (sx > ex) {
01429 FFSWAP(int, sx, ex);
01430 FFSWAP(int, sy, ey);
01431 }
01432 buf += sx + sy * stride;
01433 ex -= sx;
01434 f = ((ey - sy) << 16) / ex;
01435 for(x= 0; x <= ex; x++){
01436 y = (x * f) >> 16;
01437 fr = (x * f) & 0xFFFF;
01438 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01439 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
01440 }
01441 } else {
01442 if (sy > ey) {
01443 FFSWAP(int, sx, ex);
01444 FFSWAP(int, sy, ey);
01445 }
01446 buf += sx + sy * stride;
01447 ey -= sy;
01448 if (ey)
01449 f = ((ex - sx) << 16) / ey;
01450 else
01451 f = 0;
01452 for(y= 0; y <= ey; y++){
01453 x = (y*f) >> 16;
01454 fr = (y*f) & 0xFFFF;
01455 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01456 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
01457 }
01458 }
01459 }
01460
/**
 * Draw an arrow from (sx, sy) to (ex, ey) by adding 'color' onto the
 * buffer: the shaft plus, for arrows longer than 3 pixels, two short
 * head strokes rotated +-45 degrees around the start point.
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx,dy;

    /* loose pre-clip; draw_line() clips exactly to the picture */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* (rx, ry) is the direction rotated by 45 degrees, then scaled
         * to a fixed head length of 3 pixels (4-bit fixed point) */
        int rx = dx + dy;
        int ry = -dx + dy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01495
/**
 * Print and/or visualize per-macroblock debugging information for a
 * decoded frame, depending on avctx->debug / avctx->debug_mv flags:
 * textual skip/QP/MB-type maps via av_log(), and motion vectors, QP and
 * MB-type coloring drawn directly into a copy of the picture planes.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
    if (s->avctx->hwaccel || !pict || !pict->mb_type)
        return;

    /* textual per-MB dump (one character cell per macroblock) */
    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* number of consecutive skipped MBs, capped at 9 */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    /* first char: prediction type of the MB */
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partitioning of the MB */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: '=' for interlaced MBs */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }

            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* graphical visualization: draw into private copies of the planes */
    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; /* disabled so the vectors stay visible */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                      &h_chroma_shift, &v_chroma_shift);
        /* copy the planes into visualization buffers so the real frame
         * data is not trashed by the drawing below.
         * NOTE(review): the av_realloc() result is not checked. */
        for (i = 0; i < 3; i++) {
            size_t size= (i == 0) ? pict->linesize[i] * height:
                         pict->linesize[i] * height >> v_chroma_shift;
            s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
            memcpy(s->visualization_buffer[i], pict->data[i], size);
            pict->data[i] = s->visualization_buffer[i];
        }
        pict->type = FF_BUFFER_TYPE_COPY;
        pict->opaque= NULL;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            int mb_x;
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                /* motion-vector arrows */
                if ((s->avctx->debug_mv) && pict->motion_val) {
                    int type;
                    /* type 0: P forward, 1: B forward, 2: B backward */
                    for (type = 0; type < 3; type++) {
                        int direction = 0;
                        switch (type) {
                        case 0:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_P))
                                continue;
                            direction = 0;
                            break;
                        case 1:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 0;
                            break;
                        case 2:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 1;
                            break;
                        }
                        if (!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, anchored at the
                         * partition center */
                        if (IS_8X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else {
                            int sx= mb_x * 16 + 8;
                            int sy= mb_y * 16 + 8;
                            int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* paint the chroma planes with a gray level derived from
                 * the MB's qscale */
                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    int y;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                    }
                }
                /* colorize the chroma planes according to the MB type */
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    pict->motion_val) {
                    int mb_type = pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a U/V color pair at hue 'theta' (degrees) and saturation 'r' */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    u = v = 128; /* default: neutral gray */
                    if (IS_PCM(mb_type)) {
                        COLOR(120, 48)
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                        COLOR(30, 48)
                    } else if (IS_INTRA4x4(mb_type)) {
                        COLOR(90, 48)
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                        /* direct+skip: keep neutral */
                    } else if (IS_DIRECT(mb_type)) {
                        COLOR(150, 48)
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                        COLOR(170, 48)
                    } else if (IS_GMC(mb_type)) {
                        COLOR(190, 48)
                    } else if (IS_SKIP(mb_type)) {
                        /* skip: keep neutral */
                    } else if (!USES_LIST(mb_type, 1)) {
                        COLOR(240, 48)
                    } else if (!USES_LIST(mb_type, 0)) {
                        COLOR(0, 48)
                    } else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                    }

                    /* mark partition boundaries in the luma plane by
                     * XORing segmentation lines with 0x80 */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    }
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    }
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            /* mark sub-partitions whose MVs differ */
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                        }
                    }

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == CODEC_ID_H264) {
                        /* no visualization for interlaced H.264 MBs */
                    }
                }
                s->mbskip_table[mb_index] = 0;
            }
        }
    }
}
01786
01787 static inline int hpel_motion_lowres(MpegEncContext *s,
01788 uint8_t *dest, uint8_t *src,
01789 int field_based, int field_select,
01790 int src_x, int src_y,
01791 int width, int height, int stride,
01792 int h_edge_pos, int v_edge_pos,
01793 int w, int h, h264_chroma_mc_func *pix_op,
01794 int motion_x, int motion_y)
01795 {
01796 const int lowres = s->avctx->lowres;
01797 const int op_index = FFMIN(lowres, 2);
01798 const int s_mask = (2 << lowres) - 1;
01799 int emu = 0;
01800 int sx, sy;
01801
01802 if (s->quarter_sample) {
01803 motion_x /= 2;
01804 motion_y /= 2;
01805 }
01806
01807 sx = motion_x & s_mask;
01808 sy = motion_y & s_mask;
01809 src_x += motion_x >> lowres + 1;
01810 src_y += motion_y >> lowres + 1;
01811
01812 src += src_y * stride + src_x;
01813
01814 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
01815 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01816 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
01817 (h + 1) << field_based, src_x,
01818 src_y << field_based,
01819 h_edge_pos,
01820 v_edge_pos);
01821 src = s->edge_emu_buffer;
01822 emu = 1;
01823 }
01824
01825 sx = (sx << 2) >> lowres;
01826 sy = (sy << 2) >> lowres;
01827 if (field_select)
01828 src += s->linesize;
01829 pix_op[op_index](dest, src, stride, h, sx, sy);
01830 return emu;
01831 }
01832
01833
/**
 * Motion compensation of a whole macroblock (luma + both chroma planes)
 * at reduced ("lowres") resolution, with per-format chroma sub-pel
 * handling and optional field-based addressing.
 *
 * Note: expressions like "motion_x >> lowres + 1" parse as
 * "motion_x >> (lowres + 1)" since '+' binds tighter than '>>' in C.
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
    const int block_s = 8>>lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    /* quarter-pel vectors are handled at half-pel precision here */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    /* luma: sub-pel fraction and integer source position */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    /* chroma: position depends on the output format's chroma layout */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* H.261: chroma vectors are the luma vectors divided by 4 */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* chroma subsampled in both directions (e.g. 4:2:0) */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                /* chroma subsampled horizontally only (e.g. 4:2:2) */
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                /* no chroma subsampling (e.g. 4:4:4) */
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* fall back to the edge emulation buffer when the luma block reads
     * outside the usable area; uv buffers follow at fixed offsets */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* when writing the bottom field, start on the second picture line */
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale sub-pel offsets and run the MC functions */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }

}
01965
/**
 * Chroma motion compensation for a 4MV (four-vector) macroblock at
 * reduced ("lowres") resolution, using a single averaged chroma vector
 * (mx, my) for both chroma planes.
 *
 * Note: "x >> lowres + 1" parses as "x >> (lowres + 1)" in C.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    /* quarter-pel vectors are handled at half-pel precision here */
    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* apply H.263-style chroma rounding to the averaged vector */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    /* split into sub-pel fraction and integer source position */
    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same offset; re-run edge emulation if Cb needed it */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
02019
/**
 * Motion compensate one macroblock at reduced ("lowres") resolution.
 *
 * Dispatches on s->mv_type and forwards to the lowres MC helpers
 * (mpeg_motion_lowres / hpel_motion_lowres / chroma_4mv_motion_lowres).
 *
 * @param s           encoder/decoder context
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma Cb destination pointer
 * @param dest_cr     chroma Cr destination pointer
 * @param dir         direction: 0 = forward (last picture), 1 = backward (next)
 * @param ref_picture array of pointers to the reference planes
 * @param pix_op      halfpel chroma motion compensation function table (put or avg)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;  /* 8x8 block edge shrunk by lowres */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four vectors, one per 8x8 luma block; chroma uses the sum of
         * the four vectors (rounded inside chroma_4mv_motion_lowres) */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: compensate top and bottom field separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture referencing the opposite-parity field of the
             * frame being decoded: take it from the current picture
             * (not for B frames or the first field) */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, upper and lower 16x8 half (field pictures) */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* opposite-parity field lives in the current picture */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance destinations to the lower 16x8 half */
            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: two field predictions averaged together */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* second pass averages into the first prediction */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after put we only keep track of the averaging operator */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * the second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
02161
02165 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02166 {
02167 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02168 int my, off, i, mvs;
02169
02170 if (s->picture_structure != PICT_FRAME) goto unhandled;
02171
02172 switch (s->mv_type) {
02173 case MV_TYPE_16X16:
02174 mvs = 1;
02175 break;
02176 case MV_TYPE_16X8:
02177 mvs = 2;
02178 break;
02179 case MV_TYPE_8X8:
02180 mvs = 4;
02181 break;
02182 default:
02183 goto unhandled;
02184 }
02185
02186 for (i = 0; i < mvs; i++) {
02187 my = s->mv[dir][i][1]<<qpel_shift;
02188 my_max = FFMAX(my_max, my);
02189 my_min = FFMIN(my_min, my);
02190 }
02191
02192 off = (FFMAX(-my_min, my_max) + 63) >> 6;
02193
02194 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02195 unhandled:
02196 return s->mb_height-1;
02197 }
02198
02199
02200 static inline void put_dct(MpegEncContext *s,
02201 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02202 {
02203 s->dct_unquantize_intra(s, block, i, qscale);
02204 s->dsp.idct_put (dest, line_size, block);
02205 }
02206
02207
02208 static inline void add_dct(MpegEncContext *s,
02209 DCTELEM *block, int i, uint8_t *dest, int line_size)
02210 {
02211 if (s->block_last_index[i] >= 0) {
02212 s->dsp.idct_add (dest, line_size, block);
02213 }
02214 }
02215
02216 static inline void add_dequant_dct(MpegEncContext *s,
02217 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02218 {
02219 if (s->block_last_index[i] >= 0) {
02220 s->dct_unquantize_inter(s, block, i, qscale);
02221
02222 s->dsp.idct_add (dest, line_size, block);
02223 }
02224 }
02225
02229 void ff_clean_intra_table_entries(MpegEncContext *s)
02230 {
02231 int wrap = s->b8_stride;
02232 int xy = s->block_index[0];
02233
02234 s->dc_val[0][xy ] =
02235 s->dc_val[0][xy + 1 ] =
02236 s->dc_val[0][xy + wrap] =
02237 s->dc_val[0][xy + 1 + wrap] = 1024;
02238
02239 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
02240 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02241 if (s->msmpeg4_version>=3) {
02242 s->coded_block[xy ] =
02243 s->coded_block[xy + 1 ] =
02244 s->coded_block[xy + wrap] =
02245 s->coded_block[xy + 1 + wrap] = 0;
02246 }
02247
02248 wrap = s->mb_stride;
02249 xy = s->mb_x + s->mb_y * wrap;
02250 s->dc_val[1][xy] =
02251 s->dc_val[2][xy] = 1024;
02252
02253 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02254 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02255
02256 s->mbintra_table[xy]= 0;
02257 }
02258
02259
02260
02261
02262
02263
02264
02265
02266
02267
02268
/**
 * Reconstruct one macroblock: motion compensation, inverse transform /
 * residual add, then (for scratch-buffer reconstruction) the copy to
 * the visible destination planes.
 *
 * Shared body for all specializations; lowres_flag and is_mpeg12 are
 * constant in the callers (see ff_MPV_decode_mb) so the dead branches
 * are compiled out.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        /* hardware (XvMC) path does its own reconstruction */
        ff_xvmc_decode_mb(s);
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save (permuted back) and dump the DCT coefficients of this MB */
       int i,j;
       DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictor / intra tables for non-intra macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct unless this is an encoder pass that does not need
     * the decoded picture (no PSNR, intra-only/B with non-RD decision) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0];
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* maintain the skipped-MB table (decoder only) */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                /* MB actually coded */
                *mbskip_ptr = 0;
            }
        }

        /* interlaced DCT: double stride, successive rows come from
         * alternating fields */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* reconstruct into a scratch buffer, copied to s->dest at
             * the end (see the !readable block after skip_idct) */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* frame threading: wait until the rows this MB
                     * references have been decoded in the reference */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        /* a following backward prediction is averaged in */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        /* a following backward prediction is averaged in */
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* optionally skip the IDCT for discardable frames */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the residual; codecs whose bitstream-level decoder
             * does not already dequantize go through add_dequant_dct */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* vertically subsampled chroma: one block each */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* no vertical chroma subsampling: two blocks per
                         * component, halved stride/offset */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                /* blocks already dequantized by the bitstream decoder */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* chroma uses its own interlaced stride here */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){
                            /* chroma not horizontally subsampled either:
                             * four chroma blocks per component */
                            add_dct(s, block[8],  8,  dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9],  9,  dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own residual add */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra macroblock: the transform result replaces the
             * destination instead of being added to a prediction */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2: blocks are already dequantized, only IDCT */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        /* chroma interlaced stride, as in the add path */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){
                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratch reconstruction to the real destination */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,  linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02512
02513 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02514 #if !CONFIG_SMALL
02515 if(s->out_format == FMT_MPEG1) {
02516 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02517 else MPV_decode_mb_internal(s, block, 0, 1);
02518 } else
02519 #endif
02520 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02521 else MPV_decode_mb_internal(s, block, 0, 0);
02522 }
02523
/**
 * Notify the application that a horizontal band of the picture has
 * been decoded (AVCodecContext.draw_horiz_band callback), after
 * replicating the picture edges needed for later motion compensation.
 *
 * @param s codec context
 * @param y first line of the band (field lines for field pictures;
 *          doubled below to frame coordinates)
 * @param h height of the band in lines
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* convert field coordinates to frame coordinates */
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* replicate the border around the finished band so that MVs
         * pointing outside the frame find valid pixels */
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        /* clip the band against the motion-estimation edge position */
        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* B frames / low-delay / coded-order slices show the picture
         * being decoded; otherwise show the previous complete picture */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src = &s->current_picture_ptr->f;
        else if(s->last_picture_ptr)
            src = &s->last_picture_ptr->f;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* clear FPU/MMX state before calling back into user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02592
/**
 * Initialize s->block_index[] (prediction-table indexes of the four
 * luma and two chroma 8x8 blocks) and s->dest[] (output plane pointers)
 * for the macroblock row position (s->mb_x, s->mb_y).
 *
 * NOTE(review): the "-2"/"-1"/"mb_x - 1" terms make the indexes/pointers
 * start one macroblock to the left — presumably they are advanced
 * per-MB elsewhere; confirm against the caller.
 */
void ff_init_block_index(MpegEncContext *s){
    const int linesize   = s->current_picture.f.linesize[0];
    const int uvlinesize = s->current_picture.f.linesize[1];
    /* log2 of the macroblock size: 16 >> lowres pixels */
    const int mb_size= 4 - s->avctx->lowres;

    /* four luma blocks on the b8 grid (two per row) */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma blocks, stored after the luma area on the MB grid */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;

    /* destination pointers for the current MB in each plane */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: rows are interleaved, so halve the MB row */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02624
02625 void ff_mpeg_flush(AVCodecContext *avctx){
02626 int i;
02627 MpegEncContext *s = avctx->priv_data;
02628
02629 if(s==NULL || s->picture==NULL)
02630 return;
02631
02632 for(i=0; i<s->picture_count; i++){
02633 if (s->picture[i].f.data[0] &&
02634 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02635 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02636 free_frame_buffer(s, &s->picture[i]);
02637 }
02638 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02639
02640 s->mb_x= s->mb_y= 0;
02641 s->closed_gop= 0;
02642
02643 s->parse_context.state= -1;
02644 s->parse_context.frame_start_found= 0;
02645 s->parse_context.overread= 0;
02646 s->parse_context.overread_index= 0;
02647 s->parse_context.index= 0;
02648 s->parse_context.last_index= 0;
02649 s->bitstream_buffer_size=0;
02650 s->pp_time=0;
02651 }
02652
02653 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02654 DCTELEM *block, int n, int qscale)
02655 {
02656 int i, level, nCoeffs;
02657 const uint16_t *quant_matrix;
02658
02659 nCoeffs= s->block_last_index[n];
02660
02661 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02662
02663 quant_matrix = s->intra_matrix;
02664 for(i=1;i<=nCoeffs;i++) {
02665 int j= s->intra_scantable.permutated[i];
02666 level = block[j];
02667 if (level) {
02668 if (level < 0) {
02669 level = -level;
02670 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02671 level = (level - 1) | 1;
02672 level = -level;
02673 } else {
02674 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02675 level = (level - 1) | 1;
02676 }
02677 block[j] = level;
02678 }
02679 }
02680 }
02681
02682 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02683 DCTELEM *block, int n, int qscale)
02684 {
02685 int i, level, nCoeffs;
02686 const uint16_t *quant_matrix;
02687
02688 nCoeffs= s->block_last_index[n];
02689
02690 quant_matrix = s->inter_matrix;
02691 for(i=0; i<=nCoeffs; i++) {
02692 int j= s->intra_scantable.permutated[i];
02693 level = block[j];
02694 if (level) {
02695 if (level < 0) {
02696 level = -level;
02697 level = (((level << 1) + 1) * qscale *
02698 ((int) (quant_matrix[j]))) >> 4;
02699 level = (level - 1) | 1;
02700 level = -level;
02701 } else {
02702 level = (((level << 1) + 1) * qscale *
02703 ((int) (quant_matrix[j]))) >> 4;
02704 level = (level - 1) | 1;
02705 }
02706 block[j] = level;
02707 }
02708 }
02709 }
02710
02711 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02712 DCTELEM *block, int n, int qscale)
02713 {
02714 int i, level, nCoeffs;
02715 const uint16_t *quant_matrix;
02716
02717 if(s->alternate_scan) nCoeffs= 63;
02718 else nCoeffs= s->block_last_index[n];
02719
02720 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02721 quant_matrix = s->intra_matrix;
02722 for(i=1;i<=nCoeffs;i++) {
02723 int j= s->intra_scantable.permutated[i];
02724 level = block[j];
02725 if (level) {
02726 if (level < 0) {
02727 level = -level;
02728 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02729 level = -level;
02730 } else {
02731 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02732 }
02733 block[j] = level;
02734 }
02735 }
02736 }
02737
02738 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02739 DCTELEM *block, int n, int qscale)
02740 {
02741 int i, level, nCoeffs;
02742 const uint16_t *quant_matrix;
02743 int sum=-1;
02744
02745 if(s->alternate_scan) nCoeffs= 63;
02746 else nCoeffs= s->block_last_index[n];
02747
02748 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02749 sum += block[0];
02750 quant_matrix = s->intra_matrix;
02751 for(i=1;i<=nCoeffs;i++) {
02752 int j= s->intra_scantable.permutated[i];
02753 level = block[j];
02754 if (level) {
02755 if (level < 0) {
02756 level = -level;
02757 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02758 level = -level;
02759 } else {
02760 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02761 }
02762 block[j] = level;
02763 sum+=level;
02764 }
02765 }
02766 block[63]^=sum&1;
02767 }
02768
02769 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02770 DCTELEM *block, int n, int qscale)
02771 {
02772 int i, level, nCoeffs;
02773 const uint16_t *quant_matrix;
02774 int sum=-1;
02775
02776 if(s->alternate_scan) nCoeffs= 63;
02777 else nCoeffs= s->block_last_index[n];
02778
02779 quant_matrix = s->inter_matrix;
02780 for(i=0; i<=nCoeffs; i++) {
02781 int j= s->intra_scantable.permutated[i];
02782 level = block[j];
02783 if (level) {
02784 if (level < 0) {
02785 level = -level;
02786 level = (((level << 1) + 1) * qscale *
02787 ((int) (quant_matrix[j]))) >> 4;
02788 level = -level;
02789 } else {
02790 level = (((level << 1) + 1) * qscale *
02791 ((int) (quant_matrix[j]))) >> 4;
02792 }
02793 block[j] = level;
02794 sum+=level;
02795 }
02796 }
02797 block[63]^=sum&1;
02798 }
02799
02800 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02801 DCTELEM *block, int n, int qscale)
02802 {
02803 int i, level, qmul, qadd;
02804 int nCoeffs;
02805
02806 assert(s->block_last_index[n]>=0);
02807
02808 qmul = qscale << 1;
02809
02810 if (!s->h263_aic) {
02811 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02812 qadd = (qscale - 1) | 1;
02813 }else{
02814 qadd = 0;
02815 }
02816 if(s->ac_pred)
02817 nCoeffs=63;
02818 else
02819 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02820
02821 for(i=1; i<=nCoeffs; i++) {
02822 level = block[i];
02823 if (level) {
02824 if (level < 0) {
02825 level = level * qmul - qadd;
02826 } else {
02827 level = level * qmul + qadd;
02828 }
02829 block[i] = level;
02830 }
02831 }
02832 }
02833
02834 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02835 DCTELEM *block, int n, int qscale)
02836 {
02837 int i, level, qmul, qadd;
02838 int nCoeffs;
02839
02840 assert(s->block_last_index[n]>=0);
02841
02842 qadd = (qscale - 1) | 1;
02843 qmul = qscale << 1;
02844
02845 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02846
02847 for(i=0; i<=nCoeffs; i++) {
02848 level = block[i];
02849 if (level) {
02850 if (level < 0) {
02851 level = level * qmul - qadd;
02852 } else {
02853 level = level * qmul + qadd;
02854 }
02855 block[i] = level;
02856 }
02857 }
02858 }
02859
02863 void ff_set_qscale(MpegEncContext * s, int qscale)
02864 {
02865 if (qscale < 1)
02866 qscale = 1;
02867 else if (qscale > 31)
02868 qscale = 31;
02869
02870 s->qscale = qscale;
02871 s->chroma_qscale= s->chroma_qscale_table[qscale];
02872
02873 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02874 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02875 }
02876
02877 void ff_MPV_report_decode_progress(MpegEncContext *s)
02878 {
02879 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02880 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
02881 }