FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "mpeg4videodec.h"
41 #include "thread.h"
42 #include "threadframe.h"
43 #include "wmv2dec.h"
44 
46 {
48 
49  s->avctx = avctx;
50  s->width = avctx->coded_width;
51  s->height = avctx->coded_height;
52  s->codec_id = avctx->codec->id;
53  s->workaround_bugs = avctx->workaround_bugs;
54 
55  /* convert fourcc to upper case */
56  s->codec_tag = ff_toupper4(avctx->codec_tag);
57 
59  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
60 }
61 
63  const AVCodecContext *src)
64 {
65  MpegEncContext *const s1 = src->priv_data;
66  MpegEncContext *const s = dst->priv_data;
67  int ret;
68 
69  if (dst == src)
70  return 0;
71 
72  av_assert0(s != s1);
73 
74  // FIXME can parameters change on I-frames?
75  // in that case dst may need a reinit
76  if (!s->context_initialized) {
77  void *private_ctx = s->private_ctx;
78  int err;
79  memcpy(s, s1, sizeof(*s));
80 
81  s->context_initialized = 0;
82  s->context_reinit = 0;
83  s->avctx = dst;
84  s->private_ctx = private_ctx;
85  s->bitstream_buffer = NULL;
86  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
87 
88  if (s1->context_initialized) {
89  if ((err = ff_mpv_common_init(s)) < 0)
90  return err;
91  }
92  }
93 
94  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
95  s->height = s1->height;
96  s->width = s1->width;
98  return ret;
99  }
100 
101  s->quarter_sample = s1->quarter_sample;
102 
103  s->coded_picture_number = s1->coded_picture_number;
104  s->picture_number = s1->picture_number;
105 
106  av_assert0(!s->picture || s->picture != s1->picture);
107  if (s->picture)
108  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
109  ff_mpeg_unref_picture(&s->picture[i]);
110  if (s1->picture && s1->picture[i].f->buf[0] &&
111  (ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
112  return ret;
113  }
114 
115 #define UPDATE_PICTURE(pic)\
116 do {\
117  ff_mpeg_unref_picture(&s->pic);\
118  if (s1->pic.f && s1->pic.f->buf[0])\
119  ret = ff_mpeg_ref_picture(&s->pic, &s1->pic);\
120  else\
121  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
122  if (ret < 0)\
123  return ret;\
124 } while (0)
125 
129 
130 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
131  ((pic && pic >= old_ctx->picture && \
132  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
133  &new_ctx->picture[pic - old_ctx->picture] : NULL)
134 
135  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
136  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
137  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
138 
139  // Error/bug resilience
140  s->workaround_bugs = s1->workaround_bugs;
141  s->padding_bug_score = s1->padding_bug_score;
142 
143  // MPEG-4 timing info
144  memcpy(&s->last_time_base, &s1->last_time_base,
145  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
146  (char *) &s1->last_time_base);
147 
148  // B-frame info
149  s->max_b_frames = s1->max_b_frames;
150  s->low_delay = s1->low_delay;
151  s->droppable = s1->droppable;
152 
153  // DivX handling (doesn't work)
154  s->divx_packed = s1->divx_packed;
155 
156  if (s1->bitstream_buffer) {
157  av_fast_padded_malloc(&s->bitstream_buffer,
158  &s->allocated_bitstream_buffer_size,
159  s1->bitstream_buffer_size);
160  if (!s->bitstream_buffer) {
161  s->bitstream_buffer_size = 0;
162  return AVERROR(ENOMEM);
163  }
164  s->bitstream_buffer_size = s1->bitstream_buffer_size;
165  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
166  s1->bitstream_buffer_size);
167  }
168 
169  // linesize-dependent scratch buffer allocation
170  if (!s->sc.edge_emu_buffer)
171  if (s1->linesize) {
172  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
173  &s->sc, s1->linesize) < 0) {
174  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
175  "scratch buffers.\n");
176  return AVERROR(ENOMEM);
177  }
178  } else {
179  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
180  "be allocated due to unknown size.\n");
181  }
182 
183  // MPEG-2/interlacing info
184  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
185  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
186 
187  return 0;
188 }
189 
191 {
192  int err = 0;
193 
194  if (!s->context_initialized)
195  return AVERROR(EINVAL);
196 
198 
199  if (s->picture)
200  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
201  s->picture[i].needs_realloc = 1;
202 
203  s->last_picture_ptr =
204  s->next_picture_ptr =
205  s->current_picture_ptr = NULL;
206 
207  if ((s->width || s->height) &&
208  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
209  goto fail;
210 
211  /* set chroma shifts */
212  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
213  &s->chroma_x_shift,
214  &s->chroma_y_shift);
215  if (err < 0)
216  goto fail;
217 
218  if ((err = ff_mpv_init_context_frame(s)))
219  goto fail;
220 
221  memset(s->thread_context, 0, sizeof(s->thread_context));
222  s->thread_context[0] = s;
223 
224  if (s->width && s->height) {
226  if (err < 0)
227  goto fail;
228  }
229  s->context_reinit = 0;
230 
231  return 0;
232  fail:
234  s->context_reinit = 1;
235  return err;
236 }
237 
239 {
240  AVCodecContext *avctx = s->avctx;
241  int ret;
242 
243  pic->tf.f = pic->f;
244 
245  /* WM Image / Screen codecs allocate internal buffers with different
246  * dimensions / colorspaces; ignore user-defined callbacks for these. */
251  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
252  } else {
253  pic->f->width = avctx->width;
254  pic->f->height = avctx->height;
255  pic->f->format = avctx->pix_fmt;
257  }
258  if (ret < 0)
259  goto fail;
260 
262  if (ret < 0)
263  goto fail;
264 
265  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, s->out_format,
266  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
267  &s->linesize, &s->uvlinesize);
268 fail:
270  return ret;
271 }
272 
273 static void color_frame(AVFrame *frame, int luma)
274 {
275  int h_chroma_shift, v_chroma_shift;
276 
277  for (int i = 0; i < frame->height; i++)
278  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
279 
280  if (!frame->data[1])
281  return;
282  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
283  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
284  memset(frame->data[1] + frame->linesize[1] * i,
285  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
286  memset(frame->data[2] + frame->linesize[2] * i,
287  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
288  }
289 }
290 
291 /**
292  * generic function called after decoding
293  * the header and before a frame is decoded.
294  */
296 {
297  Picture *pic;
298  int idx, ret;
299 
300  s->mb_skipped = 0;
301 
303  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
304  return -1;
305  }
306 
307  /* mark & release old frames */
308  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
309  s->last_picture_ptr != s->next_picture_ptr &&
310  s->last_picture_ptr->f->buf[0]) {
311  ff_mpeg_unref_picture(s->last_picture_ptr);
312  }
313 
314  /* release non reference/forgotten frames */
315  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
316  if (!s->picture[i].reference ||
317  (&s->picture[i] != s->last_picture_ptr &&
318  &s->picture[i] != s->next_picture_ptr &&
319  !s->picture[i].needs_realloc)) {
320  ff_mpeg_unref_picture(&s->picture[i]);
321  }
322  }
323 
324  ff_mpeg_unref_picture(&s->current_picture);
325  ff_mpeg_unref_picture(&s->last_picture);
326  ff_mpeg_unref_picture(&s->next_picture);
327 
328  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
329  // we already have an unused image
330  // (maybe it was set before reading the header)
331  pic = s->current_picture_ptr;
332  } else {
333  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
334  if (idx < 0) {
335  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
336  return idx;
337  }
338  pic = &s->picture[idx];
339  }
340 
341  pic->reference = 0;
342  if (!s->droppable) {
343  if (s->pict_type != AV_PICTURE_TYPE_B)
344  pic->reference = 3;
345  }
346 
347  if (alloc_picture(s, pic) < 0)
348  return -1;
349 
350  s->current_picture_ptr = pic;
351  // FIXME use only the vars from current_pic
352  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
353  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
354  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
355  if (s->picture_structure != PICT_FRAME)
356  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
357  ((s->picture_structure == PICT_TOP_FIELD) == s->first_field);
358  }
359  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (!s->progressive_frame &&
360  !s->progressive_sequence);
361  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
362 
363  s->current_picture_ptr->f->pict_type = s->pict_type;
364  if (s->pict_type == AV_PICTURE_TYPE_I)
365  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY;
366  else
367  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
368 
369  if ((ret = ff_mpeg_ref_picture(&s->current_picture,
370  s->current_picture_ptr)) < 0)
371  return ret;
372 
373  if (s->pict_type != AV_PICTURE_TYPE_B) {
374  s->last_picture_ptr = s->next_picture_ptr;
375  if (!s->droppable)
376  s->next_picture_ptr = s->current_picture_ptr;
377  }
378  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
379  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
380  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
381  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
382  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
383  s->pict_type, s->droppable);
384 
385  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
386  (s->pict_type != AV_PICTURE_TYPE_I)) {
387  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
389  "allocating dummy last picture for B frame\n");
390  else if (s->pict_type != AV_PICTURE_TYPE_I)
392  "warning: first frame is no keyframe\n");
393 
394  /* Allocate a dummy frame */
395  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
396  if (idx < 0) {
397  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
398  return idx;
399  }
400  s->last_picture_ptr = &s->picture[idx];
401 
402  s->last_picture_ptr->reference = 3;
403  s->last_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
404  s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
405 
406  if (alloc_picture(s, s->last_picture_ptr) < 0) {
407  s->last_picture_ptr = NULL;
408  return -1;
409  }
410 
411  if (!avctx->hwaccel) {
412  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
413  color_frame(s->last_picture_ptr->f, luma_val);
414  }
415 
416  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
417  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
418  }
419  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
420  s->pict_type == AV_PICTURE_TYPE_B) {
421  /* Allocate a dummy frame */
422  idx = ff_find_unused_picture(s->avctx, s->picture, 0);
423  if (idx < 0) {
424  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
425  return idx;
426  }
427  s->next_picture_ptr = &s->picture[idx];
428 
429  s->next_picture_ptr->reference = 3;
430  s->next_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
431  s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
432 
433  if (alloc_picture(s, s->next_picture_ptr) < 0) {
434  s->next_picture_ptr = NULL;
435  return -1;
436  }
437  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
438  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
439  }
440 
441  if (s->last_picture_ptr) {
442  if (s->last_picture_ptr->f->buf[0] &&
443  (ret = ff_mpeg_ref_picture(&s->last_picture,
444  s->last_picture_ptr)) < 0)
445  return ret;
446  }
447  if (s->next_picture_ptr) {
448  if (s->next_picture_ptr->f->buf[0] &&
449  (ret = ff_mpeg_ref_picture(&s->next_picture,
450  s->next_picture_ptr)) < 0)
451  return ret;
452  }
453 
454  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
455  s->last_picture_ptr->f->buf[0]));
456 
457  if (s->picture_structure != PICT_FRAME) {
458  for (int i = 0; i < 4; i++) {
459  if (s->picture_structure == PICT_BOTTOM_FIELD) {
460  s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
461  s->current_picture.f->linesize[i]);
462  }
463  s->current_picture.f->linesize[i] *= 2;
464  s->last_picture.f->linesize[i] *= 2;
465  s->next_picture.f->linesize[i] *= 2;
466  }
467  }
468 
469  /* set dequantizer, we can't do it during init as
470  * it might change for MPEG-4 and we can't do it in the header
471  * decode as init is not called for MPEG-4 there yet */
472  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
473  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
474  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
475  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
476  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
477  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
478  } else {
479  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
480  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
481  }
482 
483  if (s->avctx->debug & FF_DEBUG_NOMC)
484  color_frame(s->current_picture_ptr->f, 0x80);
485 
486  return 0;
487 }
488 
489 /* called after a frame has been decoded. */
491 {
492  emms_c();
493 
494  if (s->current_picture.reference)
495  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
496 }
497 
498 void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
499 {
500  ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
501  p->qscale_table, p->motion_val,
502  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
503 }
504 
/**
 * Export the per-macroblock quantizer values of picture p as video
 * encoding parameters side data, when the caller opted in via
 * AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS.
 * Returns 0 on success (or when export is disabled), AVERROR(ENOMEM)
 * on allocation failure.
 */
505 int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
506 {
507  AVVideoEncParams *par;
 /* NOTE(review): the MPEG-1 qscale type doubles the exported value —
  * presumably because that qscale is stored at half resolution; confirm
  * against FF_MPV_QSCALE_TYPE_MPEG1 semantics. */
508  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
509  unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
510 
 /* nothing to do unless the user requested enc-params side data */
511  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
512  return 0;
513 
 /* NOTE(review): the statement assigning 'par' (original line 514,
  * presumably av_video_enc_params_create_side_data(f, ..., nb_mb)) is
  * elided in this listing — restore it before relying on this code. */
515  if (!par)
516  return AVERROR(ENOMEM);
517 
 /* one AVVideoBlockParams entry per 16x16 macroblock */
518  for (unsigned y = 0; y < p->alloc_mb_height; y++)
519  for (unsigned x = 0; x < p->alloc_mb_width; x++) {
 /* block_idx indexes the dense export array; mb_xy indexes the
  * (possibly padded) qscale table via the allocation stride */
520  const unsigned int block_idx = y * p->alloc_mb_width + x;
521  const unsigned int mb_xy = y * p->alloc_mb_stride + x;
522  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
523 
524  b->src_x = x * 16;
525  b->src_y = y * 16;
526  b->w = 16;
527  b->h = 16;
528 
529  b->delta_qp = p->qscale_table[mb_xy] * mult;
530  }
531 
532  return 0;
533 }
534 
536 {
537  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
538  s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
539  y, h, s->picture_structure,
540  s->first_field, s->low_delay);
541 }
542 
544 {
545  MpegEncContext *const s = avctx->priv_data;
546 
547  if (!s->picture)
548  return;
549 
550  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
551  ff_mpeg_unref_picture(&s->picture[i]);
552  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
553 
554  ff_mpeg_unref_picture(&s->current_picture);
555  ff_mpeg_unref_picture(&s->last_picture);
556  ff_mpeg_unref_picture(&s->next_picture);
557 
558  s->mb_x = s->mb_y = 0;
559 
560  s->bitstream_buffer_size = 0;
561  s->pp_time = 0;
562 }
563 
565 {
566  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
567  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
568 }
569 
570 
572  uint8_t *dest, const uint8_t *src,
573  int field_based, int field_select,
574  int src_x, int src_y,
575  int width, int height, ptrdiff_t stride,
576  int h_edge_pos, int v_edge_pos,
577  int w, int h, const h264_chroma_mc_func *pix_op,
578  int motion_x, int motion_y)
579 {
580  const int lowres = s->avctx->lowres;
581  const int op_index = FFMIN(lowres, 3);
582  const int s_mask = (2 << lowres) - 1;
583  int emu = 0;
584  int sx, sy;
585 
586  if (s->quarter_sample) {
587  motion_x /= 2;
588  motion_y /= 2;
589  }
590 
591  sx = motion_x & s_mask;
592  sy = motion_y & s_mask;
593  src_x += motion_x >> lowres + 1;
594  src_y += motion_y >> lowres + 1;
595 
596  src += src_y * stride + src_x;
597 
598  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
599  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
600  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
601  s->linesize, s->linesize,
602  w + 1, (h + 1) << field_based,
603  src_x, src_y * (1 << field_based),
605  src = s->sc.edge_emu_buffer;
606  emu = 1;
607  }
608 
609  sx = (sx << 2) >> lowres;
610  sy = (sy << 2) >> lowres;
611  if (field_select)
612  src += s->linesize;
613  pix_op[op_index](dest, src, stride, h, sx, sy);
614  return emu;
615 }
616 
617 /* apply one mpeg motion vector to the three components */
619  uint8_t *dest_y,
620  uint8_t *dest_cb,
621  uint8_t *dest_cr,
622  int field_based,
623  int bottom_field,
624  int field_select,
625  uint8_t *const *ref_picture,
626  const h264_chroma_mc_func *pix_op,
627  int motion_x, int motion_y,
628  int h, int mb_y)
629 {
630  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
631  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
632  ptrdiff_t uvlinesize, linesize;
633  const int lowres = s->avctx->lowres;
634  const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
635  const int block_s = 8 >> lowres;
636  const int s_mask = (2 << lowres) - 1;
637  const int h_edge_pos = s->h_edge_pos >> lowres;
638  const int v_edge_pos = s->v_edge_pos >> lowres;
639  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
640  linesize = s->current_picture.f->linesize[0] << field_based;
641  uvlinesize = s->current_picture.f->linesize[1] << field_based;
642 
643  // FIXME obviously not perfect but qpel will not work in lowres anyway
644  if (s->quarter_sample) {
645  motion_x /= 2;
646  motion_y /= 2;
647  }
648 
649  if (field_based) {
650  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
651  }
652 
653  sx = motion_x & s_mask;
654  sy = motion_y & s_mask;
655  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
656  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
657 
658  if (s->out_format == FMT_H263) {
659  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
660  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
661  uvsrc_x = src_x >> 1;
662  uvsrc_y = src_y >> 1;
663  } else if (s->out_format == FMT_H261) {
664  // even chroma mv's are full pel in H261
665  mx = motion_x / 4;
666  my = motion_y / 4;
667  uvsx = (2 * mx) & s_mask;
668  uvsy = (2 * my) & s_mask;
669  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
670  uvsrc_y = mb_y * block_s + (my >> lowres);
671  } else {
672  if (s->chroma_y_shift) {
673  mx = motion_x / 2;
674  my = motion_y / 2;
675  uvsx = mx & s_mask;
676  uvsy = my & s_mask;
677  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
678  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
679  } else {
680  if (s->chroma_x_shift) {
681  //Chroma422
682  mx = motion_x / 2;
683  uvsx = mx & s_mask;
684  uvsy = motion_y & s_mask;
685  uvsrc_y = src_y;
686  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
687  } else {
688  //Chroma444
689  uvsx = motion_x & s_mask;
690  uvsy = motion_y & s_mask;
691  uvsrc_x = src_x;
692  uvsrc_y = src_y;
693  }
694  }
695  }
696 
697  ptr_y = ref_picture[0] + src_y * linesize + src_x;
698  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
699  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
700 
701  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
702  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
703  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
704  linesize >> field_based, linesize >> field_based,
705  17, 17 + field_based,
706  src_x, src_y * (1 << field_based), h_edge_pos,
707  v_edge_pos);
708  ptr_y = s->sc.edge_emu_buffer;
709  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
710  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
711  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
712  if (s->workaround_bugs & FF_BUG_IEDGE)
713  vbuf -= s->uvlinesize;
714  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
715  uvlinesize >> field_based, uvlinesize >> field_based,
716  9, 9 + field_based,
717  uvsrc_x, uvsrc_y * (1 << field_based),
718  h_edge_pos >> 1, v_edge_pos >> 1);
719  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
720  uvlinesize >> field_based,uvlinesize >> field_based,
721  9, 9 + field_based,
722  uvsrc_x, uvsrc_y * (1 << field_based),
723  h_edge_pos >> 1, v_edge_pos >> 1);
724  ptr_cb = ubuf;
725  ptr_cr = vbuf;
726  }
727  }
728 
729  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
730  if (bottom_field) {
731  dest_y += s->linesize;
732  dest_cb += s->uvlinesize;
733  dest_cr += s->uvlinesize;
734  }
735 
736  if (field_select) {
737  ptr_y += s->linesize;
738  ptr_cb += s->uvlinesize;
739  ptr_cr += s->uvlinesize;
740  }
741 
742  sx = (sx << 2) >> lowres;
743  sy = (sy << 2) >> lowres;
744  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
745 
746  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
747  uvsx = (uvsx << 2) >> lowres;
748  uvsy = (uvsy << 2) >> lowres;
749  if (hc) {
750  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
751  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
752  }
753  }
754  // FIXME h261 lowres loop filter
755 }
756 
758  uint8_t *dest_cb, uint8_t *dest_cr,
759  uint8_t *const *ref_picture,
760  const h264_chroma_mc_func * pix_op,
761  int mx, int my)
762 {
763  const int lowres = s->avctx->lowres;
764  const int op_index = FFMIN(lowres, 3);
765  const int block_s = 8 >> lowres;
766  const int s_mask = (2 << lowres) - 1;
767  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
768  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
769  int emu = 0, src_x, src_y, sx, sy;
770  ptrdiff_t offset;
771  const uint8_t *ptr;
772 
773  if (s->quarter_sample) {
774  mx /= 2;
775  my /= 2;
776  }
777 
778  /* In case of 8X8, we construct a single chroma motion vector
779  with a special rounding */
780  mx = ff_h263_round_chroma(mx);
781  my = ff_h263_round_chroma(my);
782 
783  sx = mx & s_mask;
784  sy = my & s_mask;
785  src_x = s->mb_x * block_s + (mx >> lowres + 1);
786  src_y = s->mb_y * block_s + (my >> lowres + 1);
787 
788  offset = src_y * s->uvlinesize + src_x;
789  ptr = ref_picture[1] + offset;
790  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
791  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
792  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
793  s->uvlinesize, s->uvlinesize,
794  9, 9,
795  src_x, src_y, h_edge_pos, v_edge_pos);
796  ptr = s->sc.edge_emu_buffer;
797  emu = 1;
798  }
799  sx = (sx << 2) >> lowres;
800  sy = (sy << 2) >> lowres;
801  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
802 
803  ptr = ref_picture[2] + offset;
804  if (emu) {
805  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
806  s->uvlinesize, s->uvlinesize,
807  9, 9,
808  src_x, src_y, h_edge_pos, v_edge_pos);
809  ptr = s->sc.edge_emu_buffer;
810  }
811  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
812 }
813 
814 /**
815  * motion compensation of a single macroblock
816  * @param s context
817  * @param dest_y luma destination pointer
818  * @param dest_cb chroma cb/u destination pointer
819  * @param dest_cr chroma cr/v destination pointer
820  * @param dir direction (0->forward, 1->backward)
821  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
822  * @param pix_op halfpel motion compensation function (average or put normally)
823  * the motion vectors are taken from s->mv and the MV type from s->mv_type
824  */
825 static inline void MPV_motion_lowres(MpegEncContext *s,
826  uint8_t *dest_y, uint8_t *dest_cb,
827  uint8_t *dest_cr,
828  int dir, uint8_t *const *ref_picture,
829  const h264_chroma_mc_func *pix_op)
830 {
831  int mx, my;
832  int mb_x, mb_y;
833  const int lowres = s->avctx->lowres;
 /* luma block size at this lowres level (8 >> lowres pixels) */
834  const int block_s = 8 >>lowres;
835 
836  mb_x = s->mb_x;
837  mb_y = s->mb_y;
838 
 /* Dispatch on the macroblock's motion type; vectors are read from
  * s->mv[dir][...] and the per-block work is delegated to
  * mpeg_motion_lowres() / hpel_motion_lowres(). */
839  switch (s->mv_type) {
840  case MV_TYPE_16X16:
 /* one vector covering the whole macroblock */
841  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
842  0, 0, 0,
843  ref_picture, pix_op,
844  s->mv[dir][0][0], s->mv[dir][0][1],
845  2 * block_s, mb_y);
846  break;
847  case MV_TYPE_8X8:
 /* four vectors, one per luma quadrant; chroma uses a vector
  * combined from the four luma vectors (summed here, rounded in
  * chroma_4mv_motion_lowres()) */
848  mx = 0;
849  my = 0;
850  for (int i = 0; i < 4; i++) {
851  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
852  s->linesize) * block_s,
853  ref_picture[0], 0, 0,
854  (2 * mb_x + (i & 1)) * block_s,
855  (2 * mb_y + (i >> 1)) * block_s,
856  s->width, s->height, s->linesize,
857  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
858  block_s, block_s, pix_op,
859  s->mv[dir][i][0], s->mv[dir][i][1]);
860 
861  mx += s->mv[dir][i][0];
862  my += s->mv[dir][i][1];
863  }
864 
865  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
866  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
867  pix_op, mx, my);
868  break;
869  case MV_TYPE_FIELD:
870  if (s->picture_structure == PICT_FRAME) {
 /* frame picture: two half-height predictions, one per field */
871  /* top field */
872  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
873  1, 0, s->field_select[dir][0],
874  ref_picture, pix_op,
875  s->mv[dir][0][0], s->mv[dir][0][1],
876  block_s, mb_y);
877  /* bottom field */
878  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
879  1, 1, s->field_select[dir][1],
880  ref_picture, pix_op,
881  s->mv[dir][1][0], s->mv[dir][1][1],
882  block_s, mb_y);
883  } else {
 /* field picture: fall back to the current picture's planes
  * when the selected reference field is unavailable
  * (NOTE(review): mirrors the MV_TYPE_16X8 selection below) */
884  if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
885  || !ref_picture[0]) {
886  ref_picture = s->current_picture_ptr->f->data;
887  }
888  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
889  0, 0, s->field_select[dir][0],
890  ref_picture, pix_op,
891  s->mv[dir][0][0],
892  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
893  }
894  break;
895  case MV_TYPE_16X8:
 /* two vectors, each predicting one 16x8 half of the macroblock;
  * the destination pointers advance between iterations */
896  for (int i = 0; i < 2; i++) {
897  uint8_t *const *ref2picture;
898 
899  if ((s->picture_structure == s->field_select[dir][i] + 1 ||
900  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
901  ref_picture[0]) {
902  ref2picture = ref_picture;
903  } else {
904  ref2picture = s->current_picture_ptr->f->data;
905  }
906 
907  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
908  0, 0, s->field_select[dir][i],
909  ref2picture, pix_op,
910  s->mv[dir][i][0], s->mv[dir][i][1] +
911  2 * block_s * i, block_s, mb_y >> 1);
912 
913  dest_y += 2 * block_s * s->linesize;
914  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
915  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
916  }
917  break;
918  case MV_TYPE_DMV:
 /* dual prime: put the first prediction, then switch pix_op to the
  * averaging functions so the opposite-parity prediction is blended
  * on top of it */
919  if (s->picture_structure == PICT_FRAME) {
920  for (int i = 0; i < 2; i++) {
921  for (int j = 0; j < 2; j++) {
922  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
923  1, j, j ^ i,
924  ref_picture, pix_op,
925  s->mv[dir][2 * i + j][0],
926  s->mv[dir][2 * i + j][1],
927  block_s, mb_y);
928  }
929  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
930  }
931  } else {
932  if (!ref_picture[0]) {
933  ref_picture = s->current_picture_ptr->f->data;
934  }
935  for (int i = 0; i < 2; i++) {
936  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
937  0, 0, s->picture_structure != i + 1,
938  ref_picture, pix_op,
939  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
940  2 * block_s, mb_y >> 1);
941 
942  // after put we make avg of the same block
943  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
944 
945  // opposite parity is always in the same
946  // frame if this is second field
947  if (!s->first_field) {
948  ref_picture = s->current_picture_ptr->f->data;
949  }
950  }
951  }
952  break;
953  default:
 /* a decoder should never hand us an unknown motion type */
954  av_assert2(0);
955  }
956 }
957 
958 /**
959  * find the lowest MB row referenced in the MVs
960  */
 /* NOTE(review): the signature line (original line 961, the
  * lowest_referenced_row(...) declaration taking the context and a
  * motion direction 'dir') is elided in this listing — restore it
  * before relying on this code. */
962 {
963  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
964  int off, mvs;
965 
 /* only plain frame-coded MBs are handled; everything else reports
  * the last row as a conservative answer */
966  if (s->picture_structure != PICT_FRAME || s->mcsel)
967  goto unhandled;
968 
969  switch (s->mv_type) {
970  case MV_TYPE_16X16:
971  mvs = 1;
972  break;
973  case MV_TYPE_16X8:
974  mvs = 2;
975  break;
976  case MV_TYPE_8X8:
977  mvs = 4;
978  break;
979  default:
980  goto unhandled;
981  }
982 
 /* track the extreme vertical components over all vectors */
983  for (int i = 0; i < mvs; i++) {
984  int my = s->mv[dir][i][1];
985  my_max = FFMAX(my_max, my);
986  my_min = FFMIN(my_min, my);
987  }
988 
 /* convert the largest vertical displacement to macroblock rows:
  * half-pel vectors are first scaled to quarter-pel (qpel_shift), then
  * divided by 64 quarter-pels per 16-pixel row, rounding up (+63) */
989  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
990 
991  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
992 unhandled:
993  return s->mb_height - 1;
994 }
995 
996 /* add block[] to dest[] */
997 static inline void add_dct(MpegEncContext *s,
998  int16_t *block, int i, uint8_t *dest, int line_size)
999 {
1000  if (s->block_last_index[i] >= 0) {
1001  s->idsp.idct_add(dest, line_size, block);
1002  }
1003 }
1004 
1005 #define IS_ENCODER 0
1007 
1009 {
1010  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1011  /* print DCT coefficients */
1012  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1013  for (int i = 0; i < 6; i++) {
1014  for (int j = 0; j < 64; j++) {
1015  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1016  block[i][s->idsp.idct_permutation[j]]);
1017  }
1018  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1019  }
1020  }
1021 
1022  if (!s->avctx->lowres) {
1023 #if !CONFIG_SMALL
1024  if (s->out_format == FMT_MPEG1)
1026  else
1028 #else
1030 #endif
1031  } else
1033 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:53
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1427
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
av_clip
#define av_clip
Definition: common.h:98
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:87
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1349
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:505
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:539
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:263
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:924
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:169
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:564
AVFrame::width
int width
Definition: frame.h:416
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:157
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:290
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
mpegutils.h
thread.h
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:265
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:608
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:253
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1008
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
fail
#define fail()
Definition: checkasm.h:179
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:123
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2990
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:571
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:498
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1901
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:121
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:70
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:380
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:272
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:283
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
Definition: mpegpicture.c:304
Picture::tf
ThreadFrame tf
Definition: mpegpicture.h:48
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:90
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1364
lowres
static int lowres
Definition: ffplay.c:333
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:88
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:262
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:997
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1402
f
f
Definition: af_crystalizer.c:121
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:535
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:431
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:543
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:273
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:961
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:219
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:254
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:124
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:618
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:283
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:203
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:825
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:968
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:603
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1411
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:757
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_dec.c:238
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:415
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:758
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:416
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:490
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:633
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:410
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:159