FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwconfig.h"
31 #include "profiles.h"
32 #include "thread.h"
33 #include "threadframe.h"
34 #include "pthread_internal.h"
35 
36 #include "videodsp.h"
37 #include "vp89_rac.h"
38 #include "vp9.h"
39 #include "vp9data.h"
40 #include "vp9dec.h"
41 #include "vpx_rac.h"
42 #include "libavutil/avassert.h"
43 #include "libavutil/pixdesc.h"
45 
46 #define VP9_SYNCCODE 0x498342
47 
48 #if HAVE_THREADS
49 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
50  (offsetof(VP9Context, progress_mutex)),
51  (offsetof(VP9Context, progress_cond)));
52 
53 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
54  VP9Context *s = avctx->priv_data;
55  int i;
56 
57  if (avctx->active_thread_type & FF_THREAD_SLICE) {
58  if (s->entries)
59  av_freep(&s->entries);
60 
61  s->entries = av_malloc_array(n, sizeof(atomic_int));
62  if (!s->entries)
63  return AVERROR(ENOMEM);
64 
65  for (i = 0; i < n; i++)
66  atomic_init(&s->entries[i], 0);
67  }
68  return 0;
69 }
70 
71 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
72  pthread_mutex_lock(&s->progress_mutex);
73  atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
74  pthread_cond_signal(&s->progress_cond);
75  pthread_mutex_unlock(&s->progress_mutex);
76 }
77 
78 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
79  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
80  return;
81 
82  pthread_mutex_lock(&s->progress_mutex);
83  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
84  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
85  pthread_mutex_unlock(&s->progress_mutex);
86 }
87 #else
/* No-threads build: slice threading is the only user of the progress counters, so there is nothing to allocate. */
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
89 #endif
90 
/* vp9_tile_data_free(): release a tile's per-thread scratch buffers (block
 * descriptors, coefficient storage, and the optional block-structure array).
 * NOTE(review): the signature line was lost in extraction; judging from the
 * body and callers it takes a VP9TileData *td -- confirm against upstream. */
{
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}
97 
/* vp9_frame_unref(): drop every reference held by a VP9Frame and clear the
 * derived raw pointers, which aliased into the buffers being released.
 * NOTE(review): signature line lost in extraction; presumably
 * (AVCodecContext *avctx, VP9Frame *f) -- confirm against upstream. */
{
    ff_thread_release_ext_buffer(avctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    /* These pointed into f->extradata / f->hwaccel_priv_buf respectively. */
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}
106 
/* vp9_frame_alloc(): allocate the frame buffer plus the per-frame extradata
 * (segmentation map + motion-vector reference pairs) from a size-keyed pool,
 * and the hwaccel private buffer if a hardware accelerator is active.
 * Returns 0 on success, AVERROR(ENOMEM) on failure (all partial allocations
 * are released via vp9_frame_unref on the fail path).
 * NOTE(review): the signature line and the call that fills `ret` (upstream:
 * ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) were lost
 * in extraction -- as shown, `ret` is read uninitialized below; restore the
 * call from upstream. */
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    /* NOTE(review): missing line here -- see note above. */
    if (ret < 0)
        return ret;

    /* One segmentation-map byte per 8x8 block: 64 per 64x64 superblock. */
    sz = 64 * s->sb_cols * s->sb_rows;
    if (sz != s->frame_extradata_pool_size) {
        /* Frame size changed: rebuild the pool with the new element size. */
        av_buffer_pool_uninit(&s->frame_extradata_pool);
        s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        goto fail;
    }
    /* Pool buffers are recycled, so zero them before reuse. */
    memset(f->extradata->data, 0, f->extradata->size);

    /* Layout: [segmentation map: sz bytes][mv ref pairs: sz entries]. */
    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    vp9_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}
152 
/* vp9_frame_ref(): make dst share src's frame buffer, extradata and hwaccel
 * private data by taking new references (no deep copy). On failure all
 * partially-taken references are dropped via vp9_frame_unref.
 * NOTE(review): the signature line and upstream line 173
 * (dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;) were lost in
 * extraction -- without it dst's hwaccel pointer stays NULL; restore from
 * upstream. */
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    /* Raw pointers alias into the shared extradata buffer, so copying the
     * pointer values is correct here. */
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        /* NOTE(review): missing assignment here -- see note above. */
    }

    return 0;

fail:
    vp9_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}
182 
/* update_size(): handle a coded-size or pixel-format change. Negotiates the
 * output pixel format (software or hwaccel), then (re)allocates the
 * above-row context buffers that persist across superblock rows. Returns 0
 * on success or a negative AVERROR code.
 * NOTE(review): several hyperlinked lines were lost in extraction -- the
 * extra `case AV_PIX_FMT_*` labels in the switch and the call that fills
 * `ret` before the format check (upstream: ret = ff_get_format(avctx,
 * pix_fmts);). Restore them from upstream; comments below mark the spots. */
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
    /* +2: one slot for the software format, one for the NONE terminator. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        /* Offer the hwaccel formats that can handle this sw format. */
        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        /* NOTE(review): missing case label(s) here (e.g. YUV420P10). */
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            break;
        /* NOTE(review): missing case label here (e.g. YUV420P12). */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
        /* NOTE(review): missing case label(s) here (YUV444P10/12). */
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        /* NOTE(review): missing line here -- `ret = ff_get_format(...)`. */
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    /* Dimensions in 8x8 blocks. */
    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    /* Nothing below changed: keep the existing context buffers. */
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6;
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    /* Slice threading needs one loop-filter level struct per sb row. */
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

    /* Carve the per-column "above" context arrays out of one allocation. */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,             64 * bytesperpixel);
    assign(s->intra_pred_data[1],  uint8_t *,             64 * bytesperpixel);
    assign(s->intra_pred_data[2],  uint8_t *,             64 * bytesperpixel);
    assign(s->above_y_nnz_ctx,     uint8_t *,             16);
    assign(s->above_mode_ctx,      uint8_t *,             16);
    assign(s->above_mv_ctx,        VP9mv(*)[2],           16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,             16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,             16);
    assign(s->above_partition_ctx, uint8_t *,             8);
    assign(s->above_skip_ctx,      uint8_t *,             8);
    assign(s->above_txfm_ctx,      uint8_t *,             8);
    assign(s->above_segpred_ctx,   uint8_t *,             8);
    assign(s->above_intra_ctx,     uint8_t *,             8);
    assign(s->above_comp_ctx,      uint8_t *,             8);
    assign(s->above_ref_ctx,       uint8_t *,             8);
    assign(s->above_filter_ctx,    uint8_t *,             8);
    assign(s->lflvl,               VP9Filter *,           lflvl_len);
#undef assign

    /* Size changed: per-tile scratch buffers must be reallocated later. */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* Re-init DSP functions only when the bit depth actually changed. */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
312 
/* update_block_buffers(): (re)allocate the per-tile coefficient / block
 * buffers, sized differently for 2-pass (whole-frame buffers) vs single-pass
 * (one-superblock buffers per active tile column) decoding. Returns 0 on
 * success or AVERROR(ENOMEM).
 * NOTE(review): lost in extraction -- the signature line, a line freeing the
 * old buffers before reallocation (upstream: vp9_tile_data_free(td);), and
 * the two `if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {`
 * lines that open the block_structure allocations (their closing braces are
 * still visible below). Restore from upstream. */
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    /* Buffers already exist and the pass mode didn't change: keep them. */
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    /* NOTE(review): missing line here -- see note above. */
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs   = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        /* 2-pass: buffers cover the whole frame (all superblocks). */
        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): missing `if (...) {` here -- see note above. */
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        /* Single-pass: only tile 0 needs whole-row state; free the rest. */
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        /* Each active tile column gets one superblock's worth of buffers. */
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): missing `if (...) {` here -- see note above. */
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
372 
// The sign bit is at the end, not the start, of a bit sequence
/* get_sbits_inv(): read an n-bit magnitude, then a trailing sign bit
 * (1 = negative). NOTE(review): signature line lost in extraction;
 * presumably (GetBitContext *gb, int n) given the body -- confirm. */
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
379 
/*
 * Inverse of the "recenter" mapping used by VP9's differential probability
 * update: map a non-negative code v back to a value near the predictor m.
 * Codes up to 2*m alternate around m (odd codes land below, even codes at
 * or above); larger codes are passed through unchanged.
 */
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    if (v <= 2 * m) {
        /* Odd -> m - ceil(v/2); even -> m + v/2. */
        return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
    }
    return v;
}
388 
389 // differential forward probability updates
// differential forward probability updates
/* update_prob(): decode a differentially-coded probability update relative
 * to the current probability p (in [1,255]) and return the new probability.
 * The range-coder reads below are strictly sequential; do not reorder. */
static int update_prob(VPXRangeCoder *c, int p)
{
    /* Inverse of the encoder's remapping: index 0..19 are the coarse
     * "cheap" update values, the rest enumerate the remaining deltas. */
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    /* VLC ladder: each branch consumes more bits for rarer (larger) deltas. */
    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;
    } else {
        d = vp89_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Apply the delta on whichever side of 128 p lies, mirrored so the
     * result stays in [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
447 
/* read_colorspace_details(): parse bit depth, colorspace, color range and
 * chroma subsampling from the uncompressed header and derive s->pix_fmt.
 * Returns 0 on success or AVERROR_INVALIDDATA on reserved/unsupported
 * combinations.
 * NOTE(review): lost in extraction -- the signature line (presumably
 * static int read_colorspace_details(AVCodecContext *avctx)) and the
 * initializer entries of the three static lookup tables below. Restore
 * their contents from upstream; as dumped the arrays are empty. */
{
    static const enum AVColorSpace colorspaces[8] = {
        /* NOTE(review): 8 initializer entries lost in extraction. */
    };
    VP9Context *s = avctx->priv_data;
    /* Profiles 0/1 are 8-bit only; profiles 2/3 signal 10 vs 12 bit. */
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
            /* NOTE(review): 3 initializer entries lost in extraction. */
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* RGB is only valid in the odd profiles (1 and 3). */
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
            /* NOTE(review): initializer entries lost in extraction. */
        };
        if (avctx->profile & 1) {
            /* Profiles 1/3 carry explicit subsampling bits. */
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* Profiles 0/2 are always 4:2:0. */
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
509 
511  const uint8_t *data, int size, int *ref)
512 {
513  VP9Context *s = avctx->priv_data;
514  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
515  int last_invisible;
516  const uint8_t *data2;
517 
518  /* general header */
519  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
520  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
521  return ret;
522  }
523  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
524  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
525  return AVERROR_INVALIDDATA;
526  }
527  avctx->profile = get_bits1(&s->gb);
528  avctx->profile |= get_bits1(&s->gb) << 1;
529  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
530  if (avctx->profile > 3) {
531  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
532  return AVERROR_INVALIDDATA;
533  }
534  s->s.h.profile = avctx->profile;
535  if (get_bits1(&s->gb)) {
536  *ref = get_bits(&s->gb, 3);
537  return 0;
538  }
539 
540  s->last_keyframe = s->s.h.keyframe;
541  s->s.h.keyframe = !get_bits1(&s->gb);
542 
543  last_invisible = s->s.h.invisible;
544  s->s.h.invisible = !get_bits1(&s->gb);
545  s->s.h.errorres = get_bits1(&s->gb);
546  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
547 
548  if (s->s.h.keyframe) {
549  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
550  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
551  return AVERROR_INVALIDDATA;
552  }
553  if ((ret = read_colorspace_details(avctx)) < 0)
554  return ret;
555  // for profile 1, here follows the subsampling bits
556  s->s.h.refreshrefmask = 0xff;
557  w = get_bits(&s->gb, 16) + 1;
558  h = get_bits(&s->gb, 16) + 1;
559  if (get_bits1(&s->gb)) // display size
560  skip_bits(&s->gb, 32);
561  } else {
562  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
563  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
564  if (s->s.h.intraonly) {
565  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
566  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
567  return AVERROR_INVALIDDATA;
568  }
569  if (avctx->profile >= 1) {
570  if ((ret = read_colorspace_details(avctx)) < 0)
571  return ret;
572  } else {
573  s->ss_h = s->ss_v = 1;
574  s->s.h.bpp = 8;
575  s->bpp_index = 0;
576  s->bytesperpixel = 1;
577  s->pix_fmt = AV_PIX_FMT_YUV420P;
578  avctx->colorspace = AVCOL_SPC_BT470BG;
579  avctx->color_range = AVCOL_RANGE_MPEG;
580  }
581  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
582  w = get_bits(&s->gb, 16) + 1;
583  h = get_bits(&s->gb, 16) + 1;
584  if (get_bits1(&s->gb)) // display size
585  skip_bits(&s->gb, 32);
586  } else {
587  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
588  s->s.h.refidx[0] = get_bits(&s->gb, 3);
589  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
590  s->s.h.refidx[1] = get_bits(&s->gb, 3);
591  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
592  s->s.h.refidx[2] = get_bits(&s->gb, 3);
593  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
594  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
595  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
596  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
597  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
598  return AVERROR_INVALIDDATA;
599  }
600  if (get_bits1(&s->gb)) {
601  w = s->s.refs[s->s.h.refidx[0]].f->width;
602  h = s->s.refs[s->s.h.refidx[0]].f->height;
603  } else if (get_bits1(&s->gb)) {
604  w = s->s.refs[s->s.h.refidx[1]].f->width;
605  h = s->s.refs[s->s.h.refidx[1]].f->height;
606  } else if (get_bits1(&s->gb)) {
607  w = s->s.refs[s->s.h.refidx[2]].f->width;
608  h = s->s.refs[s->s.h.refidx[2]].f->height;
609  } else {
610  w = get_bits(&s->gb, 16) + 1;
611  h = get_bits(&s->gb, 16) + 1;
612  }
613  // Note that in this code, "CUR_FRAME" is actually before we
614  // have formally allocated a frame, and thus actually represents
615  // the _last_ frame
616  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
617  s->s.frames[CUR_FRAME].tf.f->height == h;
618  if (get_bits1(&s->gb)) // display size
619  skip_bits(&s->gb, 32);
620  s->s.h.highprecisionmvs = get_bits1(&s->gb);
621  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
622  get_bits(&s->gb, 2);
623  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
624  s->s.h.signbias[0] != s->s.h.signbias[2];
625  if (s->s.h.allowcompinter) {
626  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
627  s->s.h.fixcompref = 2;
628  s->s.h.varcompref[0] = 0;
629  s->s.h.varcompref[1] = 1;
630  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
631  s->s.h.fixcompref = 1;
632  s->s.h.varcompref[0] = 0;
633  s->s.h.varcompref[1] = 2;
634  } else {
635  s->s.h.fixcompref = 0;
636  s->s.h.varcompref[0] = 1;
637  s->s.h.varcompref[1] = 2;
638  }
639  }
640  }
641  }
642  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
643  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
644  s->s.h.framectxid = c = get_bits(&s->gb, 2);
645  if (s->s.h.keyframe || s->s.h.intraonly)
646  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
647 
648  /* loopfilter header data */
649  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
650  // reset loopfilter defaults
651  s->s.h.lf_delta.ref[0] = 1;
652  s->s.h.lf_delta.ref[1] = 0;
653  s->s.h.lf_delta.ref[2] = -1;
654  s->s.h.lf_delta.ref[3] = -1;
655  s->s.h.lf_delta.mode[0] = 0;
656  s->s.h.lf_delta.mode[1] = 0;
657  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
658  }
659  s->s.h.filter.level = get_bits(&s->gb, 6);
660  sharp = get_bits(&s->gb, 3);
661  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
662  // the old cache values since they are still valid
663  if (s->s.h.filter.sharpness != sharp) {
664  for (i = 1; i <= 63; i++) {
665  int limit = i;
666 
667  if (sharp > 0) {
668  limit >>= (sharp + 3) >> 2;
669  limit = FFMIN(limit, 9 - sharp);
670  }
671  limit = FFMAX(limit, 1);
672 
673  s->filter_lut.lim_lut[i] = limit;
674  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
675  }
676  }
677  s->s.h.filter.sharpness = sharp;
678  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
679  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
680  for (i = 0; i < 4; i++)
681  if (get_bits1(&s->gb))
682  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
683  for (i = 0; i < 2; i++)
684  if (get_bits1(&s->gb))
685  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
686  }
687  }
688 
689  /* quantization header data */
690  s->s.h.yac_qi = get_bits(&s->gb, 8);
691  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
692  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
693  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
694  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
695  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
696  if (s->s.h.lossless)
698 
699  /* segmentation header info */
700  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
701  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
702  for (i = 0; i < 7; i++)
703  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
704  get_bits(&s->gb, 8) : 255;
705  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
706  for (i = 0; i < 3; i++)
707  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
708  get_bits(&s->gb, 8) : 255;
709  }
710 
711  if (get_bits1(&s->gb)) {
712  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
713  for (i = 0; i < 8; i++) {
714  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
715  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
716  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
717  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
718  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
719  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
720  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
721  }
722  }
723  }
724 
725  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
726  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
727  int qyac, qydc, quvac, quvdc, lflvl, sh;
728 
729  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
730  if (s->s.h.segmentation.absolute_vals)
731  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
732  else
733  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
734  } else {
735  qyac = s->s.h.yac_qi;
736  }
737  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
738  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
739  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
740  qyac = av_clip_uintp2(qyac, 8);
741 
742  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
743  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
744  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
745  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
746 
747  sh = s->s.h.filter.level >= 32;
748  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
749  if (s->s.h.segmentation.absolute_vals)
750  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
751  else
752  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
753  } else {
754  lflvl = s->s.h.filter.level;
755  }
756  if (s->s.h.lf_delta.enabled) {
757  s->s.h.segmentation.feat[i].lflvl[0][0] =
758  s->s.h.segmentation.feat[i].lflvl[0][1] =
759  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
760  for (j = 1; j < 4; j++) {
761  s->s.h.segmentation.feat[i].lflvl[j][0] =
762  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
763  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
764  s->s.h.segmentation.feat[i].lflvl[j][1] =
765  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
766  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
767  }
768  } else {
769  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
770  sizeof(s->s.h.segmentation.feat[i].lflvl));
771  }
772  }
773 
774  /* tiling info */
775  if ((ret = update_size(avctx, w, h)) < 0) {
776  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
777  w, h, s->pix_fmt);
778  return ret;
779  }
780  for (s->s.h.tiling.log2_tile_cols = 0;
781  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
782  s->s.h.tiling.log2_tile_cols++) ;
783  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
784  max = FFMAX(0, max - 1);
785  while (max > s->s.h.tiling.log2_tile_cols) {
786  if (get_bits1(&s->gb))
787  s->s.h.tiling.log2_tile_cols++;
788  else
789  break;
790  }
791  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
792  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
793  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
794  int n_range_coders;
795  VPXRangeCoder *rc;
796 
797  if (s->td) {
798  for (i = 0; i < s->active_tile_cols; i++)
799  vp9_tile_data_free(&s->td[i]);
800  av_freep(&s->td);
801  }
802 
803  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
804  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
805  s->s.h.tiling.tile_cols : 1;
806  vp9_alloc_entries(avctx, s->sb_rows);
807  if (avctx->active_thread_type == FF_THREAD_SLICE) {
808  n_range_coders = 4; // max_tile_rows
809  } else {
810  n_range_coders = s->s.h.tiling.tile_cols;
811  }
812  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
813  n_range_coders * sizeof(VPXRangeCoder));
814  if (!s->td)
815  return AVERROR(ENOMEM);
816  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
817  for (i = 0; i < s->active_tile_cols; i++) {
818  s->td[i].s = s;
819  s->td[i].c_b = rc;
820  rc += n_range_coders;
821  }
822  }
823 
824  /* check reference frames */
825  if (!s->s.h.keyframe && !s->s.h.intraonly) {
826  int valid_ref_frame = 0;
827  for (i = 0; i < 3; i++) {
828  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
829  int refw = ref->width, refh = ref->height;
830 
831  if (ref->format != avctx->pix_fmt) {
832  av_log(avctx, AV_LOG_ERROR,
833  "Ref pixfmt (%s) did not match current frame (%s)",
834  av_get_pix_fmt_name(ref->format),
835  av_get_pix_fmt_name(avctx->pix_fmt));
836  return AVERROR_INVALIDDATA;
837  } else if (refw == w && refh == h) {
838  s->mvscale[i][0] = s->mvscale[i][1] = 0;
839  } else {
840  /* Check to make sure at least one of frames that */
841  /* this frame references has valid dimensions */
842  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
843  av_log(avctx, AV_LOG_WARNING,
844  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
845  refw, refh, w, h);
846  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
847  continue;
848  }
849  s->mvscale[i][0] = (refw << 14) / w;
850  s->mvscale[i][1] = (refh << 14) / h;
851  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
852  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
853  }
854  valid_ref_frame++;
855  }
856  if (!valid_ref_frame) {
857  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
858  return AVERROR_INVALIDDATA;
859  }
860  }
861 
862  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
863  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
864  s->prob_ctx[3].p = ff_vp9_default_probs;
865  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
866  sizeof(ff_vp9_default_coef_probs));
867  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
868  sizeof(ff_vp9_default_coef_probs));
869  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
870  sizeof(ff_vp9_default_coef_probs));
871  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
872  sizeof(ff_vp9_default_coef_probs));
873  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
874  s->prob_ctx[c].p = ff_vp9_default_probs;
875  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
876  sizeof(ff_vp9_default_coef_probs));
877  }
878 
879  // next 16 bits is size of the rest of the header (arith-coded)
880  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
881  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
882 
883  data2 = align_get_bits(&s->gb);
884  if (size2 > size - (data2 - data)) {
885  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
886  return AVERROR_INVALIDDATA;
887  }
888  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
889  if (ret < 0)
890  return ret;
891 
892  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
893  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
894  return AVERROR_INVALIDDATA;
895  }
896 
897  for (i = 0; i < s->active_tile_cols; i++) {
898  if (s->s.h.keyframe || s->s.h.intraonly) {
899  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
900  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
901  } else {
902  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
903  }
904  s->td[i].nb_block_structure = 0;
905  }
906 
907  /* FIXME is it faster to not copy here, but do it down in the fw updates
908  * as explicit copies if the fw update is missing (and skip the copy upon
909  * fw update)? */
910  s->prob.p = s->prob_ctx[c].p;
911 
912  // txfm updates
913  if (s->s.h.lossless) {
914  s->s.h.txfmmode = TX_4X4;
915  } else {
916  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
917  if (s->s.h.txfmmode == 3)
918  s->s.h.txfmmode += vp89_rac_get(&s->c);
919 
920  if (s->s.h.txfmmode == TX_SWITCHABLE) {
921  for (i = 0; i < 2; i++)
922  if (vpx_rac_get_prob_branchy(&s->c, 252))
923  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
924  for (i = 0; i < 2; i++)
925  for (j = 0; j < 2; j++)
926  if (vpx_rac_get_prob_branchy(&s->c, 252))
927  s->prob.p.tx16p[i][j] =
928  update_prob(&s->c, s->prob.p.tx16p[i][j]);
929  for (i = 0; i < 2; i++)
930  for (j = 0; j < 3; j++)
931  if (vpx_rac_get_prob_branchy(&s->c, 252))
932  s->prob.p.tx32p[i][j] =
933  update_prob(&s->c, s->prob.p.tx32p[i][j]);
934  }
935  }
936 
937  // coef updates
938  for (i = 0; i < 4; i++) {
939  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
940  if (vp89_rac_get(&s->c)) {
941  for (j = 0; j < 2; j++)
942  for (k = 0; k < 2; k++)
943  for (l = 0; l < 6; l++)
944  for (m = 0; m < 6; m++) {
945  uint8_t *p = s->prob.coef[i][j][k][l][m];
946  uint8_t *r = ref[j][k][l][m];
947  if (m >= 3 && l == 0) // dc only has 3 pt
948  break;
949  for (n = 0; n < 3; n++) {
950  if (vpx_rac_get_prob_branchy(&s->c, 252))
951  p[n] = update_prob(&s->c, r[n]);
952  else
953  p[n] = r[n];
954  }
955  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
956  }
957  } else {
958  for (j = 0; j < 2; j++)
959  for (k = 0; k < 2; k++)
960  for (l = 0; l < 6; l++)
961  for (m = 0; m < 6; m++) {
962  uint8_t *p = s->prob.coef[i][j][k][l][m];
963  uint8_t *r = ref[j][k][l][m];
964  if (m > 3 && l == 0) // dc only has 3 pt
965  break;
966  memcpy(p, r, 3);
967  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
968  }
969  }
970  if (s->s.h.txfmmode == i)
971  break;
972  }
973 
974  // mode updates
975  for (i = 0; i < 3; i++)
976  if (vpx_rac_get_prob_branchy(&s->c, 252))
977  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
978  if (!s->s.h.keyframe && !s->s.h.intraonly) {
979  for (i = 0; i < 7; i++)
980  for (j = 0; j < 3; j++)
981  if (vpx_rac_get_prob_branchy(&s->c, 252))
982  s->prob.p.mv_mode[i][j] =
983  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
984 
985  if (s->s.h.filtermode == FILTER_SWITCHABLE)
986  for (i = 0; i < 4; i++)
987  for (j = 0; j < 2; j++)
988  if (vpx_rac_get_prob_branchy(&s->c, 252))
989  s->prob.p.filter[i][j] =
990  update_prob(&s->c, s->prob.p.filter[i][j]);
991 
992  for (i = 0; i < 4; i++)
993  if (vpx_rac_get_prob_branchy(&s->c, 252))
994  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
995 
996  if (s->s.h.allowcompinter) {
997  s->s.h.comppredmode = vp89_rac_get(&s->c);
998  if (s->s.h.comppredmode)
999  s->s.h.comppredmode += vp89_rac_get(&s->c);
1000  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1001  for (i = 0; i < 5; i++)
1002  if (vpx_rac_get_prob_branchy(&s->c, 252))
1003  s->prob.p.comp[i] =
1004  update_prob(&s->c, s->prob.p.comp[i]);
1005  } else {
1006  s->s.h.comppredmode = PRED_SINGLEREF;
1007  }
1008 
1009  if (s->s.h.comppredmode != PRED_COMPREF) {
1010  for (i = 0; i < 5; i++) {
1011  if (vpx_rac_get_prob_branchy(&s->c, 252))
1012  s->prob.p.single_ref[i][0] =
1013  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1014  if (vpx_rac_get_prob_branchy(&s->c, 252))
1015  s->prob.p.single_ref[i][1] =
1016  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1017  }
1018  }
1019 
1020  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1021  for (i = 0; i < 5; i++)
1022  if (vpx_rac_get_prob_branchy(&s->c, 252))
1023  s->prob.p.comp_ref[i] =
1024  update_prob(&s->c, s->prob.p.comp_ref[i]);
1025  }
1026 
1027  for (i = 0; i < 4; i++)
1028  for (j = 0; j < 9; j++)
1029  if (vpx_rac_get_prob_branchy(&s->c, 252))
1030  s->prob.p.y_mode[i][j] =
1031  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1032 
1033  for (i = 0; i < 4; i++)
1034  for (j = 0; j < 4; j++)
1035  for (k = 0; k < 3; k++)
1036  if (vpx_rac_get_prob_branchy(&s->c, 252))
1037  s->prob.p.partition[3 - i][j][k] =
1038  update_prob(&s->c,
1039  s->prob.p.partition[3 - i][j][k]);
1040 
1041  // mv fields don't use the update_prob subexp model for some reason
1042  for (i = 0; i < 3; i++)
1043  if (vpx_rac_get_prob_branchy(&s->c, 252))
1044  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1045 
1046  for (i = 0; i < 2; i++) {
1047  if (vpx_rac_get_prob_branchy(&s->c, 252))
1048  s->prob.p.mv_comp[i].sign =
1049  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1050 
1051  for (j = 0; j < 10; j++)
1052  if (vpx_rac_get_prob_branchy(&s->c, 252))
1053  s->prob.p.mv_comp[i].classes[j] =
1054  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1055 
1056  if (vpx_rac_get_prob_branchy(&s->c, 252))
1057  s->prob.p.mv_comp[i].class0 =
1058  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1059 
1060  for (j = 0; j < 10; j++)
1061  if (vpx_rac_get_prob_branchy(&s->c, 252))
1062  s->prob.p.mv_comp[i].bits[j] =
1063  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1064  }
1065 
1066  for (i = 0; i < 2; i++) {
1067  for (j = 0; j < 2; j++)
1068  for (k = 0; k < 3; k++)
1069  if (vpx_rac_get_prob_branchy(&s->c, 252))
1070  s->prob.p.mv_comp[i].class0_fp[j][k] =
1071  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1072 
1073  for (j = 0; j < 3; j++)
1074  if (vpx_rac_get_prob_branchy(&s->c, 252))
1075  s->prob.p.mv_comp[i].fp[j] =
1076  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1077  }
1078 
1079  if (s->s.h.highprecisionmvs) {
1080  for (i = 0; i < 2; i++) {
1081  if (vpx_rac_get_prob_branchy(&s->c, 252))
1082  s->prob.p.mv_comp[i].class0_hp =
1083  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1084 
1085  if (vpx_rac_get_prob_branchy(&s->c, 252))
1086  s->prob.p.mv_comp[i].hp =
1087  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1088  }
1089  }
1090  }
1091 
1092  return (data2 - data) + size2;
1093 }
1094 
/*
 * Recursively decode one superblock at block level 'bl': read the partition
 * choice (none / horizontal / vertical / 4-way split) from the arithmetic
 * coder and decode the resulting block(s), recursing one level deeper on
 * splits. Blocks straddling the right/bottom frame edge can only use the
 * partitions whose decoded sub-blocks stay inside the frame, so fewer (or no)
 * symbols are read for them.
 *
 * NOTE(review): this listing is an extraction of the upstream file and the
 * two statements that read the partition symbol and assign 'bp' (before the
 * BL_8X8 decode and before the switch) are missing from it -- confirm
 * against the upstream source before relying on this copy.
 */
1095 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1096  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1097 {
1098  const VP9Context *s = td->s;
 /* partition context: bit 0 from the row above, bit 1 from the column to
  * the left; selects the probability set used below */
1099  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1100  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
 /* key/intra-only frames use the fixed default partition probabilities;
  * inter frames use the per-frame (possibly updated) set */
1101  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1102  s->prob.p.partition[bl][c];
1103  enum BlockPartition bp;
1104  ptrdiff_t hbs = 4 >> bl; // half block size at this level, in 8x8-block units
1105  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1106  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1107  int bytesperpixel = s->bytesperpixel;
1108 
1109  if (bl == BL_8X8) {
 /* NOTE(review): the statement assigning 'bp' here is absent from this
  * extraction */
1111  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1112  } else if (col + hbs < s->cols) { // FIXME why not <=?
1113  if (row + hbs < s->rows) { // FIXME why not <=?
 /* fully inside the frame: all four partition types are possible.
  * NOTE(review): the read assigning 'bp' before this switch is absent
  * from this extraction. */
1115  switch (bp) {
1116  case PARTITION_NONE:
1117  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1118  break;
1119  case PARTITION_H:
1120  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1121  yoff += hbs * 8 * y_stride;
1122  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1123  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1124  break;
1125  case PARTITION_V:
1126  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1127  yoff += hbs * 8 * bytesperpixel;
1128  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1129  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1130  break;
1131  case PARTITION_SPLIT:
1132  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1133  decode_sb(td, row, col + hbs, lflvl,
1134  yoff + 8 * hbs * bytesperpixel,
1135  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1136  yoff += hbs * 8 * y_stride;
1137  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1138  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1139  decode_sb(td, row + hbs, col + hbs, lflvl,
1140  yoff + 8 * hbs * bytesperpixel,
1141  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1142  break;
1143  default:
1144  av_assert0(0);
1145  }
 /* right half inside, bottom half outside: only SPLIT or H possible,
  * so a single binary symbol decides */
1146  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1147  bp = PARTITION_SPLIT;
1148  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1149  decode_sb(td, row, col + hbs, lflvl,
1150  yoff + 8 * hbs * bytesperpixel,
1151  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1152  } else {
1153  bp = PARTITION_H;
1154  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1155  }
 /* bottom half inside, right half outside: only SPLIT or V possible */
1156  } else if (row + hbs < s->rows) { // FIXME why not <=?
1157  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1158  bp = PARTITION_SPLIT;
1159  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1160  yoff += hbs * 8 * y_stride;
1161  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1162  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1163  } else {
1164  bp = PARTITION_V;
1165  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1166  }
 /* both halves outside: the partition is forced to SPLIT, no symbol read */
1167  } else {
1168  bp = PARTITION_SPLIT;
1169  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1170  }
 /* record the chosen partition in the per-tile statistics */
1171  td->counts.partition[bl][c][bp]++;
1172 }
1173 
1174 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1175  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1176 {
1177  const VP9Context *s = td->s;
1178  VP9Block *b = td->b;
1179  ptrdiff_t hbs = 4 >> bl;
1180  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1181  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1182  int bytesperpixel = s->bytesperpixel;
1183 
1184  if (bl == BL_8X8) {
1185  av_assert2(b->bl == BL_8X8);
1186  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1187  } else if (td->b->bl == bl) {
1188  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1189  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1190  yoff += hbs * 8 * y_stride;
1191  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1192  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1193  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1194  yoff += hbs * 8 * bytesperpixel;
1195  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1196  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1197  }
1198  } else {
1199  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1200  if (col + hbs < s->cols) { // FIXME why not <=?
1201  if (row + hbs < s->rows) {
1202  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1203  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1204  yoff += hbs * 8 * y_stride;
1205  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1206  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1207  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1208  yoff + 8 * hbs * bytesperpixel,
1209  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1210  } else {
1211  yoff += hbs * 8 * bytesperpixel;
1212  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1213  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1214  }
1215  } else if (row + hbs < s->rows) {
1216  yoff += hbs * 8 * y_stride;
1217  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1218  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1219  }
1220  }
1221 }
1222 
/**
 * Compute the range [*start, *end) covered by tile number 'idx' out of
 * 1 << log2_n equally sized tiles spanning 'n' 64x64 superblocks. Both
 * bounds are clamped to the frame and converted from superblock units to
 * 8x8-block units (<< 3).
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_first = (idx * n) >> log2_n;
    int sb_last = ((idx + 1) * n) >> log2_n;

    if (sb_first > n)
        sb_first = n;
    if (sb_last > n)
        sb_last = n;
    *start = sb_first << 3;
    *end = sb_last << 3;
}
1230 
1232 {
1233  int i;
1234 
1235  av_freep(&s->intra_pred_data[0]);
1236  for (i = 0; i < s->active_tile_cols; i++)
1237  vp9_tile_data_free(&s->td[i]);
1238 }
1239 
1241 {
1242  VP9Context *s = avctx->priv_data;
1243  int i;
1244 
1245  for (i = 0; i < 3; i++) {
1246  vp9_frame_unref(avctx, &s->s.frames[i]);
1247  av_frame_free(&s->s.frames[i].tf.f);
1248  }
1249  av_buffer_pool_uninit(&s->frame_extradata_pool);
1250  for (i = 0; i < 8; i++) {
1251  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1252  av_frame_free(&s->s.refs[i].f);
1253  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1254  av_frame_free(&s->next_refs[i].f);
1255  }
1256 
1257  free_buffers(s);
1258 #if HAVE_THREADS
1259  av_freep(&s->entries);
1260  ff_pthread_free(s, vp9_context_offsets);
1261 #endif
1262  av_freep(&s->td);
1263  return 0;
1264 }
1265 
/*
 * Single-threaded tile decode loop: for each tile row, set up one range
 * decoder per tile column, then decode the row of 64x64 superblocks across
 * all tile columns, run the loopfilter over the finished superblock row
 * (unless this is pass 1), and report per-row progress for frame threading.
 * Returns 0 on success or a negative AVERROR code on bitstream errors.
 */
1266 static int decode_tiles(AVCodecContext *avctx,
1267  const uint8_t *data, int size)
1268 {
1269  VP9Context *s = avctx->priv_data;
1270  VP9TileData *td = &s->td[0];
1271  int row, col, tile_row, tile_col, ret;
1272  int bytesperpixel;
1273  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1274  AVFrame *f;
1275  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1276 
1277  f = s->s.frames[CUR_FRAME].tf.f;
1278  ls_y = f->linesize[0];
1279  ls_uv =f->linesize[1];
1280  bytesperpixel = s->bytesperpixel;
1281 
1282  yoff = uvoff = 0;
1283  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1284  set_tile_offset(&tile_row_start, &tile_row_end,
1285  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1286 
 /* every tile except the very last is preceded by a 32-bit big-endian
  * size field; the last tile uses whatever remains of the packet */
1287  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1288  int64_t tile_size;
1289 
1290  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1291  tile_row == s->s.h.tiling.tile_rows - 1) {
1292  tile_size = size;
1293  } else {
1294  tile_size = AV_RB32(data);
1295  data += 4;
1296  size -= 4;
1297  }
1298  if (tile_size > size)
1299  return AVERROR_INVALIDDATA;
1300  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1301  if (ret < 0)
1302  return ret;
1303  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1304  return AVERROR_INVALIDDATA;
1305  data += tile_size;
1306  size -= tile_size;
1307  }
1308 
 /* decode one row of 64x64 superblocks (8 rows of 8x8 blocks) across
  * every tile column of this tile row */
1309  for (row = tile_row_start; row < tile_row_end;
1310  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1311  VP9Filter *lflvl_ptr = s->lflvl;
1312  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1313 
1314  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1315  set_tile_offset(&tile_col_start, &tile_col_end,
1316  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1317  td->tile_col_start = tile_col_start;
 /* reset the left-edge prediction contexts at each tile boundary;
  * skipped in pass 2, which only replays recorded block data */
1318  if (s->pass != 2) {
1319  memset(td->left_partition_ctx, 0, 8);
1320  memset(td->left_skip_ctx, 0, 8);
1321  if (s->s.h.keyframe || s->s.h.intraonly) {
1322  memset(td->left_mode_ctx, DC_PRED, 16);
1323  } else {
1324  memset(td->left_mode_ctx, NEARESTMV, 8);
1325  }
1326  memset(td->left_y_nnz_ctx, 0, 16);
1327  memset(td->left_uv_nnz_ctx, 0, 32);
1328  memset(td->left_segpred_ctx, 0, 8);
1329 
1330  td->c = &td->c_b[tile_col];
1331  }
1332 
1333  for (col = tile_col_start;
1334  col < tile_col_end;
1335  col += 8, yoff2 += 64 * bytesperpixel,
1336  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1337  // FIXME integrate with lf code (i.e. zero after each
1338  // use, similar to invtxfm coefficients, or similar)
1339  if (s->pass != 1) {
1340  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1341  }
1342 
1343  if (s->pass == 2) {
1344  decode_sb_mem(td, row, col, lflvl_ptr,
1345  yoff2, uvoff2, BL_64X64);
1346  } else {
 /* running out of coded data before the tile is done is an error */
1347  if (vpx_rac_is_end(td->c)) {
1348  return AVERROR_INVALIDDATA;
1349  }
1350  decode_sb(td, row, col, lflvl_ptr,
1351  yoff2, uvoff2, BL_64X64);
1352  }
1353  }
1354  }
1355 
 /* pass 1 only records block structure; no filtering or output yet */
1356  if (s->pass == 1)
1357  continue;
1358 
1359  // backup pre-loopfilter reconstruction data for intra
1360  // prediction of next row of sb64s
1361  if (row + 8 < s->rows) {
1362  memcpy(s->intra_pred_data[0],
1363  f->data[0] + yoff + 63 * ls_y,
1364  8 * s->cols * bytesperpixel);
1365  memcpy(s->intra_pred_data[1],
1366  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1367  8 * s->cols * bytesperpixel >> s->ss_h);
1368  memcpy(s->intra_pred_data[2],
1369  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1370  8 * s->cols * bytesperpixel >> s->ss_h);
1371  }
1372 
1373  // loopfilter one row
1374  if (s->s.h.filter.level) {
1375  yoff2 = yoff;
1376  uvoff2 = uvoff;
1377  lflvl_ptr = s->lflvl;
1378  for (col = 0; col < s->cols;
1379  col += 8, yoff2 += 64 * bytesperpixel,
1380  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1381  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1382  yoff2, uvoff2);
1383  }
1384  }
1385 
1386  // FIXME maybe we can make this more finegrained by running the
1387  // loopfilter per-block instead of after each sbrow
1388  // In fact that would also make intra pred left preparation easier?
1389  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1390  }
1391  }
1392  return 0;
1393 }
1394 
1395 #if HAVE_THREADS
1396 static av_always_inline
1397 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1398  int threadnr)
1399 {
1400  VP9Context *s = avctx->priv_data;
1401  VP9TileData *td = &s->td[jobnr];
1402  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1403  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1404  unsigned tile_cols_len;
1405  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1406  VP9Filter *lflvl_ptr_base;
1407  AVFrame *f;
1408 
1409  f = s->s.frames[CUR_FRAME].tf.f;
1410  ls_y = f->linesize[0];
1411  ls_uv =f->linesize[1];
1412 
1413  set_tile_offset(&tile_col_start, &tile_col_end,
1414  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1415  td->tile_col_start = tile_col_start;
1416  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1417  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1418  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1419 
1420  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1421  set_tile_offset(&tile_row_start, &tile_row_end,
1422  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1423 
1424  td->c = &td->c_b[tile_row];
1425  for (row = tile_row_start; row < tile_row_end;
1426  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1427  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1428  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1429 
1430  memset(td->left_partition_ctx, 0, 8);
1431  memset(td->left_skip_ctx, 0, 8);
1432  if (s->s.h.keyframe || s->s.h.intraonly) {
1433  memset(td->left_mode_ctx, DC_PRED, 16);
1434  } else {
1435  memset(td->left_mode_ctx, NEARESTMV, 8);
1436  }
1437  memset(td->left_y_nnz_ctx, 0, 16);
1438  memset(td->left_uv_nnz_ctx, 0, 32);
1439  memset(td->left_segpred_ctx, 0, 8);
1440 
1441  for (col = tile_col_start;
1442  col < tile_col_end;
1443  col += 8, yoff2 += 64 * bytesperpixel,
1444  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1445  // FIXME integrate with lf code (i.e. zero after each
1446  // use, similar to invtxfm coefficients, or similar)
1447  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1448  decode_sb(td, row, col, lflvl_ptr,
1449  yoff2, uvoff2, BL_64X64);
1450  }
1451 
1452  // backup pre-loopfilter reconstruction data for intra
1453  // prediction of next row of sb64s
1454  tile_cols_len = tile_col_end - tile_col_start;
1455  if (row + 8 < s->rows) {
1456  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1457  f->data[0] + yoff + 63 * ls_y,
1458  8 * tile_cols_len * bytesperpixel);
1459  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1460  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1461  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1462  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1463  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1464  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1465  }
1466 
1467  vp9_report_tile_progress(s, row >> 3, 1);
1468  }
1469  }
1470  return 0;
1471 }
1472 
1473 static av_always_inline
1474 int loopfilter_proc(AVCodecContext *avctx)
1475 {
1476  VP9Context *s = avctx->priv_data;
1477  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1478  VP9Filter *lflvl_ptr;
1479  int bytesperpixel = s->bytesperpixel, col, i;
1480  AVFrame *f;
1481 
1482  f = s->s.frames[CUR_FRAME].tf.f;
1483  ls_y = f->linesize[0];
1484  ls_uv =f->linesize[1];
1485 
1486  for (i = 0; i < s->sb_rows; i++) {
1487  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1488 
1489  if (s->s.h.filter.level) {
1490  yoff = (ls_y * 64)*i;
1491  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1492  lflvl_ptr = s->lflvl+s->sb_cols*i;
1493  for (col = 0; col < s->cols;
1494  col += 8, yoff += 64 * bytesperpixel,
1495  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1496  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1497  yoff, uvoff);
1498  }
1499  }
1500  }
1501  return 0;
1502 }
1503 #endif
1504 
1506 {
1507  AVVideoEncParams *par;
1508  unsigned int tile, nb_blocks = 0;
1509 
1510  if (s->s.h.segmentation.enabled) {
1511  for (tile = 0; tile < s->active_tile_cols; tile++)
1512  nb_blocks += s->td[tile].nb_block_structure;
1513  }
1514 
1516  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1517  if (!par)
1518  return AVERROR(ENOMEM);
1519 
1520  par->qp = s->s.h.yac_qi;
1521  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1522  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1523  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1524  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1525  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1526 
1527  if (nb_blocks) {
1528  unsigned int block = 0;
1529  unsigned int tile, block_tile;
1530 
1531  for (tile = 0; tile < s->active_tile_cols; tile++) {
1532  VP9TileData *td = &s->td[tile];
1533 
1534  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1536  unsigned int row = td->block_structure[block_tile].row;
1537  unsigned int col = td->block_structure[block_tile].col;
1538  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1539 
1540  b->src_x = col * 8;
1541  b->src_y = row * 8;
1542  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1543  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1544 
1545  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1546  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1547  if (s->s.h.segmentation.absolute_vals)
1548  b->delta_qp -= par->qp;
1549  }
1550  }
1551  }
1552  }
1553 
1554  return 0;
1555 }
1556 
1558  int *got_frame, AVPacket *pkt)
1559 {
1560  const uint8_t *data = pkt->data;
1561  int size = pkt->size;
1562  VP9Context *s = avctx->priv_data;
1563  int ret, i, j, ref;
1564  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1565  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1566  AVFrame *f;
1567 
1568  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1569  return ret;
1570  } else if (ret == 0) {
1571  if (!s->s.refs[ref].f->buf[0]) {
1572  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1573  return AVERROR_INVALIDDATA;
1574  }
1575  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1576  return ret;
1577  frame->pts = pkt->pts;
1578  frame->pkt_dts = pkt->dts;
1579  for (i = 0; i < 8; i++) {
1580  if (s->next_refs[i].f->buf[0])
1581  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1582  if (s->s.refs[i].f->buf[0] &&
1583  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1584  return ret;
1585  }
1586  *got_frame = 1;
1587  return pkt->size;
1588  }
1589  data += ret;
1590  size -= ret;
1591 
1592  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1593  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1594  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1595  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1596  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1597  return ret;
1598  }
1599  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1600  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1601  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1602  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1603  return ret;
1604  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1605  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1606  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1607  return ret;
1608  f = s->s.frames[CUR_FRAME].tf.f;
1609  f->key_frame = s->s.h.keyframe;
1610  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1611 
1612  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1613  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1614  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1615  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1616  }
1617 
1618  // ref frame setup
1619  for (i = 0; i < 8; i++) {
1620  if (s->next_refs[i].f->buf[0])
1621  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1622  if (s->s.h.refreshrefmask & (1 << i)) {
1623  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1624  } else if (s->s.refs[i].f->buf[0]) {
1625  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1626  }
1627  if (ret < 0)
1628  return ret;
1629  }
1630 
1631  if (avctx->hwaccel) {
1632  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
1633  if (ret < 0)
1634  return ret;
1635  ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1636  if (ret < 0)
1637  return ret;
1638  ret = avctx->hwaccel->end_frame(avctx);
1639  if (ret < 0)
1640  return ret;
1641  goto finish;
1642  }
1643 
1644  // main tile decode loop
1645  memset(s->above_partition_ctx, 0, s->cols);
1646  memset(s->above_skip_ctx, 0, s->cols);
1647  if (s->s.h.keyframe || s->s.h.intraonly) {
1648  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1649  } else {
1650  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1651  }
1652  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1653  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1654  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1655  memset(s->above_segpred_ctx, 0, s->cols);
1656  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1657  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1658  if ((ret = update_block_buffers(avctx)) < 0) {
1659  av_log(avctx, AV_LOG_ERROR,
1660  "Failed to allocate block buffers\n");
1661  return ret;
1662  }
1663  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1664  int j, k, l, m;
1665 
1666  for (i = 0; i < 4; i++) {
1667  for (j = 0; j < 2; j++)
1668  for (k = 0; k < 2; k++)
1669  for (l = 0; l < 6; l++)
1670  for (m = 0; m < 6; m++)
1671  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1672  s->prob.coef[i][j][k][l][m], 3);
1673  if (s->s.h.txfmmode == i)
1674  break;
1675  }
1676  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1677  ff_thread_finish_setup(avctx);
1678  } else if (!s->s.h.refreshctx) {
1679  ff_thread_finish_setup(avctx);
1680  }
1681 
1682 #if HAVE_THREADS
1683  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1684  for (i = 0; i < s->sb_rows; i++)
1685  atomic_store(&s->entries[i], 0);
1686  }
1687 #endif
1688 
1689  do {
1690  for (i = 0; i < s->active_tile_cols; i++) {
1691  s->td[i].b = s->td[i].b_base;
1692  s->td[i].block = s->td[i].block_base;
1693  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1694  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1695  s->td[i].eob = s->td[i].eob_base;
1696  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1697  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1698  s->td[i].error_info = 0;
1699  }
1700 
1701 #if HAVE_THREADS
1702  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1703  int tile_row, tile_col;
1704 
1705  av_assert1(!s->pass);
1706 
1707  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1708  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1709  int64_t tile_size;
1710 
1711  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1712  tile_row == s->s.h.tiling.tile_rows - 1) {
1713  tile_size = size;
1714  } else {
1715  tile_size = AV_RB32(data);
1716  data += 4;
1717  size -= 4;
1718  }
1719  if (tile_size > size)
1720  return AVERROR_INVALIDDATA;
1721  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1722  if (ret < 0)
1723  return ret;
1724  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1725  return AVERROR_INVALIDDATA;
1726  data += tile_size;
1727  size -= tile_size;
1728  }
1729  }
1730 
1731  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1732  } else
1733 #endif
1734  {
1735  ret = decode_tiles(avctx, data, size);
1736  if (ret < 0) {
1737  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1738  return ret;
1739  }
1740  }
1741 
1742  // Sum all counts fields into td[0].counts for tile threading
1743  if (avctx->active_thread_type == FF_THREAD_SLICE)
1744  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1745  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1746  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1747 
1748  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1750  ff_thread_finish_setup(avctx);
1751  }
1752  } while (s->pass++ == 1);
1753  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1754 
1755  if (s->td->error_info < 0) {
1756  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1757  s->td->error_info = 0;
1758  return AVERROR_INVALIDDATA;
1759  }
1761  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1762  if (ret < 0)
1763  return ret;
1764  }
1765 
1766 finish:
1767  // ref frame setup
1768  for (i = 0; i < 8; i++) {
1769  if (s->s.refs[i].f->buf[0])
1770  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1771  if (s->next_refs[i].f->buf[0] &&
1772  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1773  return ret;
1774  }
1775 
1776  if (!s->s.h.invisible) {
1777  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1778  return ret;
1779  *got_frame = 1;
1780  }
1781 
1782  return pkt->size;
1783 }
1784 
1786 {
1787  VP9Context *s = avctx->priv_data;
1788  int i;
1789 
1790  for (i = 0; i < 3; i++)
1791  vp9_frame_unref(avctx, &s->s.frames[i]);
1792  for (i = 0; i < 8; i++)
1793  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1794 }
1795 
1797 {
1798  VP9Context *s = avctx->priv_data;
1799  int ret;
1800 
1801  s->last_bpp = 0;
1802  s->s.h.filter.sharpness = -1;
1803 
1804 #if HAVE_THREADS
1805  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1806  ret = ff_pthread_init(s, vp9_context_offsets);
1807  if (ret < 0)
1808  return ret;
1809  }
1810 #endif
1811 
1812  for (int i = 0; i < 3; i++) {
1813  s->s.frames[i].tf.f = av_frame_alloc();
1814  if (!s->s.frames[i].tf.f)
1815  return AVERROR(ENOMEM);
1816  }
1817  for (int i = 0; i < 8; i++) {
1818  s->s.refs[i].f = av_frame_alloc();
1819  s->next_refs[i].f = av_frame_alloc();
1820  if (!s->s.refs[i].f || !s->next_refs[i].f)
1821  return AVERROR(ENOMEM);
1822  }
1823  return 0;
1824 }
1825 
1826 #if HAVE_THREADS
1827 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1828 {
1829  int i, ret;
1830  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1831 
1832  for (i = 0; i < 3; i++) {
1833  if (s->s.frames[i].tf.f->buf[0])
1834  vp9_frame_unref(dst, &s->s.frames[i]);
1835  if (ssrc->s.frames[i].tf.f->buf[0]) {
1836  if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
1837  return ret;
1838  }
1839  }
1840  for (i = 0; i < 8; i++) {
1841  if (s->s.refs[i].f->buf[0])
1842  ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
1843  if (ssrc->next_refs[i].f->buf[0]) {
1844  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1845  return ret;
1846  }
1847  }
1848 
1849  s->s.h.invisible = ssrc->s.h.invisible;
1850  s->s.h.keyframe = ssrc->s.h.keyframe;
1851  s->s.h.intraonly = ssrc->s.h.intraonly;
1852  s->ss_v = ssrc->ss_v;
1853  s->ss_h = ssrc->ss_h;
1854  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1855  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1856  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1857  s->bytesperpixel = ssrc->bytesperpixel;
1858  s->gf_fmt = ssrc->gf_fmt;
1859  s->w = ssrc->w;
1860  s->h = ssrc->h;
1861  s->s.h.bpp = ssrc->s.h.bpp;
1862  s->bpp_index = ssrc->bpp_index;
1863  s->pix_fmt = ssrc->pix_fmt;
1864  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1865  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1866  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1867  sizeof(s->s.h.segmentation.feat));
1868 
1869  return 0;
1870 }
1871 #endif
1872 
1874  .p.name = "vp9",
1875  CODEC_LONG_NAME("Google VP9"),
1876  .p.type = AVMEDIA_TYPE_VIDEO,
1877  .p.id = AV_CODEC_ID_VP9,
1878  .priv_data_size = sizeof(VP9Context),
1879  .init = vp9_decode_init,
1880  .close = vp9_decode_free,
1883  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1886  .flush = vp9_decode_flush,
1887  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1888  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1889  .bsfs = "vp9_superframe_split",
1890  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1891 #if CONFIG_VP9_DXVA2_HWACCEL
1892  HWACCEL_DXVA2(vp9),
1893 #endif
1894 #if CONFIG_VP9_D3D11VA_HWACCEL
1895  HWACCEL_D3D11VA(vp9),
1896 #endif
1897 #if CONFIG_VP9_D3D11VA2_HWACCEL
1898  HWACCEL_D3D11VA2(vp9),
1899 #endif
1900 #if CONFIG_VP9_NVDEC_HWACCEL
1901  HWACCEL_NVDEC(vp9),
1902 #endif
1903 #if CONFIG_VP9_VAAPI_HWACCEL
1904  HWACCEL_VAAPI(vp9),
1905 #endif
1906 #if CONFIG_VP9_VDPAU_HWACCEL
1907  HWACCEL_VDPAU(vp9),
1908 #endif
1909 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1910  HWACCEL_VIDEOTOOLBOX(vp9),
1911 #endif
1912  NULL
1913  },
1914 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1266
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:107
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:52
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:50
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:972
ff_thread_get_format
#define ff_thread_get_format
Definition: thread.h:65
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:68
VP9Frame
Definition: vp9shared.h:65
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:119
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1873
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1095
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1785
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:256
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:51
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
BlockPartition
BlockPartition
Definition: vp9shared.h:35
AVPacket::data
uint8_t * data
Definition: packet.h:374
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:146
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:183
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1174
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:171
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:510
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:459
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:588
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:46
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
vp89_rac.h
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:72
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:91
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
VP9Block
Definition: vp9dec.h:85
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:66
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:371
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:593
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:591
AVHWAccel
Definition: avcodec.h:2076
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:67
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:342
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:134
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:475
GetBitContext
Definition: get_bits.h:107
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:36
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:73
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1240
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:87
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:462
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:524
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1852
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:544
BL_8X8
@ BL_8X8
Definition: vp9shared.h:80
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:38
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:153
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:256
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:594
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:404
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
vp9data.h
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
decode.h
get_bits.h
field
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1557
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:896
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:107
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1009
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
VP9Context
Definition: vp9dec.h:97
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:170
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2176
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:461
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:460
VP9mv
Definition: vp9shared.h:55
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:39
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:79
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVVideoEncParams of the given type, plus an array of nb_blocks AVVideoBlockParams, and initializes the variables.
Definition: video_enc_params.c:58
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:70
f
f
Definition: af_crystalizer.c:122
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
codec_internal.h
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:63
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:448
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:464
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:88
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1231
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:466
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1514
AVCodecHWConfigInternal
Definition: hwconfig.h:29
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:313
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:111
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:380
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1513
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:126
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:595
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:390
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1851
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:598
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2165
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:436
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:476
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:587
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:374
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:590
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:930
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:130
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
BL_64X64
@ BL_64X64
Definition: vp9shared.h:77
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1796
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:540
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:91
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don't reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can't be moved
VP9mvrefPair
Definition: vp9shared.h:60
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:463
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:394
VP9TileData
Definition: vp9dec.h:168
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:426
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1521
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2185
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:98
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:69
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1565
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:43
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
BlockLevel
BlockLevel
Definition: vp9shared.h:76
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2004
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:104
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2138
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:321
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:169
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1505
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:37
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
d
d
Definition: ffmpeg_filter.c:156
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:465
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:589
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:139
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1223
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2808
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:75
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540