FFmpeg
ffv1dec.c
Go to the documentation of this file.
1 /*
2  * FFV1 decoder
3  *
4  * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * FF Video Codec 1 (a lossless codec) decoder
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/crc.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/pixdesc.h"
33 #include "avcodec.h"
34 #include "codec_internal.h"
35 #include "get_bits.h"
36 #include "rangecoder.h"
37 #include "golomb.h"
38 #include "mathops.h"
39 #include "ffv1.h"
40 #include "thread.h"
41 #include "threadframe.h"
42 
43 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
44  int is_signed)
45 {
46  if (get_rac(c, state + 0))
47  return 0;
48  else {
49  int i, e;
50  unsigned a;
51  e = 0;
52  while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
53  e++;
54  if (e > 31)
55  return AVERROR_INVALIDDATA;
56  }
57 
58  a = 1;
59  for (i = e - 1; i >= 0; i--)
60  a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
61 
62  e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
63  return (a ^ e) - e;
64  }
65 }
66 
/**
 * Out-of-line wrapper around get_symbol_inline() for call sites that are
 * not speed critical (header/slice parameter parsing), keeping code size
 * down while the hot decode loops use the inlined variant.
 */
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
{
    return get_symbol_inline(c, state, is_signed);
}
71 
/**
 * Decode one residual with an adaptive Golomb-Rice code.
 *
 * @param gb    bit reader positioned at the coded symbol
 * @param state adaptive per-context statistics (drift/error_sum/bias/count)
 * @param bits  sample bit depth, used to fold the result into range
 * @return the bias-corrected, folded residual
 */
static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
                                 int bits)
{
    int k, i, v, ret;

    /* Derive the Rice parameter k: smallest k with count*2^k >= error_sum. */
    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    v = get_sr_golomb(gb, k, 12, bits);
    ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
            v, state->bias, state->error_sum, state->drift, state->count, k);

    /* Conditionally invert v (bitwise NOT via the sign of the predictor)
     * when the running drift predicts a negative correction. */
    v ^= ((2 * state->drift + state->count) >> 31);

    ret = fold(v + state->bias, bits);

    /* NOTE(review): upstream updates the adaptive statistics here
     * (update_vlc_state()); that line appears to have been lost in
     * extraction — confirm against the original source. */

    return ret;
}
96 
98 {
99  if (s->ac != AC_GOLOMB_RICE) {
100  RangeCoder *const c = &s->c;
101  if (c->overread > MAX_OVERREAD)
102  return AVERROR_INVALIDDATA;
103  } else {
104  if (get_bits_left(&s->gb) < 1)
105  return AVERROR_INVALIDDATA;
106  }
107  return 0;
108 }
109 
110 #define TYPE int16_t
111 #define RENAME(name) name
112 #include "ffv1dec_template.c"
113 #undef TYPE
114 #undef RENAME
115 
116 #define TYPE int32_t
117 #define RENAME(name) name ## 32
118 #include "ffv1dec_template.c"
119 
/**
 * Decode one plane (or one interleaved component) of the current slice.
 *
 * @param src          base pointer of the destination pixels for this slice
 * @param w            plane width in samples
 * @param h            plane height in samples
 * @param stride       destination line stride in bytes
 * @param plane_index  context-model index used for this plane
 * @param pixel_stride distance between consecutive output samples
 *                     (1 for planar layouts, 2 for interleaved ones)
 * @return 0 on success, a negative AVERROR code on decode failure
 */
static int decode_plane(FFV1Context *s, uint8_t *src,
                        int w, int h, int stride, int plane_index,
                        int pixel_stride)
{
    int x, y;
    int16_t *sample[2];
    /* Two sliding line buffers (previous and current row). The +3 offset
     * leaves headroom for the out-of-frame left-border samples read by
     * the predictor; w + 6 is one padded line. */
    sample[0] = s->sample_buffer + 3;
    sample[1] = s->sample_buffer + w + 6 + 3;

    s->run_index = 0;

    /* Rows above the slice predict from zero. */
    memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        int16_t *temp = sample[0]; // FIXME: try a normal buffer

        /* After the swap, sample[0] is the previously decoded row and
         * sample[1] is the row about to be decoded. */
        sample[0] = sample[1];
        sample[1] = temp;

        /* Extend borders: left of the new row mirrors the row above,
         * right edge of the previous row repeats its last sample. */
        sample[1][-1] = sample[0][0];
        sample[0][w]  = sample[0][w - 1];

        if (s->avctx->bits_per_raw_sample <= 8) {
            int ret = decode_line(s, w, sample, plane_index, 8);
            if (ret < 0)
                return ret;
            for (x = 0; x < w; x++)
                src[x*pixel_stride + stride * y] = sample[1][x];
        } else {
            int ret = decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
            if (ret < 0)
                return ret;
            if (s->packed_at_lsb) {
                /* Samples stored in the low bits of each 16-bit word. */
                for (x = 0; x < w; x++) {
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
                }
            } else {
                /* MSB-aligned output: shift up and replicate the top bits
                 * into the vacated low bits. The (uint16_t **) cast
                 * re-reads sample[1][x] as unsigned for the right shift. */
                for (x = 0; x < w; x++) {
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - s->avctx->bits_per_raw_sample) | ((uint16_t **)sample)[1][x] >> (2 * s->avctx->bits_per_raw_sample - 16);
                }
            }
        }
    }
    return 0;
}
165 
{
    /* NOTE(review): the defining line of this function (upstream:
     * decode_slice_header(FFV1Context *f, FFV1Context *fs)) was lost in
     * extraction. The body below parses a version >= 3 per-slice header. */
    RangeCoder *c = &fs->c;
    uint8_t state[CONTEXT_SIZE];
    unsigned ps, i, context_count;
    int sx, sy, sw, sh;

    /* Slice headers are coded with a fresh, flat context. */
    memset(state, 128, sizeof(state));
    sx = get_symbol(c, state, 0);
    sy = get_symbol(c, state, 0);
    sw = get_symbol(c, state, 0) + 1U;
    sh = get_symbol(c, state, 0) + 1U;

    av_assert0(f->version > 2);


    /* Reject slice rectangles outside the slice grid; +1U above makes
     * overflowed widths/heights show up as negative here. */
    if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
        return AVERROR_INVALIDDATA;
    if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
        return AVERROR_INVALIDDATA;

    /* Map slice-grid coordinates to pixels; 64-bit intermediates avoid
     * overflow for large frame dimensions. */
    fs->slice_x      =  sx       * (int64_t)f->width  / f->num_h_slices;
    fs->slice_y      =  sy       * (int64_t)f->height / f->num_v_slices;
    fs->slice_width  = (sx + sw) * (int64_t)f->width  / f->num_h_slices - fs->slice_x;
    fs->slice_height = (sy + sh) * (int64_t)f->height / f->num_v_slices - fs->slice_y;

    av_assert0((unsigned)fs->slice_width  <= f->width &&
               (unsigned)fs->slice_height <= f->height);
    av_assert0 (   (unsigned)fs->slice_x + (uint64_t)fs->slice_width  <= f->width
                && (unsigned)fs->slice_y + (uint64_t)fs->slice_height <= f->height);

    if (fs->ac == AC_GOLOMB_RICE && fs->slice_width >= (1<<23))
        return AVERROR_INVALIDDATA;

    for (i = 0; i < f->plane_count; i++) {
        PlaneContext * const p = &fs->plane[i];
        int idx = get_symbol(c, state, 0);
        /* idx may be negative; the unsigned compare catches that too. */
        if (idx >= (unsigned)f->quant_table_count) {
            av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
            return -1;
        }
        p->quant_table_index = idx;
        memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
        context_count = f->context_count[idx];

        /* Growing tables are freed here and reallocated lazily later. */
        if (p->context_count < context_count) {
            av_freep(&p->state);
            av_freep(&p->vlc_state);
        }
        /* NOTE(review): upstream assigns p->context_count = context_count
         * here; that line appears lost in extraction — confirm. */
    }

    /* picture_structure: 1 = interlaced TFF, 2 = interlaced BFF,
     * 3 = progressive; other values leave the frame flags untouched. */
    ps = get_symbol(c, state, 0);
    if (ps == 1) {
        f->cur->flags |= AV_FRAME_FLAG_INTERLACED;
        f->cur->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
    } else if (ps == 2) {
        f->cur->flags |= AV_FRAME_FLAG_INTERLACED;
        f->cur->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
    } else if (ps == 3) {
        f->cur->flags &= ~AV_FRAME_FLAG_INTERLACED;
    }
    f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
    f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);

    /* Invalid SAR is non-fatal: warn and fall back to unknown (0/1). */
    if (av_image_check_sar(f->width, f->height,
                           f->cur->sample_aspect_ratio) < 0) {
        av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
               f->cur->sample_aspect_ratio.num,
               f->cur->sample_aspect_ratio.den);
        f->cur->sample_aspect_ratio = (AVRational){ 0, 1 };
    }

    if (fs->version > 3) {
        fs->slice_reset_contexts = get_rac(c, state);
        fs->slice_coding_mode = get_symbol(c, state, 0);
        if (fs->slice_coding_mode != 1) {
            fs->slice_rct_by_coef = get_symbol(c, state, 0);
            fs->slice_rct_ry_coef = get_symbol(c, state, 0);
            /* uint64_t sum guards against signed-overflow tricks. */
            if ((uint64_t)fs->slice_rct_by_coef + (uint64_t)fs->slice_rct_ry_coef > 4) {
                av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n");
                return AVERROR_INVALIDDATA;
            }
        }
    }

    return 0;
}
254 
/**
 * Decode one slice of the current frame; runs as an avctx->execute()
 * worker. `arg` points at the slice's FFV1Context pointer.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_slice(AVCodecContext *c, void *arg)
{
    FFV1Context *fs = *(void **)arg;
    FFV1Context *f = fs->avctx->priv_data;
    int width, height, x, y, ret;
    const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
    AVFrame * const p = f->cur;
    int i, si;

    /* Recover this slice's index within the frame's slice table. */
    for( si=0; fs != f->slice_context[si]; si ++)
        ;

    /* Non-key frames carry their contexts over from the previous frame,
     * so wait until the co-located slice of that frame is done. */
    if(f->fsrc && !(p->flags & AV_FRAME_FLAG_KEY))
        ff_thread_await_progress(&f->last_picture, si, 0);

    if(f->fsrc && !(p->flags & AV_FRAME_FLAG_KEY)) {
        FFV1Context *fssrc = f->fsrc->slice_context[si];
        FFV1Context *fsdst = f->slice_context[si];
        av_assert1(fsdst->plane_count == fssrc->plane_count);
        av_assert1(fsdst == fs);

        /* Damage propagates across frames until the next keyframe. */
        if (!(p->flags & AV_FRAME_FLAG_KEY))
            fsdst->slice_damaged |= fssrc->slice_damaged;

        /* Deep-copy per-plane context state from the source thread's
         * slice; the struct memcpy must not alias the heap buffers. */
        for (i = 0; i < f->plane_count; i++) {
            PlaneContext *psrc = &fssrc->plane[i];
            PlaneContext *pdst = &fsdst->plane[i];

            av_free(pdst->state);
            av_free(pdst->vlc_state);
            memcpy(pdst, psrc, sizeof(*pdst));
            pdst->state = NULL;
            pdst->vlc_state = NULL;

            if (fssrc->ac) {
                /* NOTE(review): upstream allocates pdst->state
                 * (av_malloc_array) immediately before this memcpy; that
                 * line appears lost in extraction — confirm. */
                memcpy(pdst->state, psrc->state, CONTEXT_SIZE * psrc->context_count);
            } else {
                pdst->vlc_state = av_malloc_array(sizeof(*pdst->vlc_state), psrc->context_count);
                memcpy(pdst->vlc_state, psrc->vlc_state, sizeof(*pdst->vlc_state) * psrc->context_count);
            }
        }
    }

    fs->slice_rct_by_coef = 1;
    fs->slice_rct_ry_coef = 1;

    if (f->version > 2) {
        if (ff_ffv1_init_slice_state(f, fs) < 0)
            return AVERROR(ENOMEM);
        if (decode_slice_header(f, fs) < 0) {
            /* Neutralize the slice geometry so later stages skip it. */
            fs->slice_x = fs->slice_y = fs->slice_height = fs->slice_width = 0;
            fs->slice_damaged = 1;
            return AVERROR_INVALIDDATA;
        }
    }
    if ((ret = ff_ffv1_init_slice_state(f, fs)) < 0)
        return ret;
    if ((f->cur->flags & AV_FRAME_FLAG_KEY) || fs->slice_reset_contexts) {
        /* NOTE(review): upstream resets the slice contexts here
         * (ff_ffv1_clear_slice_state); that call appears lost in
         * extraction — confirm. */
    } else if (fs->slice_damaged) {
        return AVERROR_INVALIDDATA;
    }

    width  = fs->slice_width;
    height = fs->slice_height;
    x      = fs->slice_x;
    y      = fs->slice_y;

    if (fs->ac == AC_GOLOMB_RICE) {
        /* Flush the range coder, then continue with a bit reader over
         * the remaining Golomb-Rice coded payload. */
        if (f->version == 3 && f->micro_version > 1 || f->version > 3)
            get_rac(&fs->c, (uint8_t[]) { 129 });
        fs->ac_byte_count = f->version > 2 || (!x && !y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
        init_get_bits(&fs->gb,
                      fs->c.bytestream_start + fs->ac_byte_count,
                      (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
    }

    av_assert1(width && height);
    if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) {
        /* Planar YUV (+ optional alpha). Chroma planes 1 and 2 share
         * plane_index 1 (one context model for both). */
        const int chroma_width  = AV_CEIL_RSHIFT(width,  f->chroma_h_shift);
        const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
        const int cx            = x >> f->chroma_h_shift;
        const int cy            = y >> f->chroma_v_shift;
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);

        if (f->chroma_planes) {
            decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
            decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
        }
        if (fs->transparency)
            decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1);
    } else if (f->colorspace == 0) {
        /* Gray + alpha interleaved in plane 0 (pixel_stride 2). */
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0]    , width, height, p->linesize[0], 0, 2);
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2);
    } else if (f->use32bit) {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        decode_rgb_frame32(fs, planes, width, height, p->linesize);
    } else {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        /* NOTE(review): upstream calls the 16-bit decode_rgb_frame()
         * here; that line appears lost in extraction — confirm. */
    }
    if (fs->ac != AC_GOLOMB_RICE && f->version > 2) {
        /* Check the slice consumed exactly its byte budget (2 flush
         * bytes plus 5 bytes of CRC trailer when error checking is on);
         * any mismatch marks the slice damaged. */
        int v;
        get_rac(&fs->c, (uint8_t[]) { 129 });
        v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
        if (v) {
            av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
            fs->slice_damaged = 1;
        }
    }

    ff_thread_report_progress(&f->picture, si, 0);

    return 0;
}
377 
378 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
379 {
380  int v;
381  int i = 0;
382  uint8_t state[CONTEXT_SIZE];
383 
384  memset(state, 128, sizeof(state));
385 
386  for (v = 0; i < 128; v++) {
387  unsigned len = get_symbol(c, state, 0) + 1U;
388 
389  if (len > 128 - i || !len)
390  return AVERROR_INVALIDDATA;
391 
392  while (len--) {
393  quant_table[i] = scale * v;
394  i++;
395  }
396  }
397 
398  for (i = 1; i < 128; i++)
399  quant_table[256 - i] = -quant_table[i];
400  quant_table[128] = -quant_table[127];
401 
402  return 2 * v - 1;
403 }
404 
406  int16_t quant_table[MAX_CONTEXT_INPUTS][256])
407 {
408  int i;
409  int context_count = 1;
410 
411  for (i = 0; i < 5; i++) {
413  if (ret < 0)
414  return ret;
415  context_count *= ret;
416  if (context_count > 32768U) {
417  return AVERROR_INVALIDDATA;
418  }
419  }
420  return (context_count + 1) / 2;
421 }
422 
424 {
425  RangeCoder *const c = &f->c;
426  uint8_t state[CONTEXT_SIZE];
427  int i, j, k, ret;
428  uint8_t state2[32][CONTEXT_SIZE];
429  unsigned crc = 0;
430 
431  memset(state2, 128, sizeof(state2));
432  memset(state, 128, sizeof(state));
433 
434  ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
435  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
436 
437  f->version = get_symbol(c, state, 0);
438  if (f->version < 2) {
439  av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
440  return AVERROR_INVALIDDATA;
441  }
442  if (f->version > 4) {
443  av_log(f->avctx, AV_LOG_ERROR, "unsupported version %d\n",
444  f->version);
445  return AVERROR_PATCHWELCOME;
446  }
447  if (f->version > 2) {
448  c->bytestream_end -= 4;
449  f->micro_version = get_symbol(c, state, 0);
450  if (f->micro_version < 0)
451  return AVERROR_INVALIDDATA;
452  }
453  f->ac = get_symbol(c, state, 0);
454 
455  if (f->ac == AC_RANGE_CUSTOM_TAB) {
456  for (i = 1; i < 256; i++)
457  f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
458  }
459 
460  f->colorspace = get_symbol(c, state, 0); //YUV cs type
461  f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
462  f->chroma_planes = get_rac(c, state);
463  f->chroma_h_shift = get_symbol(c, state, 0);
464  f->chroma_v_shift = get_symbol(c, state, 0);
465  f->transparency = get_rac(c, state);
466  f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency;
467  f->num_h_slices = 1 + get_symbol(c, state, 0);
468  f->num_v_slices = 1 + get_symbol(c, state, 0);
469 
470  if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
471  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
472  f->chroma_h_shift, f->chroma_v_shift);
473  return AVERROR_INVALIDDATA;
474  }
475 
476  if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
477  f->num_v_slices > (unsigned)f->height || !f->num_v_slices
478  ) {
479  av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
480  return AVERROR_INVALIDDATA;
481  }
482 
483  if (f->num_h_slices > MAX_SLICES / f->num_v_slices) {
484  av_log(f->avctx, AV_LOG_ERROR, "slice count unsupported\n");
485  return AVERROR_PATCHWELCOME;
486  }
487 
488  f->quant_table_count = get_symbol(c, state, 0);
489  if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
490  av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
491  f->quant_table_count = 0;
492  return AVERROR_INVALIDDATA;
493  }
494 
495  for (i = 0; i < f->quant_table_count; i++) {
496  f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
497  if (f->context_count[i] < 0) {
498  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
499  return AVERROR_INVALIDDATA;
500  }
501  }
503  return ret;
504 
505  for (i = 0; i < f->quant_table_count; i++)
506  if (get_rac(c, state)) {
507  for (j = 0; j < f->context_count[i]; j++)
508  for (k = 0; k < CONTEXT_SIZE; k++) {
509  int pred = j ? f->initial_states[i][j - 1][k] : 128;
510  f->initial_states[i][j][k] =
511  (pred + get_symbol(c, state2[k], 1)) & 0xFF;
512  }
513  }
514 
515  if (f->version > 2) {
516  f->ec = get_symbol(c, state, 0);
517  if (f->micro_version > 2)
518  f->intra = get_symbol(c, state, 0);
519  }
520 
521  if (f->version > 2) {
522  unsigned v;
524  f->avctx->extradata, f->avctx->extradata_size);
525  if (v || f->avctx->extradata_size < 4) {
526  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
527  return AVERROR_INVALIDDATA;
528  }
529  crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4);
530  }
531 
532  if (f->avctx->debug & FF_DEBUG_PICT_INFO)
533  av_log(f->avctx, AV_LOG_DEBUG,
534  "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
535  f->version, f->micro_version,
536  f->ac,
537  f->colorspace,
538  f->avctx->bits_per_raw_sample,
539  f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift,
540  f->transparency,
541  f->num_h_slices, f->num_v_slices,
542  f->quant_table_count,
543  f->ec,
544  f->intra,
545  crc
546  );
547  return 0;
548 }
549 
551 {
552  uint8_t state[CONTEXT_SIZE];
553  int i, j, context_count = -1; //-1 to avoid warning
554  RangeCoder *const c = &f->slice_context[0]->c;
555 
556  memset(state, 128, sizeof(state));
557 
558  if (f->version < 2) {
560  unsigned v= get_symbol(c, state, 0);
561  if (v >= 2) {
562  av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
563  return AVERROR_INVALIDDATA;
564  }
565  f->version = v;
566  f->ac = get_symbol(c, state, 0);
567 
568  if (f->ac == AC_RANGE_CUSTOM_TAB) {
569  for (i = 1; i < 256; i++) {
570  int st = get_symbol(c, state, 1) + c->one_state[i];
571  if (st < 1 || st > 255) {
572  av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
573  return AVERROR_INVALIDDATA;
574  }
575  f->state_transition[i] = st;
576  }
577  }
578 
579  colorspace = get_symbol(c, state, 0); //YUV cs type
580  bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
585  if (colorspace == 0 && f->avctx->skip_alpha)
586  transparency = 0;
587 
588  if (f->plane_count) {
589  if (colorspace != f->colorspace ||
590  bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
591  chroma_planes != f->chroma_planes ||
592  chroma_h_shift != f->chroma_h_shift ||
593  chroma_v_shift != f->chroma_v_shift ||
594  transparency != f->transparency) {
595  av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
596  return AVERROR_INVALIDDATA;
597  }
598  }
599 
600  if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
601  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
603  return AVERROR_INVALIDDATA;
604  }
605 
606  f->colorspace = colorspace;
607  f->avctx->bits_per_raw_sample = bits_per_raw_sample;
608  f->chroma_planes = chroma_planes;
609  f->chroma_h_shift = chroma_h_shift;
610  f->chroma_v_shift = chroma_v_shift;
611  f->transparency = transparency;
612 
613  f->plane_count = 2 + f->transparency;
614  }
615 
616  if (f->colorspace == 0) {
617  if (!f->transparency && !f->chroma_planes) {
618  if (f->avctx->bits_per_raw_sample <= 8)
619  f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
620  else if (f->avctx->bits_per_raw_sample == 9) {
621  f->packed_at_lsb = 1;
622  f->avctx->pix_fmt = AV_PIX_FMT_GRAY9;
623  } else if (f->avctx->bits_per_raw_sample == 10) {
624  f->packed_at_lsb = 1;
625  f->avctx->pix_fmt = AV_PIX_FMT_GRAY10;
626  } else if (f->avctx->bits_per_raw_sample == 12) {
627  f->packed_at_lsb = 1;
628  f->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
629  } else if (f->avctx->bits_per_raw_sample == 14) {
630  f->packed_at_lsb = 1;
631  f->avctx->pix_fmt = AV_PIX_FMT_GRAY14;
632  } else if (f->avctx->bits_per_raw_sample == 16) {
633  f->packed_at_lsb = 1;
634  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
635  } else if (f->avctx->bits_per_raw_sample < 16) {
636  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
637  } else
638  return AVERROR(ENOSYS);
639  } else if (f->transparency && !f->chroma_planes) {
640  if (f->avctx->bits_per_raw_sample <= 8)
641  f->avctx->pix_fmt = AV_PIX_FMT_YA8;
642  else
643  return AVERROR(ENOSYS);
644  } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
645  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
646  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
647  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
648  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
649  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
650  case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
651  case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
652  }
653  } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
654  switch(16*f->chroma_h_shift + f->chroma_v_shift) {
655  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
656  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
657  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
658  }
659  } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
660  f->packed_at_lsb = 1;
661  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
662  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
663  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
664  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
665  }
666  } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
667  f->packed_at_lsb = 1;
668  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
669  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
670  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
671  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
672  }
673  } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
674  f->packed_at_lsb = 1;
675  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
676  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
677  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P10; break;
678  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
679  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
680  }
681  } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
682  f->packed_at_lsb = 1;
683  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
684  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
685  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
686  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
687  }
688  } else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
689  f->packed_at_lsb = 1;
690  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
691  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P12; break;
692  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P12; break;
693  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P12; break;
694  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P12; break;
695  }
696  } else if (f->avctx->bits_per_raw_sample == 12 && f->transparency) {
697  f->packed_at_lsb = 1;
698  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
699  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P12; break;
700  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P12; break;
701  }
702  } else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
703  f->packed_at_lsb = 1;
704  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
705  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P14; break;
706  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P14; break;
707  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P14; break;
708  }
709  } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
710  f->packed_at_lsb = 1;
711  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
712  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
713  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
714  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
715  }
716  } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
717  f->packed_at_lsb = 1;
718  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
719  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
720  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
721  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
722  }
723  }
724  } else if (f->colorspace == 1) {
725  if (f->chroma_h_shift || f->chroma_v_shift) {
726  av_log(f->avctx, AV_LOG_ERROR,
727  "chroma subsampling not supported in this colorspace\n");
728  return AVERROR(ENOSYS);
729  }
730  if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
731  f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
732  else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency)
733  f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
734  else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency)
735  f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
736  else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
737  f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
738  else if (f->avctx->bits_per_raw_sample == 10 && f->transparency)
739  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
740  else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
741  f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
742  else if (f->avctx->bits_per_raw_sample == 12 && f->transparency)
743  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
744  else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
745  f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
746  else if (f->avctx->bits_per_raw_sample == 14 && f->transparency)
747  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
748  else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
749  f->avctx->pix_fmt = AV_PIX_FMT_GBRP16;
750  f->use32bit = 1;
751  }
752  else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
753  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
754  f->use32bit = 1;
755  }
756  } else {
757  av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
758  return AVERROR(ENOSYS);
759  }
760  if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
761  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
762  return AVERROR(ENOSYS);
763  }
764 
765  ff_dlog(f->avctx, "%d %d %d\n",
766  f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
767  if (f->version < 2) {
768  context_count = read_quant_tables(c, f->quant_table);
769  if (context_count < 0) {
770  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
771  return AVERROR_INVALIDDATA;
772  }
773  f->slice_count = f->max_slice_count;
774  } else if (f->version < 3) {
775  f->slice_count = get_symbol(c, state, 0);
776  } else {
777  const uint8_t *p = c->bytestream_end;
778  for (f->slice_count = 0;
779  f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start;
780  f->slice_count++) {
781  int trailer = 3 + 5*!!f->ec;
782  int size = AV_RB24(p-trailer);
783  if (size + trailer > p - c->bytestream_start)
784  break;
785  p -= size + trailer;
786  }
787  }
788  if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
789  av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
790  return AVERROR_INVALIDDATA;
791  }
792 
793  for (j = 0; j < f->slice_count; j++) {
794  FFV1Context *fs = f->slice_context[j];
795  fs->ac = f->ac;
796  fs->packed_at_lsb = f->packed_at_lsb;
797 
798  fs->slice_damaged = 0;
799 
800  if (f->version == 2) {
801  int sx = get_symbol(c, state, 0);
802  int sy = get_symbol(c, state, 0);
803  int sw = get_symbol(c, state, 0) + 1U;
804  int sh = get_symbol(c, state, 0) + 1U;
805 
806  if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
807  return AVERROR_INVALIDDATA;
808  if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
809  return AVERROR_INVALIDDATA;
810 
811  fs->slice_x = sx * (int64_t)f->width / f->num_h_slices;
812  fs->slice_y = sy * (int64_t)f->height / f->num_v_slices;
813  fs->slice_width = (sx + sw) * (int64_t)f->width / f->num_h_slices - fs->slice_x;
814  fs->slice_height = (sy + sh) * (int64_t)f->height / f->num_v_slices - fs->slice_y;
815 
816  av_assert0((unsigned)fs->slice_width <= f->width &&
817  (unsigned)fs->slice_height <= f->height);
818  av_assert0 ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width <= f->width
819  && (unsigned)fs->slice_y + (uint64_t)fs->slice_height <= f->height);
820  }
821 
822  for (i = 0; i < f->plane_count; i++) {
823  PlaneContext *const p = &fs->plane[i];
824 
825  if (f->version == 2) {
826  int idx = get_symbol(c, state, 0);
827  if (idx >= (unsigned)f->quant_table_count) {
828  av_log(f->avctx, AV_LOG_ERROR,
829  "quant_table_index out of range\n");
830  return AVERROR_INVALIDDATA;
831  }
832  p->quant_table_index = idx;
833  memcpy(p->quant_table, f->quant_tables[idx],
834  sizeof(p->quant_table));
835  context_count = f->context_count[idx];
836  } else {
837  memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
838  }
839 
840  if (f->version <= 2) {
842  if (p->context_count < context_count) {
843  av_freep(&p->state);
844  av_freep(&p->vlc_state);
845  }
847  }
848  }
849  }
850  return 0;
851 }
852 
854 {
856  int ret;
857 
858  if ((ret = ff_ffv1_common_init(avctx)) < 0)
859  return ret;
860 
861  f->picture.f = av_frame_alloc();
862  f->last_picture.f = av_frame_alloc();
863  if (!f->picture.f || !f->last_picture.f)
864  return AVERROR(ENOMEM);
865 
866  if (avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0)
867  return ret;
868 
869  if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
870  return ret;
871 
872  return 0;
873 }
874 
876  int *got_frame, AVPacket *avpkt)
877 {
878  uint8_t *buf = avpkt->data;
879  int buf_size = avpkt->size;
881  RangeCoder *const c = &f->slice_context[0]->c;
882  int i, ret;
883  uint8_t keystate = 128;
884  uint8_t *buf_p;
885  AVFrame *p;
886 
887  if (f->last_picture.f)
888  ff_thread_release_ext_buffer(&f->last_picture);
889  FFSWAP(ThreadFrame, f->picture, f->last_picture);
890 
891  f->cur = p = f->picture.f;
892 
893  if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
894  /* we have interlaced material flagged in container */
898  }
899 
900  f->avctx = avctx;
901  ff_init_range_decoder(c, buf, buf_size);
902  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
903 
904  p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
905  if (get_rac(c, &keystate)) {
906  p->flags |= AV_FRAME_FLAG_KEY;
907  f->key_frame_ok = 0;
908  if ((ret = read_header(f)) < 0)
909  return ret;
910  f->key_frame_ok = 1;
911  } else {
912  if (!f->key_frame_ok) {
914  "Cannot decode non-keyframe without valid keyframe\n");
915  return AVERROR_INVALIDDATA;
916  }
917  p->flags &= ~AV_FRAME_FLAG_KEY;
918  }
919 
920  if (f->ac != AC_GOLOMB_RICE) {
921  if (buf_size < avctx->width * avctx->height / (128*8))
922  return AVERROR_INVALIDDATA;
923  } else {
924  int w = avctx->width;
925  int s = 1 + w / (1<<23);
926 
927  w /= s;
928 
929  for (i = 0; w > (1<<ff_log2_run[i]); i++)
930  w -= ff_log2_run[i];
931  if (buf_size < (avctx->height + i + 6) / 8 * s)
932  return AVERROR_INVALIDDATA;
933  }
934 
936  if (ret < 0)
937  return ret;
938 
940  av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
941  f->version, !!(p->flags & AV_FRAME_FLAG_KEY), f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
942 
944 
945  buf_p = buf + buf_size;
946  for (i = f->slice_count - 1; i >= 0; i--) {
947  FFV1Context *fs = f->slice_context[i];
948  int trailer = 3 + 5*!!f->ec;
949  int v;
950 
951  if (i || f->version > 2) {
952  if (trailer > buf_p - buf) v = INT_MAX;
953  else v = AV_RB24(buf_p-trailer) + trailer;
954  } else v = buf_p - c->bytestream_start;
955  if (buf_p - c->bytestream_start < v) {
956  av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
957  ff_thread_report_progress(&f->picture, INT_MAX, 0);
958  return AVERROR_INVALIDDATA;
959  }
960  buf_p -= v;
961 
962  if (f->ec) {
963  unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
964  if (crc) {
965  int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
966  av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!", crc);
967  if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
968  av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
969  } else if (ts != AV_NOPTS_VALUE) {
970  av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
971  } else {
972  av_log(f->avctx, AV_LOG_ERROR, "\n");
973  }
974  fs->slice_damaged = 1;
975  }
976  if (avctx->debug & FF_DEBUG_PICT_INFO) {
977  av_log(avctx, AV_LOG_DEBUG, "slice %d, CRC: 0x%08"PRIX32"\n", i, AV_RB32(buf_p + v - 4));
978  }
979  }
980 
981  if (i) {
982  ff_init_range_decoder(&fs->c, buf_p, v);
983  } else
984  fs->c.bytestream_end = buf_p + v;
985 
986  fs->avctx = avctx;
987  }
988 
990  decode_slice,
991  &f->slice_context[0],
992  NULL,
993  f->slice_count,
994  sizeof(void*));
995 
996  for (i = f->slice_count - 1; i >= 0; i--) {
997  FFV1Context *fs = f->slice_context[i];
998  int j;
999  if (fs->slice_damaged && f->last_picture.f->data[0]) {
1001  const uint8_t *src[4];
1002  uint8_t *dst[4];
1003  ff_thread_await_progress(&f->last_picture, INT_MAX, 0);
1004  for (j = 0; j < desc->nb_components; j++) {
1005  int pixshift = desc->comp[j].depth > 8;
1006  int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
1007  int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
1008  dst[j] = p->data[j] + p->linesize[j] *
1009  (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
1010  src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
1011  (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
1012 
1013  }
1014  if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
1015  dst[1] = p->data[1];
1016  src[1] = f->last_picture.f->data[1];
1017  }
1018  av_image_copy(dst, p->linesize, src,
1019  f->last_picture.f->linesize,
1020  avctx->pix_fmt,
1021  fs->slice_width,
1022  fs->slice_height);
1023  }
1024  }
1025  ff_thread_report_progress(&f->picture, INT_MAX, 0);
1026 
1027  if (f->last_picture.f)
1028  ff_thread_release_ext_buffer(&f->last_picture);
1029  if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
1030  return ret;
1031 
1032  *got_frame = 1;
1033 
1034  return buf_size;
1035 }
1036 
/**
 * Copy persistent FFV1 decoder state into a destination context.
 *
 * Stream-global parameters (version, plane count, coder type, colorspace,
 * error-correction mode, ...) are read from the main source context @p fsrc,
 * while per-slice state (the damage flag and, for version < 3, the slice
 * geometry) is read from the matching slice context @p fssrc.
 *
 * NOTE(review): this listing is an extraction of the original file; the
 * lines numbered 1043-1044 are missing here (presumably the chroma
 * subsampling shift copies) — consult the original source before relying
 * on this listing being complete.
 */
1037 static void copy_fields(FFV1Context *fsdst, const FFV1Context *fssrc,
1038  const FFV1Context *fsrc)
1039 {
1040  fsdst->version = fsrc->version;
1041  fsdst->micro_version = fsrc->micro_version;
1042  fsdst->chroma_planes = fsrc->chroma_planes;
1045  fsdst->transparency = fsrc->transparency;
1046  fsdst->plane_count = fsrc->plane_count;
1047  fsdst->ac = fsrc->ac;
1048  fsdst->colorspace = fsrc->colorspace;
1049 
1050  fsdst->ec = fsrc->ec;
1051  fsdst->intra = fsrc->intra;
     /* damage status is per slice, so it comes from the slice context */
1052  fsdst->slice_damaged = fssrc->slice_damaged;
1053  fsdst->key_frame_ok = fsrc->key_frame_ok;
1054 
1055  fsdst->packed_at_lsb = fsrc->packed_at_lsb;
1056  fsdst->slice_count = fsrc->slice_count;
     /* before version 3 the slice geometry is not re-coded per frame,
      * so it is carried over from the corresponding slice context */
1057  if (fsrc->version<3){
1058  fsdst->slice_x = fssrc->slice_x;
1059  fsdst->slice_y = fssrc->slice_y;
1060  fsdst->slice_width = fssrc->slice_width;
1061  fsdst->slice_height = fssrc->slice_height;
1062  }
1063 }
1064 
1065 #if HAVE_THREADS
/**
 * Frame-threading hook: bring this thread's decoder context up to date with
 * the context of the thread that decoded the previous frame.
 *
 * Copies stream parameters into the main context and into every slice
 * context, then re-references the previous thread's picture so inter-frame
 * prediction can wait on its progress.
 *
 * @return 0 on success, a negative AVERROR on failure.
 *
 * NOTE(review): extraction gaps — the original lines 1089 and 1092 are
 * missing from this listing; do not treat it as complete.
 */
1066 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1067 {
1068  FFV1Context *fsrc = src->priv_data;
1069  FFV1Context *fdst = dst->priv_data;
1070  int i, ret;
1071 
     /* nothing to do when updating a context from itself */
1072  if (dst == src)
1073  return 0;
1074 
1075  copy_fields(fdst, fsrc, fsrc);
1076  fdst->use32bit = fsrc->use32bit;
1077  memcpy(fdst->state_transition, fsrc->state_transition,
1078  sizeof(fdst->state_transition));
1079  memcpy(fdst->quant_table, fsrc->quant_table, sizeof(fsrc->quant_table));
1080 
     /* propagate the copied fields into every per-slice context */
1081  for (i = 0; i < fdst->num_h_slices * fdst->num_v_slices; i++) {
1082  FFV1Context *fssrc = fsrc->slice_context[i];
1083  FFV1Context *fsdst = fdst->slice_context[i];
1084  copy_fields(fsdst, fssrc, fsrc);
1085  }
     /* per-thread state that must be (re)allocated lazily, never inherited */
1086  av_assert0(!fdst->plane[0].state);
1087  av_assert0(!fdst->sample_buffer);
1088 
1090 
1091 
     /* take a new reference on the previous thread's decoded picture */
1093  if (fsrc->picture.f->data[0]) {
1094  if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
1095  return ret;
1096  }
1097 
     /* remember the source context so decode_frame can pull slice state */
1098  fdst->fsrc = fsrc;
1099 
1100  return 0;
1101 }
1102 #endif
1103 
/**
 * ffv1_decode_close(): decoder close callback.
 *
 * Releases and frees both ThreadFrames (current and previous picture),
 * then delegates the remaining cleanup to ff_ffv1_close().
 *
 * NOTE(review): the signature line (original line 1104,
 * `static av_cold int ffv1_decode_close(AVCodecContext *avctx)` per the
 * cross-reference index below) is missing from this extracted listing.
 */
1105 {
1106  FFV1Context *const s = avctx->priv_data;
1107 
1108  if (s->picture.f) {
1109  ff_thread_release_ext_buffer(&s->picture);
1110  av_frame_free(&s->picture.f);
1111  }
1112 
1113  if (s->last_picture.f) {
1114  ff_thread_release_ext_buffer(&s->last_picture);
1115  av_frame_free(&s->last_picture.f);
1116  }
1117  return ff_ffv1_close(avctx);
1118 }
1119 
/*
 * ff_ffv1_decoder: codec registration table for the FFV1 decoder.
 *
 * NOTE(review): extraction gaps — the opening line (original 1120,
 * `const FFCodec ff_ffv1_decoder = {` per the index below) and the
 * original lines 1128-1129, 1131 and 1133 (the decode callback, thread
 * hooks and the continuations of the capability flag lists) are missing
 * from this listing; the designators shown here are only a subset.
 */
1121  .p.name = "ffv1",
1122  CODEC_LONG_NAME("FFmpeg video codec #1"),
1123  .p.type = AVMEDIA_TYPE_VIDEO,
1124  .p.id = AV_CODEC_ID_FFV1,
1125  .priv_data_size = sizeof(FFV1Context),
1126  .init = decode_init,
1127  .close = ffv1_decode_close,
1130  .p.capabilities = AV_CODEC_CAP_DR1 |
1132  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1134 };
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:522
read_extra_header
static int read_extra_header(FFV1Context *f)
Definition: ffv1dec.c:423
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:501
FFV1Context::chroma_v_shift
int chroma_v_shift
Definition: ffv1.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FFV1Context::key_frame_ok
int key_frame_ok
Definition: ffv1.h:114
update_vlc_state
static void update_vlc_state(VlcState *const state, const int v)
Definition: ffv1.h:157
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
AVERROR(e): construct a negative FFmpeg error code from a positive POSIX errno value. (The original description here was garbled by extraction, fused with an unrelated libavfilter format-negotiation documentation page.)
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
is_input_end
static int is_input_end(FFV1Context *s)
Definition: ffv1dec.c:97
FFV1Context::context_count
int context_count[MAX_QUANT_TABLES]
Definition: ffv1.h:101
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
MAX_OVERREAD
#define MAX_OVERREAD
Definition: lagarithrac.h:49
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:200
FFV1Context::ec
int ec
Definition: ffv1.h:111
int64_t
long long int64_t
Definition: coverity.c:34
FFV1Context::gb
GetBitContext gb
Definition: ffv1.h:77
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:514
get_sr_golomb
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1).
Definition: golomb.h:532
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
pixdesc.h
ff_ffv1_common_init
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
Definition: ffv1.c:36
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:521
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:522
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:516
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:708
FFV1Context::slice_x
int slice_x
Definition: ffv1.h:130
rangecoder.h
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
PlaneContext::state
uint8_t(* state)[CONTEXT_SIZE]
Definition: ffv1.h:66
FFCodec
Definition: codec_internal.h:127
FFV1Context::num_h_slices
int num_h_slices
Definition: ffv1.h:127
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
FFV1Context::slice_context
struct FFV1Context * slice_context[MAX_SLICES]
Definition: ffv1.h:123
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:616
read_quant_table
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
Definition: ffv1dec.c:378
AC_RANGE_CUSTOM_TAB
#define AC_RANGE_CUSTOM_TAB
Definition: ffv1.h:52
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:517
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
ff_thread_await_progress
ff_thread_await_progress(): in frame-threaded decoding, block until the owning thread has reported at least the given progress on a reference frame; buffer contents must not be read before this call returns. (Original description garbled by extraction from the multithreading documentation.)
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
FFV1Context::chroma_h_shift
int chroma_h_shift
Definition: ffv1.h:85
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:458
decode_line
static av_always_inline int RENAME() decode_line(FFV1Context *s, int w, TYPE *sample[2], int plane_index, int bits)
Definition: ffv1dec_template.c:25
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:608
crc.h
golomb.h
exp golomb vlc stuff
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:513
AV_FIELD_TT
@ AV_FIELD_TT
Top coded_first, top displayed first.
Definition: defs.h:201
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:523
ffv1_decode_close
static av_cold int ffv1_decode_close(AVCodecContext *avctx)
Definition: ffv1dec.c:1104
GetBitContext
Definition: get_bits.h:108
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:476
av_noinline
#define av_noinline
Definition: attributes.h:72
MAX_SLICES
#define MAX_SLICES
Definition: d3d12va_hevc.c:33
CONTEXT_SIZE
#define CONTEXT_SIZE
Definition: ffv1.h:45
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:462
get_symbol_inline
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:43
FFV1Context::chroma_planes
int chroma_planes
Definition: ffv1.h:84
PlaneContext::context_count
int context_count
Definition: ffv1.h:65
AVRational::num
int num
Numerator.
Definition: rational.h:59
AV_FIELD_TB
@ AV_FIELD_TB
Top coded first, bottom displayed first.
Definition: defs.h:203
ff_ffv1_clear_slice_state
void ff_ffv1_clear_slice_state(const FFV1Context *f, FFV1Context *fs)
Definition: ffv1.c:163
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:118
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:490
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
FFV1Context::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffv1.h:117
FFV1Context::use32bit
int use32bit
Definition: ffv1.h:109
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:498
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:287
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP14
#define AV_PIX_FMT_GBRAP14
Definition: pixfmt.h:500
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:499
FFV1Context::slice_count
int slice_count
Definition: ffv1.h:124
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:491
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
Definition: ffv1dec.c:875
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
FFV1Context::plane
PlaneContext plane[MAX_PLANES]
Definition: ffv1.h:98
FFV1Context::max_slice_count
int max_slice_count
Definition: ffv1.h:125
FFV1Context::slice_damaged
int slice_damaged
Definition: ffv1.h:113
bits
uint8_t bits
Definition: vp3data.h:128
FFV1Context::intra
int intra
Definition: ffv1.h:112
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
FFV1Context::quant_table
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
Definition: ffv1.h:99
FFV1Context::fsrc
struct FFV1Context * fsrc
Definition: ffv1.h:91
AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUVA444P12
Definition: pixfmt.h:520
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:475
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
read_quant_tables
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
Definition: ffv1dec.c:405
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:489
get_bits.h
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:461
fold
static av_always_inline int fold(int diff, int bits)
Definition: ffv1.h:146
FFV1Context::ac
int ac
1=range coder <-> 0=golomb rice
Definition: ffv1.h:96
get_vlc_symbol
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
Definition: ffv1dec.c:72
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
FFV1Context::plane_count
int plane_count
Definition: ffv1.h:95
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:853
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:459
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
threadframe.h
FFV1Context::slice_height
int slice_height
Definition: ffv1.h:129
quant_table
static const int16_t quant_table[64]
Definition: intrax8.c:516
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:497
read_header
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:550
get_symbol
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:67
NULL
#define NULL
Definition: coverity.c:32
PlaneContext::vlc_state
VlcState * vlc_state
Definition: ffv1.h:67
AC_GOLOMB_RICE
#define AC_GOLOMB_RICE
Definition: ffv1.h:50
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:200
FFV1Context::num_v_slices
int num_v_slices
Definition: ffv1.h:126
FFV1Context::colorspace
int colorspace
Definition: ffv1.h:105
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
ff_ffv1_decoder
const FFCodec ff_ffv1_decoder
Definition: ffv1dec.c:1120
FFV1Context::state_transition
uint8_t state_transition[256]
Definition: ffv1.h:102
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:996
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:480
mathops.h
PlaneContext
Definition: ffv1.h:62
state
static struct @385 state
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VlcState
Definition: ffv1.h:55
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
FFV1Context::slice_width
int slice_width
Definition: ffv1.h:128
ff_init_range_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: rangecoder.c:53
AV_CODEC_ID_FFV1
@ AV_CODEC_ID_FFV1
Definition: codec_id.h:85
f
f
Definition: af_crystalizer.c:121
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:446
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:523
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: vvc_intra.c:291
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:354
codec_internal.h
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:483
ff_ffv1_close
av_cold int ff_ffv1_close(AVCodecContext *avctx)
Definition: ffv1.c:191
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:551
sample
#define sample
Definition: flacdsp_template.c:44
FFV1Context::picture
ThreadFrame picture
Definition: ffv1.h:90
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:485
FFV1Context::sample_buffer
int16_t * sample_buffer
Definition: ffv1.h:106
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:521
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:518
PlaneContext::quant_table_index
int quant_table_index
Definition: ffv1.h:64
decode_plane
static int decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride)
Definition: ffv1dec.c:120
copy_fields
static void copy_fields(FFV1Context *fsdst, const FFV1Context *fssrc, const FFV1Context *fsrc)
Definition: ffv1dec.c:1037
ff_ffv1_init_slice_state
av_cold int ff_ffv1_init_slice_state(const FFV1Context *f, FFV1Context *fs)
Definition: ffv1.c:56
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:515
av_flatten
#define av_flatten
Definition: attributes.h:96
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FFV1Context::slice_y
int slice_y
Definition: ffv1.h:131
ffv1.h
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
update_thread_context
update_thread_context(): frame-threading hook that copies decoding state from the previous thread's context into the next thread's context before its decode starts; required for codecs with inter-frame dependencies using AV_CODEC_CAP_FRAME_THREADS. (Original description garbled by extraction from the multithreading documentation.)
len
int len
Definition: vorbis_enc_data.h:426
get_rac
static int get_rac(RangeCoder *c, uint8_t *const state)
Definition: rangecoder.h:127
AV_CRC_32_IEEE
@ AV_CRC_32_IEEE
Definition: crc.h:52
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:968
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:603
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:477
MAX_CONTEXT_INPUTS
#define MAX_CONTEXT_INPUTS
Definition: ffv1.h:48
FFV1Context::packed_at_lsb
int packed_at_lsb
Definition: ffv1.h:118
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
FFV1Context::avctx
AVCodecContext * avctx
Definition: ffv1.h:75
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:455
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:515
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:487
ff_ffv1_allocate_initial_states
int ff_ffv1_allocate_initial_states(FFV1Context *f)
Definition: ffv1.c:148
decode_rgb_frame
static int RENAME() decode_rgb_frame(FFV1Context *s, uint8_t *src[4], int w, int h, int stride[4])
Definition: ffv1dec_template.c:130
AVCodecContext
main external API structure.
Definition: avcodec.h:445
ThreadFrame
Definition: threadframe.h:27
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1612
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: ffv1dec.c:853
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_PIX_FMT_YUVA422P12
#define AV_PIX_FMT_YUVA422P12
Definition: pixfmt.h:519
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
temp
else temp
Definition: vf_mcdeint.c:263
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1396
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
MAX_QUANT_TABLES
#define MAX_QUANT_TABLES
Definition: ffv1.h:47
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
planes
static const struct @386 planes[]
FFV1Context
Definition: ffv1.h:73
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
FFV1Context::transparency
int transparency
Definition: ffv1.h:86
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ffv1dec_template.c
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
ff_ffv1_init_slice_contexts
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
Definition: ffv1.c:106
ff_log2_run
const uint8_t ff_log2_run[41]
Definition: mathtables.c:116
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
FFV1Context::micro_version
int micro_version
Definition: ffv1.h:82
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:484
h
h
Definition: vp9dsp_template.c:2038
RangeCoder
Definition: mss3.c:62
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:488
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:460
PlaneContext::quant_table
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
Definition: ffv1.h:63
av_image_copy
void av_image_copy(uint8_t *const dst_data[4], const int dst_linesizes[4], const uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
decode_slice_header
static int decode_slice_header(const FFV1Context *f, FFV1Context *fs)
Definition: ffv1dec.c:166
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
FFV1Context::version
int version
Definition: ffv1.h:81
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:486