FFmpeg
snowdec.c
/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/emms.h"
#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "decode.h"
#include "snow_dwt.h"
#include "snow.h"

#include "rangecoder.h"
#include "mathops.h"

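/*
 * Read one range-coded value: a unary exponent (contexts 1..10), explicit
 * mantissa bits (contexts 22..31) and, when is_signed is set, a sign bit
 * (contexts 11..21). Essentially an adaptive exp-Golomb code; values whose
 * exponent exceeds 31 are rejected as invalid data.
 */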
static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
{
    if (get_rac(c, state + 0))
        return 0;
    else {
        int e;
        unsigned a;
        e = 0;
        while (get_rac(c, state + 1 + FFMIN(e, 9))) { //1..10
            e++;
            if (e > 31)
                return AVERROR_INVALIDDATA;
        }

        a = 1;
        for (int i = e - 1; i >= 0; i--)
            a += a + get_rac(c, state + 22 + FFMIN(i, 9)); //22..31

        e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); //11..21
        return (a ^ e) - e;
    }
}

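/*
 * Read an unsigned value whose expected magnitude is roughly 2^log2: the
 * exponent is first refined adaptively (contexts 4+log2), then the remaining
 * low bits are read directly. Used below for run lengths and coefficient
 * magnitudes.
 */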
static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2)
{
    int r = log2 >= 0 ? 1 << log2 : 1;
    int v = 0;

    av_assert2(log2 >= -4);

    while (log2 < 28 && get_rac(c, state + 4 + log2)) {
        v += r;
        log2++;
        if (log2 > 0) r += r;
    }

    for (int i = log2 - 1; i >= 0; i--)
        v += get_rac(c, state + 31 - i) << i;

    return v;
}

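/*
 * Decode the nonzero coefficients of one subband into the sparse (x, coeff)
 * list b->x_coeff. The context for each coefficient is derived from its left,
 * top-left, top and top-right neighbours and from the co-located coefficient
 * of the parent band; runs of zero coefficients are coded explicitly.
 */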
static void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, int orientation)
{
    const int w = b->width;
    const int h = b->height;

    int run, runs;
    x_and_coeff *xc = b->x_coeff;
    x_and_coeff *prev_xc = NULL;
    x_and_coeff *prev2_xc = xc;
    x_and_coeff *parent_xc = parent ? parent->x_coeff : NULL;
    x_and_coeff *prev_parent_xc = parent_xc;

    runs = get_symbol2(&s->c, b->state[30], 0);
    if (runs-- > 0) run = get_symbol2(&s->c, b->state[1], 3);
    else            run = INT_MAX;

    for (int y = 0; y < h; y++) {
        int v = 0;
        int lt = 0, t = 0, rt = 0;

        if (y && prev_xc->x == 0)
            rt = prev_xc->coeff;

        for (int x = 0; x < w; x++) {
            int p = 0;
            const int l = v;

            lt= t; t= rt;

            if (y) {
                if (prev_xc->x <= x)
                    prev_xc++;
                if (prev_xc->x == x + 1)
                    rt = prev_xc->coeff;
                else
                    rt = 0;
            }
            if (parent_xc) {
                if (x>>1 > parent_xc->x)
                    parent_xc++;
                if (x>>1 == parent_xc->x)
                    p = parent_xc->coeff;
            }
            if (/*ll|*/l|lt|t|rt|p) {
                int context = av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));

                v = get_rac(&s->c, &b->state[0][context]);
                if (v) {
                    v = 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
                    v += get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3 * ff_quant3bA[t&0xFF]]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }
                    xc->x = x;
                    (xc++)->coeff = v;
                }
            } else {
                if (!run) {
                    if (runs-- > 0) run = get_symbol2(&s->c, b->state[1], 3);
                    else            run = INT_MAX;
                    v = 2 * (get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
                    v += get_rac(&s->c, &b->state[0][16 + 1 + 3]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }

                    xc->x = x;
                    (xc++)->coeff = v;
                } else {
                    int max_run;
                    run--;
                    v = 0;
                    av_assert2(run >= 0);
                    if (y) max_run = FFMIN(run, prev_xc->x - x - 2);
                    else   max_run = FFMIN(run, w-x-1);
                    if (parent_xc)
                        max_run = FFMIN(max_run, 2*parent_xc->x - x - 1);
                    av_assert2(max_run >= 0 && max_run <= run);

                    x += max_run;
                    run -= max_run;
                }
            }
        }
        (xc++)->x = w+1; //end marker
        prev_xc = prev2_xc;
        prev2_xc = xc;

        if (parent_xc) {
            if (y & 1) {
                while (parent_xc->x != parent->width+1)
                    parent_xc++;
                parent_xc++;
                prev_parent_xc= parent_xc;
            } else {
                parent_xc= prev_parent_xc;
            }
        }
    }

    (xc++)->x = w + 1; //end marker
}

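/*
 * Apply the prediction step for one row of macroblocks of a plane, working on
 * slice-buffer lines. For keyframes (and in the debug mode that bypasses
 * motion compensation) this only applies the +128 bias and clips to 8 bits;
 * otherwise each block is OBMC motion compensated via add_yblock(), and
 * motion vectors are exported when AV_CODEC_EXPORT_DATA_MVS was requested.
 */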
static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
//                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
                    line[x] -= 128 << FRAC_BITS;
//                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 1, sb, old_buffer, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 0, plane_index);
    }

    if(s->avmv && mb_y < mb_h && plane_index == 0)
        for(mb_x=0; mb_x<mb_w; mb_x++){
            AVMotionVector *avmv = s->avmv + s->avmv_index;
            const int b_width = s->b_width  << s->block_max_depth;
            const int b_stride= b_width;
            BlockNode *bn= &s->block[mb_x + mb_y*b_stride];

            if (bn->type)
                continue;

            s->avmv_index++;

            avmv->w = block_w;
            avmv->h = block_h;
            avmv->dst_x = block_w*mb_x - block_w/2;
            avmv->dst_y = block_h*mb_y - block_h/2;
            avmv->motion_scale = 8;
            avmv->motion_x = bn->mx * s->mv_scale;
            avmv->motion_y = bn->my * s->mv_scale;
            avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
            avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
            avmv->source= -1 - bn->ref;
            avmv->flags = 0;
        }
}

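/*
 * Expand the sparse coefficient list of one subband into dense slice-buffer
 * lines for rows [start_y, h), dequantizing on the fly; the quantizer is
 * bypassed for the band whose ibuf is the spatial IDWT buffer and in
 * lossless mode. save_state[0] carries the read index from one slice to the
 * next.
 */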
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

    if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
        qadd= 0;
        qmul= 1<<QEXPSHIFT;
    }

    /* If we are on the second or later slice, restore our index. */
    if (start_y != 0)
        new_index = save_state[0];


    for(y=start_y; y<h; y++){
        int x = 0;
        int v;
        IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
        memset(line, 0, b->width*sizeof(IDWTELEM));
        v = b->x_coeff[new_index].coeff;
        x = b->x_coeff[new_index++].x;
        while(x < w){
            register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
            register int u= -(v&1);
            line[x] = (t^u) - u;

            v = b->x_coeff[new_index].coeff;
            x = b->x_coeff[new_index++].x;
        }
    }

    /* Save our variables for the next slice. */
    save_state[0] = new_index;

    return;
}

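/*
 * Recursively decode the block tree: each node is either a leaf carrying an
 * intra colour or a motion vector plus reference index, or it splits into
 * four children one level deeper. On keyframes every block is forced to a
 * null intra block.
 */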
static int decode_q_branch(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int res;

    if(s->keyframe){
        set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
        return 0;
    }

    if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
        int type, mx, my;
        int l = left->color[0];
        int cb= left->color[1];
        int cr= left->color[2];
        unsigned ref = 0;
        int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
        int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
        int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));

        type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
        if(type){
            int ld, cbd, crd;
            pred_mv(s, &mx, &my, 0, left, top, tr);
            ld = get_symbol(&s->c, &s->block_state[32], 1);
            if (ld < -255 || ld > 255) {
                return AVERROR_INVALIDDATA;
            }
            l += ld;
            if (s->nb_planes > 2) {
                cbd = get_symbol(&s->c, &s->block_state[64], 1);
                crd = get_symbol(&s->c, &s->block_state[96], 1);
                if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
                    return AVERROR_INVALIDDATA;
                }
                cb += cbd;
                cr += crd;
            }
        }else{
            if(s->ref_frames > 1)
                ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
            if (ref >= s->ref_frames) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
                return AVERROR_INVALIDDATA;
            }
            pred_mv(s, &mx, &my, ref, left, top, tr);
            mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
            my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
        }
        set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
    }else{
        if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
            return res;
    }
    return 0;
}

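/*
 * Dequantize rows [start_y, end_y) of a subband in place within the slice
 * buffer; a no-op in lossless mode.
 */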
static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=start_y; y<end_y; y++){
//        DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            int i= line[x];
            if(i<0){
                line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                line[x]=  (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
}

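/*
 * Undo the spatial prediction of the DC (LL) band: each sample is
 * reconstructed from its left and upper neighbours with either a median or a
 * gradient predictor, depending on use_median.
 */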
static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
    const int w= b->width;
    int x,y;

    IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
    IDWTELEM * prev;

    if (start_y != 0)
        line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

    for(y=start_y; y<end_y; y++){
        prev = line;
//        line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            if(x){
                if(use_median){
                    if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
                    else  line[x] += line[x - 1];
                }else{
                    if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
                    else  line[x] += line[x - 1];
                }
            }else{
                if(y) line[x] += prev[x];
            }
        }
    }
}

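/*
 * Read the per-subband quantizer (qlog) values; the Cr plane reuses the Cb
 * values and orientation 2 reuses orientation 1 of the same level.
 */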
static void decode_qlogs(SnowContext *s){
    int plane_index, level, orientation;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                int q;
                if     (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
                else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
                else                    q= get_symbol(&s->c, s->header_state, 1);
                s->plane[plane_index].band[level][orientation].qlog= q;
            }
        }
    }
}

#define GET_S(dst, check) \
    tmp= get_symbol(&s->c, s->header_state, 0);\
    if(!(check)){\
        av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
        return AVERROR_INVALIDDATA;\
    }\
    dst= tmp;

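/*
 * Parse the frame header: keyframe flag, global stream parameters (version,
 * colorspace, decomposition type and count, maximum reference frames),
 * optional per-plane half-pel filter coefficients, and the qlog / mv_scale /
 * qbias / block_max_depth deltas.
 */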
static int decode_header(SnowContext *s){
    int plane_index, tmp;
    uint8_t kstate[32];

    memset(kstate, MID_STATE, sizeof(kstate));

    s->keyframe= get_rac(&s->c, kstate);
    if(s->keyframe || s->always_reset){
        ff_snow_reset_contexts(s);
        s->spatial_decomposition_type=
        s->qlog=
        s->qbias=
        s->mv_scale=
        s->block_max_depth= 0;
    }
    if(s->keyframe){
        GET_S(s->version, tmp <= 0U)
        s->always_reset= get_rac(&s->c, s->header_state);
        s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
        s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
        GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
        s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
        if (s->colorspace_type == 1) {
            s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
            s->nb_planes = 1;
        } else if(s->colorspace_type == 0) {
            s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
            s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);

            if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
            }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR,
                       "unsupported color subsample mode %d %d\n",
                       s->chroma_h_shift, s->chroma_v_shift);
                s->chroma_h_shift = s->chroma_v_shift = 1;
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
                return AVERROR_INVALIDDATA;
            }
            s->nb_planes = 3;
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "unsupported color space\n");
            s->chroma_h_shift = s->chroma_v_shift = 1;
            s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            return AVERROR_INVALIDDATA;
        }


        s->spatial_scalability= get_rac(&s->c, s->header_state);
//        s->rate_scalability= get_rac(&s->c, s->header_state);
        GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
        s->max_ref_frames++;

        decode_qlogs(s);
    }

    if(!s->keyframe){
        if(get_rac(&s->c, s->header_state)){
            for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
                int htaps, i, sum=0;
                Plane *p= &s->plane[plane_index];
                p->diag_mc= get_rac(&s->c, s->header_state);
                htaps= get_symbol(&s->c, s->header_state, 0);
                if((unsigned)htaps >= HTAPS_MAX/2 - 1)
                    return AVERROR_INVALIDDATA;
                htaps = htaps*2 + 2;
                p->htaps= htaps;
                for(i= htaps/2; i; i--){
                    unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
                    if (hcoeff > 127)
                        return AVERROR_INVALIDDATA;
                    p->hcoeff[i]= hcoeff * (1-2*(i&1));
                    sum += p->hcoeff[i];
                }
                p->hcoeff[0]= 32-sum;
            }
            s->plane[2].diag_mc= s->plane[1].diag_mc;
            s->plane[2].htaps  = s->plane[1].htaps;
            memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
        }
        if(get_rac(&s->c, s->header_state)){
            GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
            decode_qlogs(s);
        }
    }

    s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->spatial_decomposition_type > 1U){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
        return AVERROR_INVALIDDATA;
    }
    if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
             s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
        return AVERROR_INVALIDDATA;
    }
    if (s->avctx->width > 65536-4) {
        av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
        return AVERROR_INVALIDDATA;
    }


    s->qlog           += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->mv_scale       += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->qbias          += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
        av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
        s->block_max_depth= 0;
        s->mv_scale = 0;
        return AVERROR_INVALIDDATA;
    }
    if (FFABS(s->qbias) > 127) {
        av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
        s->qbias = 0;
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

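/*
 * Decode the block tree for every top-level block, aborting if the range
 * coder has already consumed the whole packet.
 */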
static int decode_blocks(SnowContext *s){
    int x, y;
    int w= s->b_width;
    int h= s->b_height;
    int res;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            if (s->c.bytestream >= s->c.bytestream_end)
                return AVERROR_INVALIDDATA;
            if ((res = decode_q_branch(s, 0, x, y)) < 0)
                return res;
        }
    }
    return 0;
}

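/*
 * Top-level per-frame entry point: parse the header, (re)allocate buffers,
 * decode the block tree and all subband coefficients, then reconstruct the
 * picture slice by slice with the buffered inverse DWT and motion
 * compensation so that only a window of lines is kept in memory.
 */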
static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SnowContext *s = avctx->priv_data;
    RangeCoder * const c= &s->c;
    int bytes_read;
    int level, orientation, plane_index;
    int res;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
    if ((res = decode_header(s)) < 0)
        return res;

    if (!s->mconly_picture->data[0]) {
        res = ff_get_buffer(avctx, s->mconly_picture, AV_GET_BUFFER_FLAG_REF);
        if (res < 0)
            return res;
    }
    if (s->mconly_picture->format != avctx->pix_fmt) {
        av_log(avctx, AV_LOG_ERROR, "pixel format changed\n");
        return AVERROR_INVALIDDATA;
    }

    if ((res=ff_snow_common_init_after_header(avctx)) < 0)
        return res;

    // realloc slice buffer for the case that spatial_decomposition_count changed
    ff_slice_buffer_destroy(&s->sb);
    if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
                                    (MB_SIZE >> s->block_max_depth) +
                                    s->spatial_decomposition_count * 11 + 1,
                                    s->plane[0].width,
                                    s->spatial_idwt_buffer)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
                                              && p->hcoeff[1]==-10
                                              && p->hcoeff[2]==2;
    }

    ff_snow_alloc_blocks(s);

    if ((res = ff_snow_frames_prepare(s)) < 0)
        return res;

    s->current_picture->width  = s->avctx->width;
    s->current_picture->height = s->avctx->height;
    res = ff_get_buffer(s->avctx, s->current_picture, AV_GET_BUFFER_FLAG_REF);
    if (res < 0)
        return res;

    s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    //keyframe flag duplication mess FIXME
    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR,
               "keyframe:%d qlog:%d qbias: %d mvscale: %d "
               "decomposition_type:%d decomposition_count:%d\n",
               s->keyframe, s->qlog, s->qbias, s->mv_scale,
               s->spatial_decomposition_type,
               s->spatial_decomposition_count
              );

    if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
        size_t size;
        res = av_size_mult(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2), &size);
        if (res)
            return res;
        av_fast_malloc(&s->avmv, &s->avmv_size, size);
        if (!s->avmv)
            return AVERROR(ENOMEM);
    } else {
        s->avmv_size = 0;
        av_freep(&s->avmv);
    }
    s->avmv_index = 0;

    if ((res = decode_blocks(s)) < 0)
        return res;

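    /*
     * Reconstruct each plane: unpack all subband coefficients, then walk the
     * macroblock rows, decoding and dequantizing the slices of every subband
     * that are needed, running the buffered inverse DWT, motion compensating,
     * and releasing finished slice-buffer lines.
     */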
    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        int w= p->width;
        int h= p->height;
        int x, y;
        int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */

        if(s->avctx->debug&2048){
            memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
            predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);

            for(y=0; y<h; y++){
                for(x=0; x<w; x++){
                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                }
            }
        }

        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1 : 0; orientation<4; orientation++){
                SubBand *b= &p->band[level][orientation];
                unpack_coeffs(s, b, b->parent, orientation);
            }
        }

        {
        const int mb_h= s->b_height << s->block_max_depth;
        const int block_size = MB_SIZE >> s->block_max_depth;
        const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
        int mb_y;
        DWTCompose cs[MAX_DECOMPOSITIONS];
        int yd=0, yq=0;
        int y;
        int end_y;

        ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
        for(mb_y=0; mb_y<=mb_h; mb_y++){

            int slice_starty = block_h*mb_y;
            int slice_h = block_h*(mb_y+1);

            if (!(s->keyframe || s->avctx->debug&512)){
                slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
                slice_h -= (block_h >> 1);
            }

            for(level=0; level<s->spatial_decomposition_count; level++){
                for(orientation=level ? 1 : 0; orientation<4; orientation++){
                    SubBand *b= &p->band[level][orientation];
                    int start_y;
                    int end_y;
                    int our_mb_start = mb_y;
                    int our_mb_end = (mb_y + 1);
                    const int extra= 3;
                    start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
                    end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
                    if (!(s->keyframe || s->avctx->debug&512)){
                        start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                        end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                    }
                    start_y = FFMIN(b->height, start_y);
                    end_y = FFMIN(b->height, end_y);

                    if (start_y != end_y){
                        if (orientation == 0){
                            SubBand * correlate_band = &p->band[0][0];
                            int correlate_end_y = FFMIN(b->height, end_y + 1);
                            int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
                            decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
                            correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
                            dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
                        }
                        else
                            decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
                    }
                }
            }

            for(; yd<slice_h; yd+=4){
                ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
            }

            if(s->qlog == LOSSLESS_QLOG){
                for(; yq<slice_h && yq<h; yq++){
                    IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
                    for(x=0; x<w; x++){
                        line[x] *= 1<<FRAC_BITS;
                    }
                }
            }

            predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);

            y = FFMIN(p->height, slice_starty);
            end_y = FFMIN(p->height, slice_h);
            while(y < end_y)
                ff_slice_buffer_release(&s->sb, y++);
        }

        ff_slice_buffer_flush(&s->sb);
        }

    }

    emms_c();

    ff_snow_release_buffer(avctx);

    if(!(s->avctx->debug&2048))
        res = av_frame_ref(picture, s->current_picture);
    else
        res = av_frame_ref(picture, s->mconly_picture);
    if (res >= 0 && s->avmv_index) {
        AVFrameSideData *sd;

        sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
        if (!sd)
            return AVERROR(ENOMEM);
        memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
    }

    if (res < 0)
        return res;

    *got_frame = 1;

    bytes_read= c->bytestream - c->bytestream_start;
    if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME

    return bytes_read;
}

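/*
 * Free the slice buffer, the shared Snow state and the exported motion
 * vector array.
 */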
static av_cold int decode_end(AVCodecContext *avctx)
{
    SnowContext *s = avctx->priv_data;

    ff_slice_buffer_destroy(&s->sb);

    ff_snow_common_end(s);

    s->avmv_size = 0;
    av_freep(&s->avmv);

    return 0;
}

const FFCodec ff_snow_decoder = {
    .p.name         = "snow",
    CODEC_LONG_NAME("Snow"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_SNOW,
    .priv_data_size = sizeof(SnowContext),
    .init           = ff_snow_common_init,
    .close          = decode_end,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};