FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/thread.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "error_resilience.h"
36 #include "mpegutils.h"
37 #include "mpegvideo.h"
38 #include "mpegvideodec.h"
39 #include "golomb.h"
40 #include "mathops.h"
41 #include "mpeg_er.h"
42 #include "qpeldsp.h"
43 #include "rectangle.h"
44 #include "thread.h"
45 #include "threadframe.h"
46 
47 #include "rv34vlc.h"
48 #include "rv34data.h"
49 #include "rv34.h"
50 
/**
 * Clear an 8x2 motion-vector area: two 4-byte-wide columns, two rows each.
 */
static inline void ZERO8x2(void* dst, int stride)
{
    uint8_t *p = dst;

    fill_rectangle(p,     1, 2, stride, 0, 4);
    fill_rectangle(p + 4, 1, 2, stride, 0, 4);
}
56 
57 /** translation of RV30/40 macroblock types to lavc ones */
58 static const int rv34_mb_type_to_lavc[12] = {
71 };
72 
73 
75 
76 static int rv34_decode_mv(RV34DecContext *r, int block_type);
77 
78 /**
79  * @name RV30/40 VLC generating functions
80  * @{
81  */
82 
83 static VLCElem table_data[117592];
84 
85 /**
86  * Generate VLC from codeword lengths.
87  * @param bits codeword lengths (zeroes are accepted)
88  * @param size length of input data
89  * @param vlc output VLC
90  * @param insyms symbols for input codes (NULL for default ones)
91  * @param num VLC table number (for static initialization)
92  */
93 static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
94  const uint8_t *syms, int *offset)
95 {
96  int counts[17] = {0}, codes[17];
97  uint16_t cw[MAX_VLC_SIZE];
98  int maxbits;
99 
100  av_assert1(size > 0);
101 
102  for (int i = 0; i < size; i++)
103  counts[bits[i]]++;
104 
105  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
106  * So we reset it here. The code assigned to this element is 0x00. */
107  codes[0] = counts[0] = 0;
108  for (int i = 0; i < 16; i++) {
109  codes[i+1] = (codes[i] + counts[i]) << 1;
110  if (counts[i])
111  maxbits = i;
112  }
113  for (int i = 0; i < size; i++)
114  cw[i] = codes[bits[i]]++;
115 
116  vlc->table = &table_data[*offset];
118  ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
119  bits, 1, 1,
120  cw, 2, 2,
121  syms, !!syms, !!syms, VLC_INIT_STATIC_OVERLONG);
122  *offset += vlc->table_size;
123 }
124 
125 static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
126  int *offset)
127 {
128  VLC vlc = { 0 };
130  *vlcp = vlc.table;
131 }
132 
133 /**
134  * Initialize all tables.
135  */
136 static av_cold void rv34_init_tables(void)
137 {
138  int i, j, k, offset = 0;
139 
140  for(i = 0; i < NUM_INTRA_TABLES; i++){
141  for(j = 0; j < 2; j++){
143  &intra_vlcs[i].cbppattern[j], &offset);
145  &intra_vlcs[i].second_pattern[j], &offset);
147  &intra_vlcs[i].third_pattern[j], &offset);
148  for(k = 0; k < 4; k++){
150  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
151  }
152  }
153  for(j = 0; j < 4; j++){
155  &intra_vlcs[i].first_pattern[j], &offset);
156  }
158  &intra_vlcs[i].coefficient, &offset);
159  }
160 
161  for(i = 0; i < NUM_INTER_TABLES; i++){
163  &inter_vlcs[i].cbppattern[0], &offset);
164  for(j = 0; j < 4; j++){
166  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
167  }
168  for(j = 0; j < 2; j++){
170  &inter_vlcs[i].first_pattern[j], &offset);
172  &inter_vlcs[i].second_pattern[j], &offset);
174  &inter_vlcs[i].third_pattern[j], &offset);
175  }
177  &inter_vlcs[i].coefficient, &offset);
178  }
179 }
180 
181 /** @} */ // vlc group
182 
183 /**
184  * @name RV30/40 4x4 block decoding functions
185  * @{
186  */
187 
188 /**
189  * Decode coded block pattern.
190  */
191 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
192 {
193  int pattern, code, cbp=0;
194  int ones;
195  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
196  static const int shifts[4] = { 0, 2, 8, 10 };
197  const int *curshift = shifts;
198  int i, t, mask;
199 
200  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
201  pattern = code & 0xF;
202  code >>= 4;
203 
204  ones = rv34_count_ones[pattern];
205 
206  for(mask = 8; mask; mask >>= 1, curshift++){
207  if(pattern & mask)
208  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
209  }
210 
211  for(i = 0; i < 4; i++){
212  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
213  if(t == 1)
214  cbp |= cbp_masks[get_bits1(gb)] << i;
215  if(t == 2)
216  cbp |= cbp_masks[2] << i;
217  }
218  return cbp;
219 }
220 
221 /**
222  * Get one coefficient value from the bitstream and store it.
223  */
224 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
225  const VLCElem *vlc, int q)
226 {
227  if(coef){
228  if(coef == esc){
229  coef = get_vlc2(gb, vlc, 9, 2);
230  if(coef > 23){
231  coef -= 23;
232  coef = 22 + ((1 << coef) | get_bits(gb, coef));
233  }
234  coef += esc;
235  }
236  if(get_bits1(gb))
237  coef = -coef;
238  *dst = (coef*q + 8) >> 4;
239  }
240 }
241 
242 /**
243  * Decode 2x2 subblock of coefficients.
244  */
245 static inline void decode_subblock(int16_t *dst, int code, const int is_block2,
246  GetBitContext *gb, const VLCElem *vlc, int q)
247 {
249 
250  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
251  if(is_block2){
252  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
253  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
254  }else{
255  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
256  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
257  }
258  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
259 }
260 
261 /**
262  * Decode a single coefficient.
263  */
264 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb,
265  const VLCElem *vlc, int q)
266 {
267  int coeff = modulo_three_table[code] >> 6;
268  decode_coeff(dst, coeff, 3, gb, vlc, q);
269 }
270 
271 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb,
272  const VLCElem *vlc,
273  int q_dc, int q_ac1, int q_ac2)
274 {
276 
277  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
278  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
279  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
280  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
281 }
282 
283 /**
284  * Decode coefficients for 4x4 block.
285  *
286  * This is done by filling 2x2 subblocks with decoded coefficients
287  * in this order (the same for subblocks and subblock coefficients):
288  * o--o
289  * /
290  * /
291  * o--o
292  */
293 
294 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
295  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
296 {
297  int code, pattern, has_ac = 1;
298 
299  code = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
300 
301  pattern = code & 0x7;
302 
303  code >>= 3;
304 
305  if (modulo_three_table[code] & 0x3F) {
306  decode_subblock3(dst, code, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
307  } else {
308  decode_subblock1(dst, code, gb, rvlc->coefficient, q_dc);
309  if (!pattern)
310  return 0;
311  has_ac = 0;
312  }
313 
314  if(pattern & 4){
315  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
316  decode_subblock(dst + 4*0+2, code, 0, gb, rvlc->coefficient, q_ac2);
317  }
318  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
319  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
320  decode_subblock(dst + 4*2+0, code, 1, gb, rvlc->coefficient, q_ac2);
321  }
322  if(pattern & 1){
323  code = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
324  decode_subblock(dst + 4*2+2, code, 0, gb, rvlc->coefficient, q_ac2);
325  }
326  return has_ac | pattern;
327 }
328 
329 /**
330  * @name RV30/40 bitstream parsing
331  * @{
332  */
333 
334 /**
335  * Decode starting slice position.
336  * @todo Maybe replace with ff_h263_decode_mba() ?
337  */
339 {
340  int i;
341  for(i = 0; i < 5; i++)
342  if(rv34_mb_max_sizes[i] >= mb_size - 1)
343  break;
344  return rv34_mb_bits_sizes[i];
345 }
346 
347 /**
348  * Select VLC set for decoding from current quantizer, modifier and frame type.
349  */
350 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
351 {
352  if(mod == 2 && quant < 19) quant += 10;
353  else if(mod && quant < 26) quant += 5;
354  av_assert2(quant >= 0 && quant < 32);
357 }
358 
359 /**
360  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
361  */
362 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
363 {
364  MpegEncContext *s = &r->s;
365  GetBitContext *gb = &s->gb;
366  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
367  int t;
368 
369  r->is16 = get_bits1(gb);
370  if(r->is16){
371  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
372  r->block_type = RV34_MB_TYPE_INTRA16x16;
373  t = get_bits(gb, 2);
374  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
375  r->luma_vlc = 2;
376  }else{
377  if(!r->rv30){
378  if(!get_bits1(gb))
379  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
380  }
381  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
382  r->block_type = RV34_MB_TYPE_INTRA;
383  if(r->decode_intra_types(r, gb, intra_types) < 0)
384  return -1;
385  r->luma_vlc = 1;
386  }
387 
388  r->chroma_vlc = 0;
389  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
390 
391  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
392 }
393 
394 /**
395  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
396  */
397 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
398 {
399  MpegEncContext *s = &r->s;
400  GetBitContext *gb = &s->gb;
401  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
402  int i, t;
403 
404  r->block_type = r->decode_mb_info(r);
405  if(r->block_type == -1)
406  return -1;
407  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
408  r->mb_type[mb_pos] = r->block_type;
409  if(r->block_type == RV34_MB_SKIP){
410  if(s->pict_type == AV_PICTURE_TYPE_P)
411  r->mb_type[mb_pos] = RV34_MB_P_16x16;
412  if(s->pict_type == AV_PICTURE_TYPE_B)
413  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
414  }
415  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
416  if (rv34_decode_mv(r, r->block_type) < 0)
417  return -1;
418  if(r->block_type == RV34_MB_SKIP){
419  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
420  return 0;
421  }
422  r->chroma_vlc = 1;
423  r->luma_vlc = 0;
424 
425  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
426  if(r->is16){
427  t = get_bits(gb, 2);
428  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
429  r->luma_vlc = 2;
430  }else{
431  if(r->decode_intra_types(r, gb, intra_types) < 0)
432  return -1;
433  r->luma_vlc = 1;
434  }
435  r->chroma_vlc = 0;
436  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
437  }else{
438  for(i = 0; i < 16; i++)
439  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
440  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
441  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
442  r->is16 = 1;
443  r->chroma_vlc = 1;
444  r->luma_vlc = 2;
445  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
446  }
447  }
448 
449  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
450 }
451 
452 /** @} */ //bitstream functions
453 
454 /**
455  * @name motion vector related code (prediction, reconstruction, motion compensation)
456  * @{
457  */
458 
459 /** macroblock partition width in 8x8 blocks */
460 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
461 
462 /** macroblock partition height in 8x8 blocks */
463 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
464 
465 /** availability index for subblocks */
466 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
467 
468 /**
469  * motion vector prediction
470  *
471  * Motion prediction performed for the block by using median prediction of
472  * motion vectors from the left, top and right top blocks but in corner cases
473  * some other vectors may be used instead.
474  */
475 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
476 {
477  MpegEncContext *s = &r->s;
478  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
479  int A[2] = {0}, B[2], C[2];
480  int i, j;
481  int mx, my;
482  int* avail = r->avail_cache + avail_indexes[subblock_no];
483  int c_off = part_sizes_w[block_type];
484 
485  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
486  if(subblock_no == 3)
487  c_off = -1;
488 
489  if(avail[-1]){
490  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
491  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
492  }
493  if(avail[-4]){
494  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
495  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
496  }else{
497  B[0] = A[0];
498  B[1] = A[1];
499  }
500  if(!avail[c_off-4]){
501  if(avail[-4] && (avail[-1] || r->rv30)){
502  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
503  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
504  }else{
505  C[0] = A[0];
506  C[1] = A[1];
507  }
508  }else{
509  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
510  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
511  }
512  mx = mid_pred(A[0], B[0], C[0]);
513  my = mid_pred(A[1], B[1], C[1]);
514  mx += r->dmv[dmv_no][0];
515  my += r->dmv[dmv_no][1];
516  for(j = 0; j < part_sizes_h[block_type]; j++){
517  for(i = 0; i < part_sizes_w[block_type]; i++){
518  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
519  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
520  }
521  }
522 }
523 
524 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
525 
526 /**
527  * Calculate motion vector component that should be added for direct blocks.
528  */
529 static int calc_add_mv(RV34DecContext *r, int dir, int val)
530 {
531  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
532 
533  return (int)(val * (SUINT)mul + 0x2000) >> 14;
534 }
535 
536 /**
537  * Predict motion vector for B-frame macroblock.
538  */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        /* all three neighbours present: median prediction */
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        /* otherwise sum (missing ones are zero), halved when two are present */
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;
            *my /= 2;
        }
    }
}
555 
556 /**
557  * motion vector prediction for B-frames
558  */
559 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
560 {
561  MpegEncContext *s = &r->s;
562  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
563  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
564  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
565  int has_A = 0, has_B = 0, has_C = 0;
566  int mx, my;
567  int i, j;
568  Picture *cur_pic = s->current_picture_ptr;
569  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
570  int type = cur_pic->mb_type[mb_pos];
571 
572  if((r->avail_cache[6-1] & type) & mask){
573  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
574  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
575  has_A = 1;
576  }
577  if((r->avail_cache[6-4] & type) & mask){
578  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
579  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
580  has_B = 1;
581  }
582  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
583  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
584  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
585  has_C = 1;
586  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
587  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
588  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
589  has_C = 1;
590  }
591 
592  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
593 
594  mx += r->dmv[dir][0];
595  my += r->dmv[dir][1];
596 
597  for(j = 0; j < 2; j++){
598  for(i = 0; i < 2; i++){
599  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
600  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
601  }
602  }
603  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
604  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
605  }
606 }
607 
608 /**
609  * motion vector prediction - RV3 version
610  */
611 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
612 {
613  MpegEncContext *s = &r->s;
614  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
615  int A[2] = {0}, B[2], C[2];
616  int i, j, k;
617  int mx, my;
618  int* avail = r->avail_cache + avail_indexes[0];
619 
620  if(avail[-1]){
621  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
622  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
623  }
624  if(avail[-4]){
625  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
626  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
627  }else{
628  B[0] = A[0];
629  B[1] = A[1];
630  }
631  if(!avail[-4 + 2]){
632  if(avail[-4] && (avail[-1])){
633  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
634  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
635  }else{
636  C[0] = A[0];
637  C[1] = A[1];
638  }
639  }else{
640  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
641  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
642  }
643  mx = mid_pred(A[0], B[0], C[0]);
644  my = mid_pred(A[1], B[1], C[1]);
645  mx += r->dmv[0][0];
646  my += r->dmv[0][1];
647  for(j = 0; j < 2; j++){
648  for(i = 0; i < 2; i++){
649  for(k = 0; k < 2; k++){
650  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
651  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
652  }
653  }
654  }
655 }
656 
657 static const int chroma_coeffs[3] = { 0, 3, 5 };
658 
659 /**
660  * generic motion compensation function
661  *
662  * @param r decoder context
663  * @param block_type type of the current block
664  * @param xoff horizontal offset from the start of the current block
665  * @param yoff vertical offset from the start of the current block
666  * @param mv_off offset to the motion vector information
667  * @param width width of the current partition in 8x8 blocks
668  * @param height height of the current partition in 8x8 blocks
669  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
670  * @param thirdpel motion vectors are specified in 1/3 of pixel
671  * @param qpel_mc a set of functions used to perform luma motion compensation
672  * @param chroma_mc a set of functions used to perform chroma motion compensation
673  */
674 static inline void rv34_mc(RV34DecContext *r, const int block_type,
675  const int xoff, const int yoff, int mv_off,
676  const int width, const int height, int dir,
677  const int thirdpel, int weighted,
678  qpel_mc_func (*qpel_mc)[16],
680 {
681  MpegEncContext *s = &r->s;
682  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
683  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
684  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
685  int is16x16 = 1;
686  int emu = 0;
687 
688  if(thirdpel){
689  int chroma_mx, chroma_my;
690  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
691  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
692  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
693  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
694  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
695  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
696  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
697  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
698  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
699  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
700  }else{
701  int cx, cy;
702  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
703  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
704  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
705  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
706  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
707  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
708  umx = cx >> 2;
709  umy = cy >> 2;
710  uvmx = (cx & 3) << 1;
711  uvmy = (cy & 3) << 1;
712  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
713  if(uvmx == 6 && uvmy == 6)
714  uvmx = uvmy = 4;
715  }
716 
717  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
718  /* wait for the referenced mb row to be finished */
719  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
720  const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
721  ff_thread_await_progress(f, mb_row, 0);
722  }
723 
724  dxy = ly*4 + lx;
725  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
726  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
727  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
728  src_x = s->mb_x * 16 + xoff + mx;
729  src_y = s->mb_y * 16 + yoff + my;
730  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
731  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
732  srcY += src_y * s->linesize + src_x;
733  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
734  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
735  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
736  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
737  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
738  srcY -= 2 + 2*s->linesize;
739  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
740  s->linesize, s->linesize,
741  (width << 3) + 6, (height << 3) + 6,
742  src_x - 2, src_y - 2,
743  s->h_edge_pos, s->v_edge_pos);
744  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
745  emu = 1;
746  }
747  if(!weighted){
748  Y = s->dest[0] + xoff + yoff *s->linesize;
749  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
750  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
751  }else{
752  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
753  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
754  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
755  }
756 
757  if(block_type == RV34_MB_P_16x8){
758  qpel_mc[1][dxy](Y, srcY, s->linesize);
759  Y += 8;
760  srcY += 8;
761  }else if(block_type == RV34_MB_P_8x16){
762  qpel_mc[1][dxy](Y, srcY, s->linesize);
763  Y += 8 * s->linesize;
764  srcY += 8 * s->linesize;
765  }
766  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
767  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
768  if (emu) {
769  uint8_t *uvbuf = s->sc.edge_emu_buffer;
770 
771  s->vdsp.emulated_edge_mc(uvbuf, srcU,
772  s->uvlinesize, s->uvlinesize,
773  (width << 2) + 1, (height << 2) + 1,
774  uvsrc_x, uvsrc_y,
775  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
776  srcU = uvbuf;
777  uvbuf += 9*s->uvlinesize;
778 
779  s->vdsp.emulated_edge_mc(uvbuf, srcV,
780  s->uvlinesize, s->uvlinesize,
781  (width << 2) + 1, (height << 2) + 1,
782  uvsrc_x, uvsrc_y,
783  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
784  srcV = uvbuf;
785  }
786  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
787  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
788 }
789 
790 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
791  const int xoff, const int yoff, int mv_off,
792  const int width, const int height, int dir)
793 {
794  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
795  r->rdsp.put_pixels_tab,
796  r->rdsp.put_chroma_pixels_tab);
797 }
798 
800 {
801  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
802  r->tmp_b_block_y[0],
803  r->tmp_b_block_y[1],
804  r->weight1,
805  r->weight2,
806  r->s.linesize);
807  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
808  r->tmp_b_block_uv[0],
809  r->tmp_b_block_uv[2],
810  r->weight1,
811  r->weight2,
812  r->s.uvlinesize);
813  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
814  r->tmp_b_block_uv[1],
815  r->tmp_b_block_uv[3],
816  r->weight1,
817  r->weight2,
818  r->s.uvlinesize);
819 }
820 
821 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
822 {
823  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
824 
825  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
826  r->rdsp.put_pixels_tab,
827  r->rdsp.put_chroma_pixels_tab);
828  if(!weighted){
829  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
830  r->rdsp.avg_pixels_tab,
831  r->rdsp.avg_chroma_pixels_tab);
832  }else{
833  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
834  r->rdsp.put_pixels_tab,
835  r->rdsp.put_chroma_pixels_tab);
836  rv4_weight(r);
837  }
838 }
839 
841 {
842  int i, j;
843  int weighted = !r->rv30 && r->weight1 != 8192;
844 
845  for(j = 0; j < 2; j++)
846  for(i = 0; i < 2; i++){
847  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
848  weighted,
849  r->rdsp.put_pixels_tab,
850  r->rdsp.put_chroma_pixels_tab);
851  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
852  weighted,
853  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
854  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
855  }
856  if(weighted)
857  rv4_weight(r);
858 }
859 
860 /** number of motion vectors in each macroblock type */
861 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
862 
863 /**
864  * Decode motion vector differences
865  * and perform motion vector reconstruction and motion compensation.
866  */
867 static int rv34_decode_mv(RV34DecContext *r, int block_type)
868 {
869  MpegEncContext *s = &r->s;
870  GetBitContext *gb = &s->gb;
871  int i, j, k, l;
872  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
873  int next_bt;
874 
875  memset(r->dmv, 0, sizeof(r->dmv));
876  for(i = 0; i < num_mvs[block_type]; i++){
877  r->dmv[i][0] = get_interleaved_se_golomb(gb);
878  r->dmv[i][1] = get_interleaved_se_golomb(gb);
879  if (r->dmv[i][0] == INVALID_VLC ||
880  r->dmv[i][1] == INVALID_VLC) {
881  r->dmv[i][0] = r->dmv[i][1] = 0;
882  return AVERROR_INVALIDDATA;
883  }
884  }
885  switch(block_type){
886  case RV34_MB_TYPE_INTRA:
888  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
889  return 0;
890  case RV34_MB_SKIP:
891  if(s->pict_type == AV_PICTURE_TYPE_P){
892  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
893  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
894  break;
895  }
896  case RV34_MB_B_DIRECT:
897  //surprisingly, it uses motion scheme from next reference frame
898  /* wait for the current mb row to be finished */
899  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
900  ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
901 
902  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
903  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
904  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
905  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
906  }else
907  for(j = 0; j < 2; j++)
908  for(i = 0; i < 2; i++)
909  for(k = 0; k < 2; k++)
910  for(l = 0; l < 2; l++)
911  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
912  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
913  rv34_mc_2mv(r, block_type);
914  else
916  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
917  break;
918  case RV34_MB_P_16x16:
919  case RV34_MB_P_MIX16x16:
920  rv34_pred_mv(r, block_type, 0, 0);
921  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
922  break;
923  case RV34_MB_B_FORWARD:
924  case RV34_MB_B_BACKWARD:
925  r->dmv[1][0] = r->dmv[0][0];
926  r->dmv[1][1] = r->dmv[0][1];
927  if(r->rv30)
928  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
929  else
930  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
931  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
932  break;
933  case RV34_MB_P_16x8:
934  case RV34_MB_P_8x16:
935  rv34_pred_mv(r, block_type, 0, 0);
936  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
937  if(block_type == RV34_MB_P_16x8){
938  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
939  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
940  }
941  if(block_type == RV34_MB_P_8x16){
942  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
943  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
944  }
945  break;
946  case RV34_MB_B_BIDIR:
947  rv34_pred_mv_b (r, block_type, 0);
948  rv34_pred_mv_b (r, block_type, 1);
949  rv34_mc_2mv (r, block_type);
950  break;
951  case RV34_MB_P_8x8:
952  for(i=0;i< 4;i++){
953  rv34_pred_mv(r, block_type, i, i);
954  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
955  }
956  break;
957  }
958 
959  return 0;
960 }
961 /** @} */ // mv group
962 
963 /**
964  * @name Macroblock reconstruction functions
965  * @{
966  */
967 /** mapping of RV30/40 intra prediction types to standard H.264 types */
968 static const int ittrans[9] = {
971 };
972 
973 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
974 static const int ittrans16[4] = {
976 };
977 
978 /**
979  * Perform 4x4 intra prediction.
980  */
981 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
982 {
 // Perform one 4x4 intra prediction into dst, first remapping the requested
 // H.264-style mode (itype) according to which neighbours are available.
 // up/left/down/right are availability flags for the respective neighbours.
983  uint8_t *prev = dst - stride + 4;
984  uint32_t topleft;
985 
 // No neighbours at all: only the fixed 128-value DC predictor is usable.
986  if(!up && !left)
987  itype = DC_128_PRED;
988  else if(!up){
 // Top row unavailable: substitute modes that only read the left column.
989  if(itype == VERT_PRED) itype = HOR_PRED;
990  if(itype == DC_PRED) itype = LEFT_DC_PRED;
991  }else if(!left){
 // Left column unavailable: substitute modes that only read the top row.
992  if(itype == HOR_PRED) itype = VERT_PRED;
993  if(itype == DC_PRED) itype = TOP_DC_PRED;
 // NOTE(review): original source line 994 was lost in this extraction -
 // verify this branch against upstream rv34.c before relying on it.
995  }
996  if(!down){
 // Below-left samples unavailable: use the RV40 "nodown" mode variants.
 // NOTE(review): original source line 997 is also missing from this listing.
998  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
999  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
1000  }
 // No top-right neighbour: replicate the last available top sample four
 // times and point the predictor's top-right input at that copy.
1001  if(!right && up){
1002  topleft = dst[-stride + 3] * 0x01010101u;
1003  prev = (uint8_t*)&topleft;
1004  }
1005  r->h.pred4x4[itype](dst, prev, stride);
1006 }
1007 
1008 static inline int adjust_pred16(int itype, int up, int left)
1009 {
1010  if(!up && !left)
1011  itype = DC_128_PRED8x8;
1012  else if(!up){
1013  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1014  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1015  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1016  }else if(!left){
1017  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1018  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1019  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1020  }
1021  return itype;
1022 }
1023 
1025  uint8_t *pdst, int stride,
1026  int fc, int sc, int q_dc, int q_ac)
1027 {
1028  MpegEncContext *s = &r->s;
1029  int16_t *ptr = s->block[0];
1030  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1031  fc, sc, q_dc, q_ac, q_ac);
1032  if(has_ac){
1033  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1034  }else{
1035  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1036  ptr[0] = 0;
1037  }
1038 }
1039 
1040 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1041 {
 // Reconstruct one 16x16 intra macroblock: decode the separate 4x4 block
 // of luma DC coefficients, run the 16x16 luma and 8x8 chroma predictors,
 // then add per-4x4-subblock residuals for every set cbp bit.
 // NOTE: rv34_decode_block calls consume bits from the slice bitstream in
 // this exact order; the decode order must not be changed.
1042  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1043  MpegEncContext *s = &r->s;
1044  GetBitContext *gb = &s->gb;
1045  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1046  q_ac = rv34_qscale_tab[s->qscale];
1047  uint8_t *dst = s->dest[0];
1048  int16_t *ptr = s->block[0];
1049  int i, j, itype, has_ac;
1050 
1051  memset(block16, 0, 16 * sizeof(*block16));
1052 
 // Decode and inverse-transform the 4x4 matrix of luma DC values
 // (one DC per 4x4 subblock of the macroblock).
1053  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1054  if(has_ac)
1055  r->rdsp.rv34_inv_transform(block16);
1056  else
1057  r->rdsp.rv34_inv_transform_dc(block16);
1058 
 // Luma: map the stored mode to an H.264 16x16 predictor, adjusted for
 // neighbour availability, and predict the whole 16x16 block at once.
1059  itype = ittrans16[intra_types[0]];
1060  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1061  r->h.pred16x16[itype](dst, s->linesize);
1062 
 // Luma residual: one cbp bit per 4x4 subblock, raster order.
1063  for(j = 0; j < 4; j++){
1064  for(i = 0; i < 4; i++, cbp >>= 1){
1065  int dc = block16[i + j*4];
1066 
1067  if(cbp & 1){
1068  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1069  }else
1070  has_ac = 0;
1071 
 // Merge the separately-decoded DC back into the subblock before the IDCT;
 // DC-only subblocks take the cheaper dc_add path.
1072  if(has_ac){
1073  ptr[0] = dc;
1074  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1075  }else
1076  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1077  }
1078 
1079  dst += 4*s->linesize;
1080  }
1081 
 // Chroma: the 8x8 predictors have no plane mode equivalent here, so
 // PLANE falls back to DC before the availability adjustment.
1082  itype = ittrans16[intra_types[0]];
1083  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1084  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1085 
1086  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1087  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1088 
 // j = 1 (Cb) and 2 (Cr); four 4x4 subblocks per 8x8 chroma plane,
 // continuing to consume cbp bits from where the luma loop stopped.
1089  for(j = 1; j < 3; j++){
1090  dst = s->dest[j];
1091  r->h.pred8x8[itype](dst, s->uvlinesize);
1092  for(i = 0; i < 4; i++, cbp >>= 1){
1093  uint8_t *pdst;
1094  if(!(cbp & 1)) continue;
1095  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1096 
1097  rv34_process_block(r, pdst, s->uvlinesize,
1098  r->chroma_vlc, 1, q_dc, q_ac);
1099  }
1100  }
1101 }
1102 
1103 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1104 {
 // Reconstruct an intra macroblock with per-4x4 prediction modes:
 // predict and add residual for each of the sixteen luma 4x4 subblocks,
 // then for the 2x2 grid of 4x4 subblocks of each chroma plane.
 // Residual decode order must follow the cbp bit order exactly.
1105  MpegEncContext *s = &r->s;
1106  uint8_t *dst = s->dest[0];
1107  int avail[6*8] = {0};
1108  int i, j, k;
1109  int idx, q_ac, q_dc;
1110 
 // Translate the macroblock-level availability cache into a per-4x4
 // availability map laid out on an 8-wide grid (row stride 8).
1111  // Set neighbour information.
1112  if(r->avail_cache[1])
1113  avail[0] = 1;
1114  if(r->avail_cache[2])
1115  avail[1] = avail[2] = 1;
1116  if(r->avail_cache[3])
1117  avail[3] = avail[4] = 1;
1118  if(r->avail_cache[4])
1119  avail[5] = 1;
1120  if(r->avail_cache[5])
1121  avail[8] = avail[16] = 1;
1122  if(r->avail_cache[9])
1123  avail[24] = avail[32] = 1;
1124 
 // Luma: predict each 4x4 subblock (neighbours looked up in avail[]),
 // mark it available, then add its residual if its cbp bit is set.
1125  q_ac = rv34_qscale_tab[s->qscale];
1126  for(j = 0; j < 4; j++){
1127  idx = 9 + j*8;
1128  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1129  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1130  avail[idx] = 1;
1131  if(!(cbp & 1)) continue;
1132 
1133  rv34_process_block(r, dst, s->linesize,
1134  r->luma_vlc, 0, q_ac, q_ac);
1135  }
 // Step back to the row start, then down one row of 4x4 blocks.
1136  dst += s->linesize * 4 - 4*4;
1137  intra_types += r->intra_types_stride;
1138  }
1139 
 // Rewind to the first row of stored intra types for the chroma pass.
1140  intra_types -= r->intra_types_stride * 4;
1141 
1142  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1143  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1144 
 // Chroma: k = 0 (Cb) and 1 (Cr); each plane is a 2x2 grid of 4x4
 // subblocks reusing every other stored luma intra type.
1145  for(k = 0; k < 2; k++){
1146  dst = s->dest[1+k];
1147  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1148 
1149  for(j = 0; j < 2; j++){
1150  int* acache = r->avail_cache + 6 + j*4;
1151  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1152  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
 // "down" is only available for the top-left chroma subblock.
1153  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1154  acache[0] = 1;
1155 
1156  if(!(cbp&1)) continue;
1157 
1158  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1159  r->chroma_vlc, 1, q_dc, q_ac);
1160  }
1161 
1162  dst += 4*s->uvlinesize;
1163  }
1164  }
1165 }
1166 
/**
 * Check whether a motion vector differs from its neighbour by more than 3
 * (quarter-pel units) in either component.
 *
 * @param motion_val pointer to the current vector; the neighbour is read
 *                   from motion_val[-step]
 * @param step       distance (in vectors) back to the neighbour
 * @return 1 if |dx| > 3 or |dy| > 3, 0 otherwise
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    int comp;

    for (comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1178 
1180 {
1181  MpegEncContext *s = &r->s;
1182  int hmvmask = 0, vmvmask = 0, i, j;
1183  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1184  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1185  for(j = 0; j < 16; j += 8){
1186  for(i = 0; i < 2; i++){
1187  if(is_mv_diff_gt_3(motion_val + i, 1))
1188  vmvmask |= 0x11 << (j + i*2);
1189  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1190  hmvmask |= 0x03 << (j + i*2);
1191  }
1192  motion_val += s->b8_stride;
1193  }
1194  if(s->first_slice_line)
1195  hmvmask &= ~0x000F;
1196  if(!s->mb_x)
1197  vmvmask &= ~0x1111;
1198  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1199  vmvmask |= (vmvmask & 0x4444) >> 1;
1200  hmvmask |= (hmvmask & 0x0F00) >> 4;
1201  if(s->mb_x)
1202  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1203  if(!s->first_slice_line)
1204  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1205  }
1206  return hmvmask | vmvmask;
1207 }
1208 
1209 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1210 {
 // Decode one macroblock of an inter (P/B) slice: parse the header
 // (which also performs motion compensation), then decode and add the
 // residual. Returns 0 on success, -1 on a header parse failure.
1211  MpegEncContext *s = &r->s;
1212  GetBitContext *gb = &s->gb;
1213  uint8_t *dst = s->dest[0];
1214  int16_t *ptr = s->block[0];
1215  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1216  int cbp, cbp2;
1217  int q_dc, q_ac, has_ac;
1218  int i, j;
1219  int dist;
1220 
 // Mark which neighbouring macroblocks may be referenced: only those
 // decoded since the last resync point (dist counts MBs from resync).
1221  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1222  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1223  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1224  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1225  if(s->mb_x && dist)
1226  r->avail_cache[5] =
1227  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1228  if(dist >= s->mb_width)
1229  r->avail_cache[2] =
1230  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1231  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1232  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1233  if(s->mb_x && dist > s->mb_width)
1234  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1235 
 // Header parse returns the 24-bit coded block pattern (16 luma bits +
 // 8 chroma bits) or -1 on error; it also does the MC for this MB.
1236  s->qscale = r->si.quant;
1237  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1238  r->cbp_luma [mb_pos] = cbp;
1239  r->cbp_chroma[mb_pos] = cbp >> 16;
1240  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1241  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1242 
1243  if(cbp == -1)
1244  return -1;
1245 
 // Intra-coded MB inside an inter slice: reuse the intra output paths.
1246  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1247  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1248  else rv34_output_intra(r, intra_types, cbp);
1249  return 0;
1250  }
1251 
1252  if(r->is16){
 // P_MIX16x16: luma DC values travel in a separate 4x4 block, decoded
 // with the intra DC quantiser table for P frames.
1253  // Only for RV34_MB_P_MIX16x16
1254  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1255  memset(block16, 0, 16 * sizeof(*block16));
1256  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1257  q_ac = rv34_qscale_tab[s->qscale];
1258  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1259  r->rdsp.rv34_inv_transform(block16);
1260  else
1261  r->rdsp.rv34_inv_transform_dc(block16);
1262 
 // NOTE(review): this re-assignment of q_ac is redundant (same value as
 // line 1257 above) but harmless.
1263  q_ac = rv34_qscale_tab[s->qscale];
1264 
 // One cbp bit per 4x4 luma subblock, raster order; re-merge the DC
 // before the IDCT, or use the DC-only path when no AC was coded.
1265  for(j = 0; j < 4; j++){
1266  for(i = 0; i < 4; i++, cbp >>= 1){
1267  int dc = block16[i + j*4];
1268 
1269  if(cbp & 1){
1270  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1271  }else
1272  has_ac = 0;
1273 
1274  if(has_ac){
1275  ptr[0] = dc;
1276  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1277  }else
1278  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1279  }
1280 
1281  dst += 4*s->linesize;
1282  }
1283 
 // Mixed MBs switch back to the inter VLC tables for what follows.
1284  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1285  }else{
 // Regular inter residual: decode/add only the coded 4x4 subblocks.
1286  q_ac = rv34_qscale_tab[s->qscale];
1287 
1288  for(j = 0; j < 4; j++){
1289  for(i = 0; i < 4; i++, cbp >>= 1){
1290  if(!(cbp & 1)) continue;
1291 
1292  rv34_process_block(r, dst + 4*i, s->linesize,
1293  r->luma_vlc, 0, q_ac, q_ac);
1294  }
1295  dst += 4*s->linesize;
1296  }
1297  }
1298 
 // Chroma residual: j = 1 (Cb), 2 (Cr); four 4x4 subblocks per plane,
 // consuming the remaining cbp bits.
1299  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1300  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1301 
1302  for(j = 1; j < 3; j++){
1303  dst = s->dest[j];
1304  for(i = 0; i < 4; i++, cbp >>= 1){
1305  uint8_t *pdst;
1306  if(!(cbp & 1)) continue;
1307  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1308 
1309  rv34_process_block(r, pdst, s->uvlinesize,
1310  r->chroma_vlc, 1, q_dc, q_ac);
1311  }
1312  }
1313 
1314  return 0;
1315 }
1316 
1317 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1318 {
1319  MpegEncContext *s = &r->s;
1320  int cbp, dist;
1321  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1322 
1323  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1324  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1325  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1326  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1327  if(s->mb_x && dist)
1328  r->avail_cache[5] =
1329  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1330  if(dist >= s->mb_width)
1331  r->avail_cache[2] =
1332  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1333  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1334  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1335  if(s->mb_x && dist > s->mb_width)
1336  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1337 
1338  s->qscale = r->si.quant;
1339  cbp = rv34_decode_intra_mb_header(r, intra_types);
1340  r->cbp_luma [mb_pos] = cbp;
1341  r->cbp_chroma[mb_pos] = cbp >> 16;
1342  r->deblock_coefs[mb_pos] = 0xFFFF;
1343  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1344 
1345  if(cbp == -1)
1346  return -1;
1347 
1348  if(r->is16){
1349  rv34_output_i16x16(r, intra_types, cbp);
1350  return 0;
1351  }
1352 
1353  rv34_output_intra(r, intra_types, cbp);
1354  return 0;
1355 }
1356 
1358 {
1359  int bits;
1360  if(s->mb_y >= s->mb_height)
1361  return 1;
1362  if(!s->mb_num_left)
1363  return 1;
1364  if(r->s.mb_skip_run > 1)
1365  return 0;
1366  bits = get_bits_left(&s->gb);
1367  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1368  return 1;
1369  return 0;
1370 }
1371 
1372 
1374 {
1375  av_freep(&r->intra_types_hist);
1376  r->intra_types = NULL;
1377  av_freep(&r->tmp_b_block_base);
1378  av_freep(&r->mb_type);
1379  av_freep(&r->cbp_luma);
1380  av_freep(&r->cbp_chroma);
1381  av_freep(&r->deblock_coefs);
1382 }
1383 
1384 
1386 {
1387  r->intra_types_stride = r->s.mb_width * 4 + 4;
1388 
1389  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1390  sizeof(*r->cbp_chroma));
1391  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1392  sizeof(*r->cbp_luma));
1393  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1394  sizeof(*r->deblock_coefs));
1395  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1396  sizeof(*r->intra_types_hist));
1397  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1398  sizeof(*r->mb_type));
1399 
1400  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1401  r->intra_types_hist && r->mb_type)) {
1402  r->s.context_reinit = 1;
1404  return AVERROR(ENOMEM);
1405  }
1406 
1407  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1408 
1409  return 0;
1410 }
1411 
1412 
1414 {
1416  return rv34_decoder_alloc(r);
1417 }
1418 
1419 
1420 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1421 {
 // Decode one slice: parse and validate its header, resynchronise the
 // macroblock position, then decode macroblocks until the slice ends.
 // Returns 1 when the last MB row of the frame has been reached,
 // 0 otherwise, and -1 on error.
1422  MpegEncContext *s = &r->s;
1423  GetBitContext *gb = &s->gb;
1424  int mb_pos, slice_type;
1425  int res;
1426 
1427  init_get_bits(&r->s.gb, buf, buf_size*8);
1428  res = r->parse_slice_header(r, gb, &r->si);
1429  if(res < 0){
1430  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1431  return -1;
1432  }
1433 
 // All slices of a frame must agree with the frame-level picture type
 // and dimensions that were established from the first slice.
1434  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1435  if (slice_type != s->pict_type) {
1436  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1437  return AVERROR_INVALIDDATA;
1438  }
1439  if (s->width != r->si.width || s->height != r->si.height) {
1440  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1441  return AVERROR_INVALIDDATA;
1442  }
1443 
1444  r->si.end = end;
1445  s->qscale = r->si.quant;
1446  s->mb_num_left = r->si.end - r->si.start;
1447  r->s.mb_skip_run = 0;
1448 
 // If the header's start MB disagrees with our position, trust the
 // header and resynchronise (error concealment for damaged streams).
1449  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1450  if(r->si.start != mb_pos){
1451  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1452  s->mb_x = r->si.start % s->mb_width;
1453  s->mb_y = r->si.start / s->mb_width;
1454  }
 // Reset stored intra prediction types (-1 = unavailable) at slice start.
1455  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1456  s->first_slice_line = 1;
1457  s->resync_mb_x = s->mb_x;
1458  s->resync_mb_y = s->mb_y;
1459 
 // NOTE(review): original source line 1460 was lost in this extraction -
 // verify against upstream rv34.c.
1461  while(!check_slice_end(r, s)) {
1462  ff_update_block_index(s, 8, 0, 1);
1463 
1464  if(r->si.type)
1465  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1466  else
1467  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1468  if(res < 0){
1469  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1470  return -1;
1471  }
 // End of a macroblock row: advance, age the intra-type history, run
 // the loop filter and report progress two rows behind the decode
 // position (those rows can no longer be touched).
1472  if (++s->mb_x == s->mb_width) {
1473  s->mb_x = 0;
1474  s->mb_y++;
 // NOTE(review): original source line 1475 is missing from this listing.
1476 
1477  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1478  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1479 
1480  if(r->loop_filter && s->mb_y >= 2)
1481  r->loop_filter(r, s->mb_y - 2);
1482 
1483  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1484  ff_thread_report_progress(&s->current_picture_ptr->tf,
1485  s->mb_y - 2, 0);
1486 
1487  }
1488  if(s->mb_x == s->resync_mb_x)
1489  s->first_slice_line=0;
1490  s->mb_num_left--;
1491  }
1492  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1493 
1494  return s->mb_y == s->mb_height;
1495 }
1496 
1497 /** @} */ // reconstruction group end
1498 
1499 /**
1500  * Initialize decoder.
1501  */
1503 {
1504  static AVOnce init_static_once = AV_ONCE_INIT;
1505  RV34DecContext *r = avctx->priv_data;
1506  MpegEncContext *s = &r->s;
1507  int ret;
1508 
1509  ff_mpv_decode_init(s, avctx);
1510  s->out_format = FMT_H263;
1511 
1512  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1513  avctx->has_b_frames = 1;
1514  s->low_delay = 0;
1515 
1516  if ((ret = ff_mpv_common_init(s)) < 0)
1517  return ret;
1518 
1519  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1520 
1521  if ((ret = rv34_decoder_alloc(r)) < 0) {
1522  ff_mpv_common_end(&r->s);
1523  return ret;
1524  }
1525 
1526  ff_thread_once(&init_static_once, rv34_init_tables);
1527 
1528  return 0;
1529 }
1530 
1532 {
1533  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1534  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1535  int err;
1536 
1537  if (dst == src || !s1->context_initialized)
1538  return 0;
1539 
1540  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
1541  s->height = s1->height;
1542  s->width = s1->width;
1543  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1544  return err;
1545  if ((err = rv34_decoder_realloc(r)) < 0)
1546  return err;
1547  }
1548 
1549  r->cur_pts = r1->cur_pts;
1550  r->last_pts = r1->last_pts;
1551  r->next_pts = r1->next_pts;
1552 
1553  memset(&r->si, 0, sizeof(r->si));
1554 
1555  // Do not call ff_mpeg_update_thread_context on a partially initialized
1556  // decoder context.
1557  if (!s1->context_initialized)
1558  return 0;
1559 
1560  return ff_mpeg_update_thread_context(dst, src);
1561 }
1562 
1563 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1564 {
1565  if (n < slice_count) {
1566  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1567  } else
1568  return buf_size;
1569 }
1570 
1571 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1572 {
 // Complete the current frame: finish error resilience, unblock any
 // frame-threading waiters, and output a picture if one is ready.
 // Returns 1 if pict was filled, 0 if not, or a negative error code.
1573  RV34DecContext *r = avctx->priv_data;
1574  MpegEncContext *s = &r->s;
1575  int got_picture = 0, ret;
1576 
1577  ff_er_frame_end(&s->er, NULL);
 // NOTE(review): original source line 1578 was lost in this extraction -
 // verify against upstream rv34.c.
1579  s->mb_num_left = 0;
1580 
1581  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1582  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1583 
 // B-frames are output immediately; otherwise the previous reference
 // frame becomes available for output (one-frame delay).
1584  if (s->pict_type == AV_PICTURE_TYPE_B) {
1585  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1586  return ret;
1587  ff_print_debug_info(s, s->current_picture_ptr, pict);
1588  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1589  got_picture = 1;
1590  } else if (s->last_picture_ptr) {
1591  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1592  return ret;
1593  ff_print_debug_info(s, s->last_picture_ptr, pict);
1594  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1595  got_picture = 1;
1596  }
1597 
1598  return got_picture;
1599 }
1600 
1601 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1602 {
1603  // attempt to keep aspect during typical resolution switches
1604  if (!sar.num)
1605  sar = (AVRational){1, 1};
1606 
1607  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1608  return sar;
1609 }
1610 
1612  int *got_picture_ptr, AVPacket *avpkt)
1613 {
1614  const uint8_t *buf = avpkt->data;
1615  int buf_size = avpkt->size;
1616  RV34DecContext *r = avctx->priv_data;
1617  MpegEncContext *s = &r->s;
1618  SliceInfo si;
1619  int i, ret;
1620  int slice_count;
1621  const uint8_t *slices_hdr = NULL;
1622  int last = 0;
1623  int faulty_b = 0;
1624  int offset;
1625 
1626  /* no supplementary picture */
1627  if (buf_size == 0) {
1628  /* special case for last picture */
1629  if (s->next_picture_ptr) {
1630  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1631  return ret;
1632  s->next_picture_ptr = NULL;
1633 
1634  *got_picture_ptr = 1;
1635  }
1636  return 0;
1637  }
1638 
1639  slice_count = (*buf++) + 1;
1640  slices_hdr = buf + 4;
1641  buf += 8 * slice_count;
1642  buf_size -= 1 + 8 * slice_count;
1643 
1644  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1645  //parse first slice header to check whether this frame can be decoded
1646  if(offset < 0 || offset > buf_size){
1647  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1648  return AVERROR_INVALIDDATA;
1649  }
1650  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1651  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1652  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1653  return AVERROR_INVALIDDATA;
1654  }
1655  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1656  si.type == AV_PICTURE_TYPE_B) {
1657  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1658  "reference data.\n");
1659  faulty_b = 1;
1660  }
1661  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1662  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1663  || avctx->skip_frame >= AVDISCARD_ALL)
1664  return avpkt->size;
1665 
1666  /* first slice */
1667  if (si.start == 0) {
1668  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1669  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1670  s->mb_num_left);
1671  if (!s->context_reinit)
1672  ff_er_frame_end(&s->er, NULL);
1674  }
1675 
1676  if (s->width != si.width || s->height != si.height || s->context_reinit) {
1677  int err;
1678 
1679  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1680  si.width, si.height);
1681 
1682  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1683  return AVERROR_INVALIDDATA;
1684 
1685  s->avctx->sample_aspect_ratio = update_sar(
1686  s->width, s->height, s->avctx->sample_aspect_ratio,
1687  si.width, si.height);
1688  s->width = si.width;
1689  s->height = si.height;
1690 
1691  err = ff_set_dimensions(s->avctx, s->width, s->height);
1692  if (err < 0)
1693  return err;
1694  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1695  return err;
1696  if ((err = rv34_decoder_realloc(r)) < 0)
1697  return err;
1698  }
1699  if (faulty_b)
1700  return AVERROR_INVALIDDATA;
1701  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1702  if (ff_mpv_frame_start(s, s->avctx) < 0)
1703  return -1;
1705  if (!r->tmp_b_block_base) {
1706  int i;
1707 
1708  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1709  if (!r->tmp_b_block_base)
1710  return AVERROR(ENOMEM);
1711  for (i = 0; i < 2; i++)
1712  r->tmp_b_block_y[i] = r->tmp_b_block_base
1713  + i * 16 * s->linesize;
1714  for (i = 0; i < 4; i++)
1715  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1716  + (i >> 1) * 8 * s->uvlinesize
1717  + (i & 1) * 16;
1718  }
1719  r->cur_pts = si.pts;
1720  if (s->pict_type != AV_PICTURE_TYPE_B) {
1721  r->last_pts = r->next_pts;
1722  r->next_pts = r->cur_pts;
1723  } else {
1724  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1725  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1726  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1727 
1728  if(!refdist){
1729  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1730  r->scaled_weight = 0;
1731  }else{
1732  if (FFMAX(dist0, dist1) > refdist)
1733  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1734 
1735  r->mv_weight1 = (dist0 << 14) / refdist;
1736  r->mv_weight2 = (dist1 << 14) / refdist;
1737  if((r->mv_weight1|r->mv_weight2) & 511){
1738  r->weight1 = r->mv_weight1;
1739  r->weight2 = r->mv_weight2;
1740  r->scaled_weight = 0;
1741  }else{
1742  r->weight1 = r->mv_weight1 >> 9;
1743  r->weight2 = r->mv_weight2 >> 9;
1744  r->scaled_weight = 1;
1745  }
1746  }
1747  }
1748  s->mb_x = s->mb_y = 0;
1749  ff_thread_finish_setup(s->avctx);
1750  } else if (s->context_reinit) {
1751  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
1752  "reinitialize (start MB is %d).\n", si.start);
1753  return AVERROR_INVALIDDATA;
1754  } else if (HAVE_THREADS &&
1755  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1756  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1757  "multithreading mode (start MB is %d).\n", si.start);
1758  return AVERROR_INVALIDDATA;
1759  }
1760 
1761  for(i = 0; i < slice_count; i++){
1762  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1763  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1764  int size;
1765 
1766  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1767  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1768  break;
1769  }
1770  size = offset1 - offset;
1771 
1772  r->si.end = s->mb_width * s->mb_height;
1773  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1774 
1775  if(i+1 < slice_count){
1776  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1777  if (offset2 < offset1 || offset2 > buf_size) {
1778  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1779  break;
1780  }
1781  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1782  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1783  size = offset2 - offset;
1784  }else
1785  r->si.end = si.start;
1786  }
1787  av_assert0 (size >= 0 && size <= buf_size - offset);
1788  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1789  if(last)
1790  break;
1791  }
1792 
1793  if (s->current_picture_ptr) {
1794  if (last) {
1795  if(r->loop_filter)
1796  r->loop_filter(r, s->mb_height - 1);
1797 
1798  ret = finish_frame(avctx, pict);
1799  if (ret < 0)
1800  return ret;
1801  *got_picture_ptr = ret;
1802  } else if (HAVE_THREADS &&
1803  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1804  av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
1805  /* always mark the current frame as finished, frame-mt supports
1806  * only complete frames */
1807  ff_er_frame_end(&s->er, NULL);
1809  s->mb_num_left = 0;
1810  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1811  return AVERROR_INVALIDDATA;
1812  }
1813  }
1814 
1815  return avpkt->size;
1816 }
1817 
1819 {
1820  RV34DecContext *r = avctx->priv_data;
1821 
1822  ff_mpv_common_end(&r->s);
1824 
1825  return 0;
1826 }
RV34DecContext
decoder context
Definition: rv34.h:86
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:82
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:58
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1103
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
r
const char * r
Definition: vf_curves.c:126
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1818
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:505
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:611
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:974
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:861
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:657
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:338
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1531
AVPacket::data
uint8_t * data
Definition: packet.h:522
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:205
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1413
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1357
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1179
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:45
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:466
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1008
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:49
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1385
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1819
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:475
GetBitContext
Definition: get_bits.h:108
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:68
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:294
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:52
val
static double val(void *priv, double ch)
Definition: aeval.c:78
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:498
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:90
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1502
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:981
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1317
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:51
mask
static const uint16_t mask[17]
Definition: lzw.c:38
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:65
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:892
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:790
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1209
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:74
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:80
s1
#define s1
Definition: regdef.h:38
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:67
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:529
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:74
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
IS_INTRA
#define IS_INTRA(x, y)
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1571
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:224
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:49
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:69
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:66
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:524
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1420
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:136
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:51
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
decode_subblock
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:245
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:70
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:50
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:76
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
decode_subblock3
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:271
V
#define V
Definition: avdct.c:30
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
qpeldsp.h
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:93
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:867
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:48
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
VLC::table_allocated
int table_allocated
Definition: vlc.h:39
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:840
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
f
f
Definition: af_crystalizer.c:121
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1167
AVPacket::size
int size
Definition: packet.h:523
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:354
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:50
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1611
rectangle.h
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:594
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1601
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:54
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
decode_subblock1
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:264
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:191
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
height
#define height
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:82
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:69
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1040
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:46
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:559
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:81
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:463
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:821
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:57
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
table_data
static VLCElem table_data[117592]
Definition: rv34.c:83
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:75
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1563
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:750
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
VLC::bits
int bits
Definition: vlc.h:37
mid_pred
#define mid_pred
Definition: mathops.h:98
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:71
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:799
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:180
SliceInfo::start
int start
Definition: rv34.h:79
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
ThreadFrame
Definition: threadframe.h:27
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:397
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:460
VLC
Definition: vlc.h:36
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:968
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:829
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:490
VLC::table
VLCElem * table
Definition: vlc.h:38
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:362
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:81
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1024
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:56
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:39
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:674
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:38
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:53
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:74
mpeg_er.h
d
d
Definition: ffmpeg_filter.c:409
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:80
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:52
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:47
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:350
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:55
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int *offset)
Definition: rv34.c:125
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1373
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:539