Go to the documentation of this file.
27 #include "config_components.h"
46 for (i = 0; i < 8; i++) {
51 d1 = (a - d + 3 + rnd) >> 3;
52 d2 = (a - d + b - c + 4 - rnd) >> 3;
70 for (i = 0; i < 8; i++) {
75 d1 = (a - d + 3 + rnd) >> 3;
76 d2 = (a - d + b - c + 4 - rnd) >> 3;
92 int rnd1 = 4, rnd2 = 3;
93 for (i = 0; i < 8; i++) {
101 top[48] = ((a * 8) - d1 + rnd1) >> 3;
102 top[56] = ((b * 8) - d2 + rnd2) >> 3;
103 bottom[0] = ((c * 8) + d2 + rnd1) >> 3;
104 bottom[8] = ((d * 8) + d1 + rnd2) >> 3;
118 int rnd1 = flags & 2 ? 3 : 4;
120 for (i = 0; i < 8; i++) {
128 left[6] = ((a * 8) - d1 + rnd1) >> 3;
129 left[7] = ((b * 8) - d2 + rnd2) >> 3;
130 right[0] = ((c * 8) + d2 + rnd1) >> 3;
131 right[1] = ((d * 8) + d1 + rnd2) >> 3;
133 right += right_stride;
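The assignments above are the heart of the overlap-smoothing filter: d1 and d2 are correction terms built from the two samples on each side of the block edge, and every edge sample is re-centred with the alternating rounding constants rnd1/rnd2. A minimal sketch of the same arithmetic for a single edge position, using a hypothetical smooth_edge() helper instead of the file's top/bottom and left/right pointer layout:

    #include <stdint.h>

    /* Hedged sketch: smooth one position of an overlapped block edge.
     * ab[0], ab[1] are the last two samples of the first block,
     * cd[0], cd[1] the first two of the second block;
     * rnd1/rnd2 are the alternating rounding constants (4/3 or 3/4). */
    static void smooth_edge(int16_t *ab, int16_t *cd, int rnd1, int rnd2)
    {
        int a = ab[0], b = ab[1], c = cd[0], d = cd[1];
        int d1 = a - d;
        int d2 = a - d + b - c;

        ab[0] = ((a * 8) - d1 + rnd1) >> 3;   /* cf. top[48] / left[6]   */
        ab[1] = ((b * 8) - d2 + rnd2) >> 3;   /* cf. top[56] / left[7]   */
        cd[0] = ((c * 8) + d2 + rnd1) >> 3;   /* cf. bottom[0] / right[0] */
        cd[1] = ((d * 8) + d1 + rnd2) >> 3;   /* cf. bottom[8] / right[1] */
    }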
154 int a0_sign = a0 >> 31;
156 a0 = (a0 ^ a0_sign) - a0_sign;
164 int clip_sign = clip >> 31;
166 clip = ((clip ^ clip_sign) - clip_sign) >> 1;
169 int d = 5 * (a3 - a0);
170 int d_sign = (d >> 31);
172 d = ((d ^ d_sign) - d_sign) >> 3;
175 if (d_sign ^ clip_sign)
179 d = (d ^ d_sign) - d_sign;
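The recurring pattern (x ^ sign) - sign with sign = x >> 31 is a branch-free absolute value, used here so the deblocking decision avoids conditional jumps. A standalone illustration (not part of the file), assuming a 32-bit arithmetic right shift:

    #include <stdint.h>
    #include <stdio.h>

    /* Branch-free |x|, the idiom used by vc1_filter_line: sign is 0 for
     * x >= 0 and -1 (all bits set) for x < 0, so (x ^ sign) - sign negates
     * negative values. Undefined only for INT32_MIN, which the filter's
     * small value ranges never produce. */
    static int32_t abs_via_sign_mask(int32_t x)
    {
        int32_t sign = x >> 31;   /* arithmetic shift: 0 or -1 */
        return (x ^ sign) - sign;
    }

    int main(void)
    {
        printf("%d %d %d\n", (int)abs_via_sign_mask(-7),
               (int)abs_via_sign_mask(0), (int)abs_via_sign_mask(42));
        return 0;                 /* prints: 7 0 42 */
    }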
205 for (i = 0; i < len; i += 4) {
252 dc = (3 * dc + 1) >> 1;
253 dc = (3 * dc + 16) >> 5;
255 for (i = 0; i < 8; i++) {
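The two dc scalings above fold the row- and column-pass DC gains of the 8x8 inverse transform into two integer shifts; the loop that follows adds the scaled DC to every pixel of the block with clipping. A hedged sketch of that DC-only path, assuming av_clip_uint8 from libavutil/common.h:

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/common.h"

    /* Hedged sketch of a DC-only 8x8 inverse-transform add, mirroring the
     * dc scaling shown above; dest/stride/block follow the VC1DSPContext
     * convention. Function name is illustrative, not from the file. */
    static void inv_trans_8x8_dc_sketch(uint8_t *dest, ptrdiff_t stride,
                                        int16_t *block)
    {
        int dc = block[0];
        dc = (3 * dc +  1) >> 1;   /* row-pass DC gain + rounding    */
        dc = (3 * dc + 16) >> 5;   /* column-pass DC gain + rounding */

        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 8; j++)
                dest[j] = av_clip_uint8(dest[j] + dc);
            dest += stride;
        }
    }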
276 for (i = 0; i < 8; i++) {
292 dst[0] = (t5 + t1) >> 3;
293 dst[1] = (t6 + t2) >> 3;
294 dst[2] = (t7 + t3) >> 3;
295 dst[3] = (t8 + t4) >> 3;
296 dst[4] = (t8 - t4) >> 3;
297 dst[5] = (t7 - t3) >> 3;
298 dst[6] = (t6 - t2) >> 3;
299 dst[7] = (t5 - t1) >> 3;
307 for (i = 0; i < 8; i++) {
323 dst[ 0] = (t5 + t1) >> 7;
324 dst[ 8] = (t6 + t2) >> 7;
325 dst[16] = (t7 + t3) >> 7;
326 dst[24] = (t8 + t4) >> 7;
327 dst[32] = (t8 - t4 + 1) >> 7;
328 dst[40] = (t7 - t3 + 1) >> 7;
329 dst[48] = (t6 - t2 + 1) >> 7;
330 dst[56] = (t5 - t1 + 1) >> 7;
343 dc = (3 * dc + 1) >> 1;
344 dc = (17 * dc + 64) >> 7;
346 for (i = 0; i < 4; i++) {
368 for (i = 0; i < 4; i++) {
384 dst[0] = (t5 + t1) >> 3;
385 dst[1] = (t6 + t2) >> 3;
386 dst[2] = (t7 + t3) >> 3;
387 dst[3] = (t8 + t4) >> 3;
388 dst[4] = (t8 - t4) >> 3;
389 dst[5] = (t7 - t3) >> 3;
390 dst[6] = (t6 - t2) >> 3;
391 dst[7] = (t5 - t1) >> 3;
398 for (i = 0; i < 8; i++) {
420 dc = (17 * dc + 4) >> 3;
421 dc = (12 * dc + 64) >> 7;
423 for (i = 0; i < 8; i++) {
441 for (i = 0; i < 8; i++) {
447 dst[0] = (t1 + t3) >> 3;
448 dst[1] = (t2 - t4) >> 3;
449 dst[2] = (t2 + t4) >> 3;
450 dst[3] = (t1 - t3) >> 3;
457 for (i = 0; i < 4; i++) {
493 dc = (17 * dc + 4) >> 3;
494 dc = (17 * dc + 64) >> 7;
496 for (i = 0; i < 4; i++) {
513 for (i = 0; i < 4; i++) {
519 dst[0] = (t1 + t3) >> 3;
520 dst[1] = (t2 - t4) >> 3;
521 dst[2] = (t2 + t4) >> 3;
522 dst[3] = (t1 - t3) >> 3;
529 for (i = 0; i < 4; i++) {
548 #define VC1_MSPEL_FILTER_16B(DIR, TYPE) \
549 static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, \
557 return -4 * src[-stride] + 53 * src[0] + \
558 18 * src[stride] - 3 * src[stride * 2]; \
560 return -1 * src[-stride] + 9 * src[0] + \
561 9 * src[stride] - 1 * src[stride * 2]; \
563 return -3 * src[-stride] + 18 * src[0] + \
564 53 * src[stride] - 4 * src[stride * 2]; \
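The three returns above are the 4-tap FIR kernels for the quarter-, half- and three-quarter-pel positions ({-4,53,18,-3}, {-1,9,9,-1}, {-3,18,53,-4}); the quarter-pel kernels sum to 64 and the half-pel kernel to 16, which is why the surrounding macros carry per-mode shift values. A hedged, non-macro sketch of the same filter applied directly to 8-bit samples (names and the exact rounding bias are illustrative, not copied from the file):

    #include <stdint.h>

    /* Hedged sketch of the 4-tap VC-1 sub-pel filter on 8-bit samples;
     * mode selects the 1/4, 1/2 or 3/4 position, r is a rounding term
     * that the caller folds in (the file derives it from rnd). */
    static int mspel_filter_sketch(const uint8_t *src, int stride,
                                   int mode, int r)
    {
        switch (mode) {
        case 1: /* 1/4 position, taps sum to 64 */
            return (-4 * src[-stride] + 53 * src[0] +
                    18 * src[ stride] -  3 * src[stride * 2] + r) >> 6;
        case 2: /* 1/2 position, taps sum to 16 */
            return (-1 * src[-stride] +  9 * src[0] +
                     9 * src[ stride] -  1 * src[stride * 2] + r) >> 4;
        case 3: /* 3/4 position, taps sum to 64 */
            return (-3 * src[-stride] + 18 * src[0] +
                    53 * src[ stride] -  4 * src[stride * 2] + r) >> 6;
        }
        return src[0]; /* mode 0: integer position, plain copy */
    }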
593 #define VC1_MSPEL_MC(OP, OP4, OPNAME) \
594 static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst, \
595 const uint8_t *src, \
607 static const int shift_value[] = { 0, 5, 1, 5 }; \
608 int shift = (shift_value[hmode] + shift_value[vmode]) >> 1; \
609 int16_t tmp[11 * 8], *tptr = tmp; \
611 r = (1 << (shift - 1)) + rnd - 1; \
614 for (j = 0; j < 8; j++) { \
615 for (i = 0; i < 11; i++) \
616 tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
623 for (j = 0; j < 8; j++) { \
624 for (i = 0; i < 8; i++) \
625 OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
634 for (j = 0; j < 8; j++) { \
635 for (i = 0; i < 8; i++) \
636 OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r)); \
645 for (j = 0; j < 8; j++) { \
646 for (i = 0; i < 8; i++) \
647 OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd)); \
652 static av_always_inline void OPNAME ## vc1_mspel_mc_16(uint8_t *dst, \
653 const uint8_t *src, \
665 static const int shift_value[] = { 0, 5, 1, 5 }; \
666 int shift = (shift_value[hmode] + shift_value[vmode]) >> 1; \
667 int16_t tmp[19 * 16], *tptr = tmp; \
669 r = (1 << (shift - 1)) + rnd - 1; \
672 for (j = 0; j < 16; j++) { \
673 for (i = 0; i < 19; i++) \
674 tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
681 for (j = 0; j < 16; j++) { \
682 for (i = 0; i < 16; i++) \
683 OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
692 for (j = 0; j < 16; j++) { \
693 for (i = 0; i < 16; i++) \
694 OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r)); \
703 for (j = 0; j < 16; j++) { \
704 for (i = 0; i < 16; i++) \
705 OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd)); \
710 static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\
713 OP4(*(uint32_t*)(block ), AV_RN32(pixels ));\
714 OP4(*(uint32_t*)(block+4), AV_RN32(pixels+4));\
719 static void OPNAME ## pixels16x16_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\
721 for(i=0; i<16; i++){\
722 OP4(*(uint32_t*)(block ), AV_RN32(pixels ));\
723 OP4(*(uint32_t*)(block+ 4), AV_RN32(pixels+ 4));\
724 OP4(*(uint32_t*)(block+ 8), AV_RN32(pixels+ 8));\
725 OP4(*(uint32_t*)(block+12), AV_RN32(pixels+12));\
731 #define op_put(a, b) (a) = av_clip_uint8(b)
732 #define op_avg(a, b) (a) = ((a) + av_clip_uint8(b) + 1) >> 1
733 #define op4_avg(a, b) (a) = rnd_avg32(a, b)
734 #define op4_put(a, b) (a) = (b)
741 #define PUT_VC1_MSPEL(a, b) \
742 static void put_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
743 const uint8_t *src, \
744 ptrdiff_t stride, int rnd) \
746 put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
748 static void avg_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
749 const uint8_t *src, \
750 ptrdiff_t stride, int rnd) \
752 avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
754 static void put_vc1_mspel_mc ## a ## b ## _16_c(uint8_t *dst, \
755 const uint8_t *src, \
756 ptrdiff_t stride, int rnd) \
758 put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd); \
760 static void avg_vc1_mspel_mc ## a ## b ## _16_c(uint8_t *dst, \
761 const uint8_t *src, \
762 ptrdiff_t stride, int rnd) \
764 avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd); \
786 #define chroma_mc(a) \
787 ((A * src[a] + B * src[a + 1] + \
788 C * src[stride + a] + D * src[stride + a + 1] + 32 - 4) >> 6)
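chroma_mc is plain bilinear interpolation: the weights A, B, C, D defined just below are (8-x)(8-y), x(8-y), (8-x)y and xy, which always sum to 64, so the >> 6 renormalizes; the "32 - 4" bias is the no-rounding variant used by the _no_rnd_ functions. A self-contained sketch of one interpolated sample with ordinary +32 rounding (illustrative only, not the file's macro):

    #include <stddef.h>
    #include <stdint.h>

    /* Hedged sketch: one bilinearly interpolated chroma sample at the
     * fractional position (x/8, y/8); src points at the top-left sample
     * of the 2x2 neighbourhood. */
    static uint8_t chroma_bilinear_sketch(const uint8_t *src, ptrdiff_t stride,
                                          int x, int y)
    {
        const int A = (8 - x) * (8 - y);
        const int B = (    x) * (8 - y);
        const int C = (8 - x) * (    y);
        const int D = (    x) * (    y);   /* A + B + C + D == 64 */

        return (A * src[0]      + B * src[1] +
                C * src[stride] + D * src[stride + 1] + 32) >> 6;
    }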
791 ptrdiff_t stride, int h, int x, int y)
793 const int A = (8 - x) * (8 - y);
794 const int B = (x) * (8 - y);
795 const int C = (8 - x) * (y);
796 const int D = (x) * (y);
799 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
801 for (i = 0; i < h; i++) {
816 ptrdiff_t stride, int h, int x, int y)
818 const int A = (8 - x) * (8 - y);
819 const int B = (x) * (8 - y);
820 const int C = (8 - x) * (y);
821 const int D = (x) * (y);
824 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
826 for (i = 0; i < h; i++) {
836 #define avg2(a, b) (((a) + (b) + 1) >> 1)
839 ptrdiff_t stride, int h, int x, int y)
841 const int A = (8 - x) * (8 - y);
842 const int B = (x) * (8 - y);
843 const int C = (8 - x) * (y);
844 const int D = (x) * (y);
847 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
847 for (i = 0; i < h; i++) {
865 ptrdiff_t stride, int h, int x, int y)
867 const int A = (8 - x) * (8 - y);
868 const int B = ( x) * (8 - y);
869 const int C = (8 - x) * ( y);
870 const int D = ( x) * ( y);
873 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
873 for (i = 0; i < h; i++) {
885 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
887 static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset,
888 int advance, int count)
893 *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
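sprite_h walks the source with a 16.16 fixed-point position: the high 16 bits of offset select the sample pair and the low 16 bits blend between them, with offset advancing by "advance" per output pixel. A minimal sketch of that loop, assuming nothing beyond the fragment above:

    #include <stdint.h>

    /* Hedged sketch of a 16.16 fixed-point horizontal resampler in the
     * style of sprite_h_c: writes 'count' pixels from 'src' starting at
     * fixed-point position 'offset', stepping by 'advance' per sample. */
    static void sprite_h_sketch(uint8_t *dst, const uint8_t *src,
                                int offset, int advance, int count)
    {
        while (count--) {
            int a = src[ offset >> 16     ];
            int b = src[(offset >> 16) + 1];
            *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
            offset += advance;
        }
    }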
899 const uint8_t *src1a,
900 const uint8_t *src1b,
903 const uint8_t *src2a,
904 const uint8_t *src2b,
906 int alpha, int scaled,
914 a1 = a1 + ((b1 - a1) * offset1 >> 16);
920 a2 = a2 + ((b2 - a2) * offset2 >> 16);
928 static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
929 const uint8_t *src1b,
932 sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
935 static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
936 const uint8_t *src2a,
939 sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
942 static void sprite_v_double_onescale_c(uint8_t *dst,
943 const uint8_t *src1a,
944 const uint8_t *src1b,
946 const uint8_t *src2a,
949 sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1,
953 static void sprite_v_double_twoscale_c(uint8_t *dst,
954 const uint8_t *src1a,
955 const uint8_t *src1b,
957 const uint8_t *src2a,
958 const uint8_t *src2b,
963 sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2,
968 #define FN_ASSIGN(X, Y) \
969 dsp->put_vc1_mspel_pixels_tab[1][X+4*Y] = put_vc1_mspel_mc##X##Y##_c; \
970 dsp->put_vc1_mspel_pixels_tab[0][X+4*Y] = put_vc1_mspel_mc##X##Y##_16_c; \
971 dsp->avg_vc1_mspel_pixels_tab[1][X+4*Y] = avg_vc1_mspel_mc##X##Y##_c; \
972 dsp->avg_vc1_mspel_pixels_tab[0][X+4*Y] = avg_vc1_mspel_mc##X##Y##_16_c
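FN_ASSIGN populates the two mspel tables: index [1] holds the 8x8 functions, [0] the 16x16 ones, and the 16 entries of each are addressed as hmode + 4*vmode with hmode/vmode in 0..3. A hedged usage sketch, assuming a VC1DSPContext that has already been set up with ff_vc1dsp_init():

    #include <stddef.h>
    #include <stdint.h>
    #include "vc1dsp.h"

    /* Hedged usage sketch: pick the 8x8 sub-pel MC function for horizontal
     * mode hmode and vertical mode vmode (each 0..3) and run it. */
    static void mc_block_sketch(VC1DSPContext *dsp, uint8_t *dst,
                                const uint8_t *src, ptrdiff_t stride,
                                int hmode, int vmode, int rnd)
    {
        vc1op_pixels_func mc = dsp->put_vc1_mspel_pixels_tab[1][hmode + 4 * vmode];
        mc(dst, src, stride, rnd);
    }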
1025 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
1048 #elif ARCH_LOONGARCH
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
#define VC1_MSPEL_MC(OP, OP4, OPNAME)
void(* vc1_v_loop_filter16)(uint8_t *src, ptrdiff_t stride, int pq)
av_cold void ff_vc1dsp_init_aarch64(VC1DSPContext *dsp)
void(* vc1_inv_trans_4x4)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
vc1op_pixels_func avg_vc1_mspel_pixels_tab[2][16]
#define VC1_MSPEL_FILTER_16B(DIR, TYPE)
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
static void vc1_v_overlap_c(uint8_t *src, ptrdiff_t stride)
int ff_startcode_find_candidate_c(const uint8_t *buf, int size)
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
av_cold void ff_vc1dsp_init_mips(VC1DSPContext *dsp)
static void vc1_loop_filter(uint8_t *src, int step, ptrdiff_t stride, int len, int pq)
VC-1 in-loop deblocking filter.
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static void vc1_inv_trans_8x8_c(int16_t block[64])
static void vc1_inv_trans_4x8_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r)
void(* vc1_h_overlap)(uint8_t *src, ptrdiff_t stride)
void(* vc1_v_loop_filter4)(uint8_t *src, ptrdiff_t stride, int pq)
av_cold void ff_vc1dsp_init_arm(VC1DSPContext *dsp)
#define PUT_VC1_MSPEL(a, b)
void(* vc1_h_loop_filter4)(uint8_t *src, ptrdiff_t stride, int pq)
static void avg_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y)
static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y)
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
void(* vc1_h_loop_filter16)(uint8_t *src, ptrdiff_t stride, int pq)
static void vc1_v_loop_filter8_c(uint8_t *src, ptrdiff_t stride, int pq)
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
static av_always_inline int vc1_filter_line(uint8_t *src, ptrdiff_t stride, int pq)
VC-1 in-loop deblocking filter for one line.
static void vc1_h_loop_filter4_c(uint8_t *src, ptrdiff_t stride, int pq)
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* vc1_v_overlap)(uint8_t *src, ptrdiff_t stride)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* vc1_inv_trans_8x4)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static void vc1_h_loop_filter8_c(uint8_t *src, ptrdiff_t stride, int pq)
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y)
void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
void(* vc1_inv_trans_8x8)(int16_t *b)
int(* startcode_find_candidate)(const uint8_t *buf, int size)
Search buf from the start for up to size bytes.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
int(* vc1_unescape_buffer)(const uint8_t *src, int size, uint8_t *dst)
static void vc1_h_s_overlap_c(int16_t *left, int16_t *right, ptrdiff_t left_stride, ptrdiff_t right_stride, int flags)
static void vc1_inv_trans_4x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
void(* vc1_h_loop_filter8)(uint8_t *src, ptrdiff_t stride, int pq)
vc1op_pixels_func put_vc1_mspel_pixels_tab[2][16]
void(* vc1_v_loop_filter8)(uint8_t *src, ptrdiff_t stride, int pq)
static void vc1_h_overlap_c(uint8_t *src, ptrdiff_t stride)
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y)
av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
static void vc1_v_loop_filter4_c(uint8_t *src, ptrdiff_t stride, int pq)
static void vc1_v_loop_filter16_c(uint8_t *src, ptrdiff_t stride, int pq)
static void vc1_inv_trans_8x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* vc1_inv_trans_4x8)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
av_cold void ff_vc1dsp_init_loongarch(VC1DSPContext *dsp)
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right, ptrdiff_t left_stride, ptrdiff_t right_stride, int flags)
static void vc1_h_loop_filter16_c(uint8_t *src, ptrdiff_t stride, int pq)