#define MK_IDCT_DC_ADD4_C(name) \
static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
    name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
} \
\
static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst +  0, block[0], stride); \
    name ## _idct_dc_add_c(dst +  4, block[1], stride); \
    name ## _idct_dc_add_c(dst +  8, block[2], stride); \
    name ## _idct_dc_add_c(dst + 12, block[3], stride); \
}
#if CONFIG_VP7_DECODER
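/* VP7 uses a fixed-point 4x4 transform; reading the constants,
 * 23170 ~ sqrt(2) * 2^14, 30274 ~ cos(pi/8) * 2^15 and
 * 12540 ~ sin(pi/8) * 2^15 (an interpretation of the values,
 * not wording taken from the spec). */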
static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];
    for (i = 0; i < 4; i++) {
        a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
        b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
        c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
        d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }
    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        AV_ZERO64(dc + i * 4);

        block[0][i][0] = (a1 + d1 + 0x20000) >> 18;
        block[3][i][0] = (a1 - d1 + 0x20000) >> 18;
        block[1][i][0] = (b1 + c1 + 0x20000) >> 18;
        block[2][i][0] = (b1 - c1 + 0x20000) >> 18;
    }
}
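/* DC-only fast path: when only dc[0] is nonzero, all 16 outputs of the
 * transform above are equal, so the value is computed once. */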
static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}
static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];
    for (i = 0; i < 4; i++) {
        a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
        b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
        c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
        d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
        AV_ZERO64(block + i * 4);
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }
    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] + ((a1 + d1 + 0x20000) >> 18));
        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] + ((a1 - d1 + 0x20000) >> 18));
        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] + ((b1 + c1 + 0x20000) >> 18));
        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] + ((b1 - c1 + 0x20000) >> 18));
    }
}
static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst   += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp7)
#endif /* CONFIG_VP7_DECODER */
#if CONFIG_VP8_DECODER
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;
    for (i = 0; i < 4; i++) {
        t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
        t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
        t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
        t3 = dc[0 * 4 + i] - dc[3 * 4 + i];

        dc[0 * 4 + i] = t0 + t1;
        dc[1 * 4 + i] = t3 + t2;
        dc[2 * 4 + i] = t0 - t1;
        dc[3 * 4 + i] = t3 - t2;
    }
    for (i = 0; i < 4; i++) {
        t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
        t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
        t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
        t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
        AV_ZERO64(dc + i * 4);

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}
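/* DC-only fast path: with only dc[0] set, both butterfly passes above
 * reduce every output to (dc[0] + 3) >> 3. */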
static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}
#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
#define MUL_35468(a) (((a) * 35468) >> 16)
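/* 16-bit fixed-point rotation constants of the VP8 4x4 IDCT:
 * MUL_20091(a) ~ a * sqrt(2) * cos(pi/8), with "+ (a)" supplying the
 * integer part and 20091/65536 the fraction;
 * MUL_35468(a) ~ a * sqrt(2) * sin(pi/8). */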
static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];
    for (i = 0; i < 4; i++) {
        t0 = block[0 * 4 + i] + block[2 * 4 + i];
        t1 = block[0 * 4 + i] - block[2 * 4 + i];
        t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
        t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
        block[0 * 4 + i] = 0;
        block[1 * 4 + i] = 0;
        block[2 * 4 + i] = 0;
        block[3 * 4 + i] = 0;

        tmp[i * 4 + 0] = t0 + t3;
        tmp[i * 4 + 1] = t1 + t2;
        tmp[i * 4 + 2] = t1 - t2;
        tmp[i * 4 + 3] = t0 - t3;
    }
    for (i = 0; i < 4; i++) {
        t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
        t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
        t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
        t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst   += stride;
    }
}
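/* For a DC-only block the IDCT above reduces to adding
 * (block[0] + 4) >> 3 to every pixel of the 4x4 block. */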
static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (block[0] + 4) >> 3;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst   += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp8)
#endif /* CONFIG_VP8_DECODER */
/* load the pixels straddling the edge into p3..p0 / q0..q3
 * (p side at negative offsets, q side at positive) */
#define LOAD_PIXELS \
    int av_unused p3 = p[-4 * stride]; \
    int av_unused p2 = p[-3 * stride]; \
    int av_unused p1 = p[-2 * stride]; \
    int av_unused p0 = p[-1 * stride]; \
    int av_unused q0 = p[ 0 * stride]; \
    int av_unused q1 = p[ 1 * stride]; \
    int av_unused q2 = p[ 2 * stride]; \
    int av_unused q3 = p[ 3 * stride];
#define clip_int8(n) (cm[(n) + 0x80] - 0x80)
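/* Core edge filter shared by the normal and simple loop filters:
 * computes a clipped delta from p1/p0/q0/q1 and applies it to p0/q0
 * (and, in the !is4tap case, half of it to p1/q1). */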
/* NOTE: the signature is reconstructed from the call sites below,
 * filter_common(p, stride, is4tap, vpn), where vpn is 7 or 8. */
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap, int vpn)
{
    LOAD_PIXELS
    int a, f1, f2;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3 * (q0 - p0);
    if (is4tap)
        a += clip_int8(p1 - q1);
    a = clip_int8(a);

    f1 = FFMIN(a + 4, 127) >> 3;

    if (vpn == 7)
        f2 = f1 - ((a & 7) == 4);
    else
        f2 = FFMIN(a + 3, 127) >> 3;

    p[-1 * stride] = cm[p0 + f2];
    p[ 0 * stride] = cm[q0 - f1];

    /* only used on blocks without high edge variance (!is4tap) */
    if (!is4tap) {
        a = (f1 + 1) >> 1;
        p[-2 * stride] = cm[p1 + a];
        p[ 1 * stride] = cm[q1 - a];
    }
}
static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return FFABS(p0 - q0) <= flim;
}
static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
}
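/* E is the edge-difference limit and I the interior-difference limit,
 * matching the flim_E / flim_I parameters used below. */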
#define NORMAL_LIMIT(vpn) \
static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I) \
{ \
    LOAD_PIXELS \
    return vp ## vpn ## _simple_limit(p, stride, E) \
        && FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && FFABS(p1 - p0) <= I \
        && FFABS(q3 - q2) <= I && FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
}

NORMAL_LIMIT(7)
NORMAL_LIMIT(8)
// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
}
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1 - q1);
    w = clip_int8(w + 3 * (q0 - p0));

    a0 = (27 * w + 63) >> 7;
    a1 = (18 * w + 63) >> 7;
    a2 = ( 9 * w + 63) >> 7;

    p[-3 * stride] = cm[p2 + a2];
    p[-2 * stride] = cm[p1 + a1];
    p[-1 * stride] = cm[p0 + a0];
    p[ 0 * stride] = cm[q0 - a0];
    p[ 1 * stride] = cm[q1 - a1];
    p[ 2 * stride] = cm[q2 - a2];
}
#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride, \
                                                                             int flim_E, int flim_I, int hev_thresh) \
{ \
    int i; \
\
    for (i = 0; i < size; i++) \
        if (vp ## vpn ## _normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
            if (hev(dst + i * stridea, strideb, hev_thresh)) \
                filter_common(dst + i * stridea, strideb, 1, vpn); \
            else \
                filter_mbedge(dst + i * stridea, strideb); \
        } \
} \
\
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride, \
                                                                                   int flim_E, int flim_I, int hev_thresh) \
{ \
    int i; \
\
    for (i = 0; i < size; i++) \
        if (vp ## vpn ## _normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
            int hv = hev(dst + i * stridea, strideb, hev_thresh); \
            if (hv) \
                filter_common(dst + i * stridea, strideb, 1, vpn); \
            else \
                filter_common(dst + i * stridea, strideb, 0, vpn); \
        } \
}
#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, \
                                                       int fE, int fI, int hev_thresh) \
{ \
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
} \
\
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, \
                                                             int fE, int fI, int hev_thresh) \
{ \
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh); \
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh); \
}
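/* The "simple" filter runs across all 16 pixels of a macroblock edge
 * and only adjusts p0/q0 (the is4tap path of filter_common). */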
#define LOOP_FILTER_SIMPLE(vpn) \
static void vp ## vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim) \
{ \
    int i; \
\
    for (i = 0; i < 16; i++) \
        if (vp ## vpn ## _simple_limit(dst + i, stride, flim)) \
            filter_common(dst + i, stride, 1, vpn); \
} \
\
static void vp ## vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim) \
{ \
    int i; \
\
    for (i = 0; i < 16; i++) \
        if (vp ## vpn ## _simple_limit(dst + i * stride, 1, flim)) \
            filter_common(dst + i * stride, 1, 1, vpn); \
}
#if CONFIG_VP7_DECODER
LOOP_FILTER(7, v, 16, 1, stride, )
LOOP_FILTER(7, h, 16, stride, 1, )
UV_LOOP_FILTER(7, v, 1, stride)
UV_LOOP_FILTER(7, h, stride, 1)
LOOP_FILTER_SIMPLE(7)
#endif /* CONFIG_VP7_DECODER */

#if CONFIG_VP8_DECODER
LOOP_FILTER(8, v, 16, 1, stride, )
LOOP_FILTER(8, h, 16, stride, 1, )
UV_LOOP_FILTER(8, v, 1, stride)
UV_LOOP_FILTER(8, h, stride, 1)
LOOP_FILTER_SIMPLE(8)
#endif /* CONFIG_VP8_DECODER */
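/* Six-tap subpel filter coefficients, indexed by (mx or my) - 1.
 * F[1] and F[4] are applied with a negative sign in the FILTER_*TAP
 * macros below, so each row's signed taps sum to 128 (hence >> 7). */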
static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};
#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) \
{ \
    int i; \
\
    for (i = 0; i < h; i++, dst += dststride, src += srcstride) { \
        memcpy(dst, src, WIDTH); \
    } \
}

/* instantiations for the sizes referenced by the MC tables below */
PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + F[0] * src[x - 2 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
#define FILTER_4TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
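/* The EPEL functions below apply the filters horizontally (_h),
 * vertically (_v) or both (_hNvM). Rows of subpel_filters whose outer
 * taps F[0] and F[5] are zero can be applied with the cheaper
 * 4-tap variant. */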
#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}
#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my - 1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    /* filter horizontally into tmp, including the extra rows the
     * vertical filter will read above and below the block */ \
    src -= (2 - (VTAPS == 4)) * srcstride; \
\
    for (y = 0; y < h + VTAPS - 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    /* then filter tmp vertically into dst */ \
    tmp = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
    filter = subpel_filters[my - 1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

/* the size/tap combinations below are the ones the VP8_MC_FUNC table
 * expects (an inference from that table) */
VP8_EPEL_H(16, 4)
VP8_EPEL_H(8,  4)
VP8_EPEL_H(4,  4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8,  6)
VP8_EPEL_H(4,  6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8,  4)
VP8_EPEL_V(4,  4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8,  6)
VP8_EPEL_V(4,  6)
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8,  4, 4)
VP8_EPEL_HV(4,  4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8,  4, 6)
VP8_EPEL_HV(4,  4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8,  6, 4)
VP8_EPEL_HV(4,  6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8,  6, 6)
VP8_EPEL_HV(4,  6, 6)
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int c = 8 - my, d = my; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int c = 8 - my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    for (y = 0; y < h + 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        tmp += SIZE; \
        src += sstride; \
    } \
\
    tmp = tmp_array; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
        dst += dstride; \
        tmp += SIZE; \
    } \
}

/* instantiations for the sizes referenced by VP8_BILINEAR_MC_FUNC below */
VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
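/* Table layout (inferred from the assignments below): after the size
 * index, the first subscript selects vertical filtering (0 = none,
 * 1 = 4-tap, 2 = 6-tap) and the second horizontal; the bilinear table
 * reuses _h/_v/_hv for both nonzero indices. */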
#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c
#if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
#define VPX(f) vp7 ? vp7_ ## f : vp8_ ## f
#elif CONFIG_VP7_DECODER
#define VPX(f) vp7_ ## f
#else // CONFIG_VP8_DECODER
#define VPX(f) vp8_ ## f
#endif