00030 #include "bit_depth_template.c"
00031
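/* copy_blockN(): copy an N-pixel-wide, h-row-high block from src to dst using the
 * packed load/store macros from the bit-depth template (AV_RN2P/AV_WN2P move two
 * pixels, AV_RN4P/AV_WN4P four pixels, at the template's bit depth). */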
00032 static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
00033 {
00034 int i;
00035 for(i=0; i<h; i++)
00036 {
00037 AV_WN2P(dst , AV_RN2P(src ));
00038 dst+=dstStride;
00039 src+=srcStride;
00040 }
00041 }
00042
00043 static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
00044 {
00045 int i;
00046 for(i=0; i<h; i++)
00047 {
00048 AV_WN4P(dst , AV_RN4P(src ));
00049 dst+=dstStride;
00050 src+=srcStride;
00051 }
00052 }
00053
00054 static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
00055 {
00056 int i;
00057 for(i=0; i<h; i++)
00058 {
00059 AV_WN4P(dst , AV_RN4P(src ));
00060 AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
00061 dst+=dstStride;
00062 src+=srcStride;
00063 }
00064 }
00065
00066 static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
00067 {
00068 int i;
00069 for(i=0; i<h; i++)
00070 {
00071 AV_WN4P(dst , AV_RN4P(src ));
00072 AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
00073 AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
00074 AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
00075 dst+=dstStride;
00076 src+=srcStride;
00077 }
00078 }
00079
/* Replicate the borders of a plane: extend each row by 'w' pixels to the left and
   right and, depending on 'sides', replicate the top/bottom rows 'h' times. */
00082 static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
00083 {
00084 pixel *buf = (pixel*)p_buf;
00085 int wrap = p_wrap / sizeof(pixel);
00086 pixel *ptr, *last_line;
00087 int i;
00088
    /* left and right */
00090 ptr = buf;
00091 for(i=0;i<height;i++) {
00092 #if BIT_DEPTH > 8
00093 int j;
00094 for (j = 0; j < w; j++) {
00095 ptr[j-w] = ptr[0];
00096 ptr[j+width] = ptr[width-1];
00097 }
00098 #else
00099 memset(ptr - w, ptr[0], w);
00100 memset(ptr + width, ptr[width-1], w);
00101 #endif
00102 ptr += wrap;
00103 }
00104
    /* top and bottom */
00106 buf -= w;
00107 last_line = buf + (height - 1) * wrap;
00108 if (sides & EDGE_TOP)
00109 for(i = 0; i < h; i++)
00110 memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel));
00111 if (sides & EDGE_BOTTOM)
00112 for (i = 0; i < h; i++)
00113 memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel));
00114 }
00115
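/**
 * Copy a rectangular block of samples into a temporary buffer, replicating the
 * border samples for the parts of the block that fall outside the source picture.
 * @param buf      destination buffer
 * @param src      source buffer
 * @param linesize byte distance between two vertically adjacent samples
 * @param block_w  width  of the block to copy
 * @param block_h  height of the block to copy
 * @param src_x    x position of the block's top-left sample in the source
 * @param src_y    y position of the block's top-left sample in the source
 * @param w        width  of the source buffer
 * @param h        height of the source buffer
 */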
00128 void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
00129 int src_x, int src_y, int w, int h){
00130 int x, y;
00131 int start_y, start_x, end_y, end_x;
00132
00133 if(src_y>= h){
00134 src+= (h-1-src_y)*linesize;
00135 src_y=h-1;
00136 }else if(src_y<=-block_h){
00137 src+= (1-block_h-src_y)*linesize;
00138 src_y=1-block_h;
00139 }
00140 if(src_x>= w){
00141 src+= (w-1-src_x)*sizeof(pixel);
00142 src_x=w-1;
00143 }else if(src_x<=-block_w){
00144 src+= (1-block_w-src_x)*sizeof(pixel);
00145 src_x=1-block_w;
00146 }
00147
00148 start_y= FFMAX(0, -src_y);
00149 start_x= FFMAX(0, -src_x);
00150 end_y= FFMIN(block_h, h-src_y);
00151 end_x= FFMIN(block_w, w-src_x);
00152 assert(start_y < end_y && block_h);
00153 assert(start_x < end_x && block_w);
00154
00155 w = end_x - start_x;
00156 src += start_y*linesize + start_x*sizeof(pixel);
00157 buf += start_x*sizeof(pixel);
00158
    //top: replicate the first available line
00160 for(y=0; y<start_y; y++){
00161 memcpy(buf, src, w*sizeof(pixel));
00162 buf += linesize;
00163 }
00164
    //copy the existing part
00166 for(; y<end_y; y++){
00167 memcpy(buf, src, w*sizeof(pixel));
00168 src += linesize;
00169 buf += linesize;
00170 }
00171
    //bottom: replicate the last available line
00173 src -= linesize;
00174 for(; y<block_h; y++){
00175 memcpy(buf, src, w*sizeof(pixel));
00176 buf += linesize;
00177 }
00178
00179 buf -= block_h * linesize + start_x*sizeof(pixel);
00180 while (block_h--){
00181 pixel *bufp = (pixel*)buf;
        //left
00183 for(x=0; x<start_x; x++){
00184 bufp[x] = bufp[start_x];
00185 }
00186
        //right
00188 for(x=end_x; x<block_w; x++){
00189 bufp[x] = bufp[end_x - 1];
00190 }
00191 buf += linesize;
00192 }
00193 }
00194
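/* DCTELEM_FUNCS() instantiates the block transfer helpers for one DCT coefficient
 * type: get_pixels() loads an 8x8 pixel block into coefficients, add_pixels8() and
 * add_pixels4() add (residual) coefficients back onto pixels, and clear_block() /
 * clear_blocks() zero one or six 64-coefficient blocks.  It is expanded below for
 * 16-bit coefficients and, for bit depths above 8, also for 32-bit coefficients. */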
00195 #define DCTELEM_FUNCS(dctcoef, suffix) \
00196 static void FUNCC(get_pixels ## suffix)(DCTELEM *restrict _block, \
00197 const uint8_t *_pixels, \
00198 int line_size) \
00199 { \
00200 const pixel *pixels = (const pixel *) _pixels; \
00201 dctcoef *restrict block = (dctcoef *) _block; \
00202 int i; \
00203 \
00204 \
00205 for(i=0;i<8;i++) { \
00206 block[0] = pixels[0]; \
00207 block[1] = pixels[1]; \
00208 block[2] = pixels[2]; \
00209 block[3] = pixels[3]; \
00210 block[4] = pixels[4]; \
00211 block[5] = pixels[5]; \
00212 block[6] = pixels[6]; \
00213 block[7] = pixels[7]; \
00214 pixels += line_size / sizeof(pixel); \
00215 block += 8; \
00216 } \
00217 } \
00218 \
00219 static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels, \
00220 DCTELEM *_block, \
00221 int line_size) \
00222 { \
00223 int i; \
00224 pixel *restrict pixels = (pixel *restrict)_pixels; \
00225 dctcoef *block = (dctcoef*)_block; \
00226 line_size /= sizeof(pixel); \
00227 \
00228 for(i=0;i<8;i++) { \
00229 pixels[0] += block[0]; \
00230 pixels[1] += block[1]; \
00231 pixels[2] += block[2]; \
00232 pixels[3] += block[3]; \
00233 pixels[4] += block[4]; \
00234 pixels[5] += block[5]; \
00235 pixels[6] += block[6]; \
00236 pixels[7] += block[7]; \
00237 pixels += line_size; \
00238 block += 8; \
00239 } \
00240 } \
00241 \
00242 static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels, \
00243 DCTELEM *_block, \
00244 int line_size) \
00245 { \
00246 int i; \
00247 pixel *restrict pixels = (pixel *restrict)_pixels; \
00248 dctcoef *block = (dctcoef*)_block; \
00249 line_size /= sizeof(pixel); \
00250 \
00251 for(i=0;i<4;i++) { \
00252 pixels[0] += block[0]; \
00253 pixels[1] += block[1]; \
00254 pixels[2] += block[2]; \
00255 pixels[3] += block[3]; \
00256 pixels += line_size; \
00257 block += 4; \
00258 } \
00259 } \
00260 \
00261 static void FUNCC(clear_block ## suffix)(DCTELEM *block) \
00262 { \
00263 memset(block, 0, sizeof(dctcoef)*64); \
00264 } \
00265 \
00266 \
00269 static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks) \
00270 { \
00271 memset(blocks, 0, sizeof(dctcoef)*6*64); \
00272 }
00273
00274 DCTELEM_FUNCS(DCTELEM, _16)
00275 #if BIT_DEPTH > 8
00276 DCTELEM_FUNCS(dctcoef, _32)
00277 #endif
00278
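/* PIXOP2() generates the pixel copy/average kernels for block widths 2/4/8/16:
 * the plain versions, the _x2/_y2/_xy2 half-pel averaged versions, and the _l2/_l4
 * versions that average two or four source planes.  OP is applied to one packed
 * pixel2/pixel4 group at a time; rnd_avg_pixel4()/no_rnd_avg_pixel4() average four
 * packed pixels, rounding up and down respectively.  Note that the _xy2 and _l4
 * kernels operate on packed bytes (0x03030303/0xFCFCFCFC masks), i.e. they assume
 * 8-bit pixels. */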
00279 #define PIXOP2(OPNAME, OP) \
00280 static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00281 int i;\
00282 for(i=0; i<h; i++){\
00283 OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
00284 pixels+=line_size;\
00285 block +=line_size;\
00286 }\
00287 }\
00288 static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00289 int i;\
00290 for(i=0; i<h; i++){\
00291 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
00292 pixels+=line_size;\
00293 block +=line_size;\
00294 }\
00295 }\
00296 static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00297 int i;\
00298 for(i=0; i<h; i++){\
00299 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
00300 OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
00301 pixels+=line_size;\
00302 block +=line_size;\
00303 }\
00304 }\
00305 static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00306 FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
00307 }\
00308 \
00309 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00310 int src_stride1, int src_stride2, int h){\
00311 int i;\
00312 for(i=0; i<h; i++){\
00313 pixel4 a,b;\
00314 a= AV_RN4P(&src1[i*src_stride1 ]);\
00315 b= AV_RN4P(&src2[i*src_stride2 ]);\
00316 OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
00317 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
00318 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
00319 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
00320 }\
00321 }\
00322 \
00323 static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00324 int src_stride1, int src_stride2, int h){\
00325 int i;\
00326 for(i=0; i<h; i++){\
00327 pixel4 a,b;\
00328 a= AV_RN4P(&src1[i*src_stride1 ]);\
00329 b= AV_RN4P(&src2[i*src_stride2 ]);\
00330 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
00331 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
00332 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
00333 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
00334 }\
00335 }\
00336 \
00337 static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00338 int src_stride1, int src_stride2, int h){\
00339 int i;\
00340 for(i=0; i<h; i++){\
00341 pixel4 a,b;\
00342 a= AV_RN4P(&src1[i*src_stride1 ]);\
00343 b= AV_RN4P(&src2[i*src_stride2 ]);\
00344 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
00345 }\
00346 }\
00347 \
00348 static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00349 int src_stride1, int src_stride2, int h){\
00350 int i;\
00351 for(i=0; i<h; i++){\
00352 pixel4 a,b;\
00353 a= AV_RN2P(&src1[i*src_stride1 ]);\
00354 b= AV_RN2P(&src2[i*src_stride2 ]);\
00355 OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
00356 }\
00357 }\
00358 \
00359 static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00360 int src_stride1, int src_stride2, int h){\
00361 FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
00362 FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
00363 }\
00364 \
00365 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
00366 int src_stride1, int src_stride2, int h){\
00367 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
00368 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
00369 }\
00370 \
00371 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00372 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
00373 }\
00374 \
00375 static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00376 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
00377 }\
00378 \
00379 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00380 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
00381 }\
00382 \
00383 static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00384 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
00385 }\
00386 \
00387 static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
00388 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
00389 \
00390 int i;\
00391 for(i=0; i<h; i++){\
00392 uint32_t a, b, c, d, l0, l1, h0, h1;\
00393 a= AV_RN32(&src1[i*src_stride1]);\
00394 b= AV_RN32(&src2[i*src_stride2]);\
00395 c= AV_RN32(&src3[i*src_stride3]);\
00396 d= AV_RN32(&src4[i*src_stride4]);\
00397 l0= (a&0x03030303UL)\
00398 + (b&0x03030303UL)\
00399 + 0x02020202UL;\
00400 h0= ((a&0xFCFCFCFCUL)>>2)\
00401 + ((b&0xFCFCFCFCUL)>>2);\
00402 l1= (c&0x03030303UL)\
00403 + (d&0x03030303UL);\
00404 h1= ((c&0xFCFCFCFCUL)>>2)\
00405 + ((d&0xFCFCFCFCUL)>>2);\
00406 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00407 a= AV_RN32(&src1[i*src_stride1+4]);\
00408 b= AV_RN32(&src2[i*src_stride2+4]);\
00409 c= AV_RN32(&src3[i*src_stride3+4]);\
00410 d= AV_RN32(&src4[i*src_stride4+4]);\
00411 l0= (a&0x03030303UL)\
00412 + (b&0x03030303UL)\
00413 + 0x02020202UL;\
00414 h0= ((a&0xFCFCFCFCUL)>>2)\
00415 + ((b&0xFCFCFCFCUL)>>2);\
00416 l1= (c&0x03030303UL)\
00417 + (d&0x03030303UL);\
00418 h1= ((c&0xFCFCFCFCUL)>>2)\
00419 + ((d&0xFCFCFCFCUL)>>2);\
00420 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00421 }\
00422 }\
00423 \
00424 static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00425 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
00426 }\
00427 \
00428 static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00429 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
00430 }\
00431 \
00432 static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00433 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
00434 }\
00435 \
00436 static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
00437 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
00438 }\
00439 \
00440 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
00441 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
00442 \
00443 int i;\
00444 for(i=0; i<h; i++){\
00445 uint32_t a, b, c, d, l0, l1, h0, h1;\
00446 a= AV_RN32(&src1[i*src_stride1]);\
00447 b= AV_RN32(&src2[i*src_stride2]);\
00448 c= AV_RN32(&src3[i*src_stride3]);\
00449 d= AV_RN32(&src4[i*src_stride4]);\
00450 l0= (a&0x03030303UL)\
00451 + (b&0x03030303UL)\
00452 + 0x01010101UL;\
00453 h0= ((a&0xFCFCFCFCUL)>>2)\
00454 + ((b&0xFCFCFCFCUL)>>2);\
00455 l1= (c&0x03030303UL)\
00456 + (d&0x03030303UL);\
00457 h1= ((c&0xFCFCFCFCUL)>>2)\
00458 + ((d&0xFCFCFCFCUL)>>2);\
00459 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00460 a= AV_RN32(&src1[i*src_stride1+4]);\
00461 b= AV_RN32(&src2[i*src_stride2+4]);\
00462 c= AV_RN32(&src3[i*src_stride3+4]);\
00463 d= AV_RN32(&src4[i*src_stride4+4]);\
00464 l0= (a&0x03030303UL)\
00465 + (b&0x03030303UL)\
00466 + 0x01010101UL;\
00467 h0= ((a&0xFCFCFCFCUL)>>2)\
00468 + ((b&0xFCFCFCFCUL)>>2);\
00469 l1= (c&0x03030303UL)\
00470 + (d&0x03030303UL);\
00471 h1= ((c&0xFCFCFCFCUL)>>2)\
00472 + ((d&0xFCFCFCFCUL)>>2);\
00473 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00474 }\
00475 }\
00476 static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
00477 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
00478 FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
00479 FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
00480 }\
00481 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
00482 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
00483 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
00484 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
00485 }\
00486 \
00487 static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
00488 {\
00489 int i, a0, b0, a1, b1;\
00490 pixel *block = (pixel*)p_block;\
00491 const pixel *pixels = (const pixel*)p_pixels;\
00492 line_size >>= sizeof(pixel)-1;\
00493 a0= pixels[0];\
00494 b0= pixels[1] + 2;\
00495 a0 += b0;\
00496 b0 += pixels[2];\
00497 \
00498 pixels+=line_size;\
00499 for(i=0; i<h; i+=2){\
00500 a1= pixels[0];\
00501 b1= pixels[1];\
00502 a1 += b1;\
00503 b1 += pixels[2];\
00504 \
00505 block[0]= (a1+a0)>>2; \
00506 block[1]= (b1+b0)>>2;\
00507 \
00508 pixels+=line_size;\
00509 block +=line_size;\
00510 \
00511 a0= pixels[0];\
00512 b0= pixels[1] + 2;\
00513 a0 += b0;\
00514 b0 += pixels[2];\
00515 \
00516 block[0]= (a1+a0)>>2;\
00517 block[1]= (b1+b0)>>2;\
00518 pixels+=line_size;\
00519 block +=line_size;\
00520 }\
00521 }\
00522 \
00523 static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
00524 {\
00525 \
00526 int i;\
00527 const uint32_t a= AV_RN32(pixels );\
00528 const uint32_t b= AV_RN32(pixels+1);\
00529 uint32_t l0= (a&0x03030303UL)\
00530 + (b&0x03030303UL)\
00531 + 0x02020202UL;\
00532 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
00533 + ((b&0xFCFCFCFCUL)>>2);\
00534 uint32_t l1,h1;\
00535 \
00536 pixels+=line_size;\
00537 for(i=0; i<h; i+=2){\
00538 uint32_t a= AV_RN32(pixels );\
00539 uint32_t b= AV_RN32(pixels+1);\
00540 l1= (a&0x03030303UL)\
00541 + (b&0x03030303UL);\
00542 h1= ((a&0xFCFCFCFCUL)>>2)\
00543 + ((b&0xFCFCFCFCUL)>>2);\
00544 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00545 pixels+=line_size;\
00546 block +=line_size;\
00547 a= AV_RN32(pixels );\
00548 b= AV_RN32(pixels+1);\
00549 l0= (a&0x03030303UL)\
00550 + (b&0x03030303UL)\
00551 + 0x02020202UL;\
00552 h0= ((a&0xFCFCFCFCUL)>>2)\
00553 + ((b&0xFCFCFCFCUL)>>2);\
00554 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00555 pixels+=line_size;\
00556 block +=line_size;\
00557 }\
00558 }\
00559 \
00560 static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
00561 {\
00562 \
00563 int j;\
00564 for(j=0; j<2; j++){\
00565 int i;\
00566 const uint32_t a= AV_RN32(pixels );\
00567 const uint32_t b= AV_RN32(pixels+1);\
00568 uint32_t l0= (a&0x03030303UL)\
00569 + (b&0x03030303UL)\
00570 + 0x02020202UL;\
00571 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
00572 + ((b&0xFCFCFCFCUL)>>2);\
00573 uint32_t l1,h1;\
00574 \
00575 pixels+=line_size;\
00576 for(i=0; i<h; i+=2){\
00577 uint32_t a= AV_RN32(pixels );\
00578 uint32_t b= AV_RN32(pixels+1);\
00579 l1= (a&0x03030303UL)\
00580 + (b&0x03030303UL);\
00581 h1= ((a&0xFCFCFCFCUL)>>2)\
00582 + ((b&0xFCFCFCFCUL)>>2);\
00583 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00584 pixels+=line_size;\
00585 block +=line_size;\
00586 a= AV_RN32(pixels );\
00587 b= AV_RN32(pixels+1);\
00588 l0= (a&0x03030303UL)\
00589 + (b&0x03030303UL)\
00590 + 0x02020202UL;\
00591 h0= ((a&0xFCFCFCFCUL)>>2)\
00592 + ((b&0xFCFCFCFCUL)>>2);\
00593 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00594 pixels+=line_size;\
00595 block +=line_size;\
00596 }\
00597 pixels+=4-line_size*(h+1);\
00598 block +=4-line_size*h;\
00599 }\
00600 }\
00601 \
00602 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
00603 {\
00604 \
00605 int j;\
00606 for(j=0; j<2; j++){\
00607 int i;\
00608 const uint32_t a= AV_RN32(pixels );\
00609 const uint32_t b= AV_RN32(pixels+1);\
00610 uint32_t l0= (a&0x03030303UL)\
00611 + (b&0x03030303UL)\
00612 + 0x01010101UL;\
00613 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
00614 + ((b&0xFCFCFCFCUL)>>2);\
00615 uint32_t l1,h1;\
00616 \
00617 pixels+=line_size;\
00618 for(i=0; i<h; i+=2){\
00619 uint32_t a= AV_RN32(pixels );\
00620 uint32_t b= AV_RN32(pixels+1);\
00621 l1= (a&0x03030303UL)\
00622 + (b&0x03030303UL);\
00623 h1= ((a&0xFCFCFCFCUL)>>2)\
00624 + ((b&0xFCFCFCFCUL)>>2);\
00625 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00626 pixels+=line_size;\
00627 block +=line_size;\
00628 a= AV_RN32(pixels );\
00629 b= AV_RN32(pixels+1);\
00630 l0= (a&0x03030303UL)\
00631 + (b&0x03030303UL)\
00632 + 0x01010101UL;\
00633 h0= ((a&0xFCFCFCFCUL)>>2)\
00634 + ((b&0xFCFCFCFCUL)>>2);\
00635 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
00636 pixels+=line_size;\
00637 block +=line_size;\
00638 }\
00639 pixels+=4-line_size*(h+1);\
00640 block +=4-line_size*h;\
00641 }\
00642 }\
00643 \
00644 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
00645 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
00646 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
00647 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
00648 av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
00649 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
00650 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
00651 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
00652
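/* op_put stores the source value; op_avg averages it (rounding up) with the pixel
 * already in the destination, giving the put_/avg_ variants of each kernel. */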
00653 #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
00654 #define op_put(a, b) a = b
00655
00656 PIXOP2(avg, op_avg)
00657 PIXOP2(put, op_put)
00658 #undef op_avg
00659 #undef op_put
00660
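/* A plain put involves no averaging, so its rounding and no-rounding variants are
 * identical and can simply be aliased. */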
00661 #define put_no_rnd_pixels8_c put_pixels8_c
00662 #define put_no_rnd_pixels16_c put_pixels16_c
00663
00664 static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
00665 FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
00666 }
00667
00668 static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
00669 FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
00670 }
00671
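/* H264_CHROMA_MC() generates the H.264 chroma motion compensation functions for
 * 2/4/8 pixel wide blocks.  (x, y) is the eighth-pel offset; the bilinear weights
 * are A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy, which always sum to 64, so op_put
 * below rounds with ((sum + 32) >> 6).  Example: x=3, y=0 gives A=40, B=24, C=D=0,
 * i.e. dst[i] = (40*src[i] + 24*src[i+1] + 32) >> 6.  When D is zero the 2-tap
 * special case (horizontal-only or vertical-only) is used. */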
00672 #define H264_CHROMA_MC(OPNAME, OP)\
00673 static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst, uint8_t *p_src, int stride, int h, int x, int y){\
00674 pixel *dst = (pixel*)p_dst;\
00675 pixel *src = (pixel*)p_src;\
00676 const int A=(8-x)*(8-y);\
00677 const int B=( x)*(8-y);\
00678 const int C=(8-x)*( y);\
00679 const int D=( x)*( y);\
00680 int i;\
00681 stride >>= sizeof(pixel)-1;\
00682 \
00683 assert(x<8 && y<8 && x>=0 && y>=0);\
00684 \
00685 if(D){\
00686 for(i=0; i<h; i++){\
00687 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
00688 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
00689 dst+= stride;\
00690 src+= stride;\
00691 }\
00692 }else{\
00693 const int E= B+C;\
00694 const int step= C ? stride : 1;\
00695 for(i=0; i<h; i++){\
00696 OP(dst[0], (A*src[0] + E*src[step+0]));\
00697 OP(dst[1], (A*src[1] + E*src[step+1]));\
00698 dst+= stride;\
00699 src+= stride;\
00700 }\
00701 }\
00702 }\
00703 \
00704 static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst, uint8_t *p_src, int stride, int h, int x, int y){\
00705 pixel *dst = (pixel*)p_dst;\
00706 pixel *src = (pixel*)p_src;\
00707 const int A=(8-x)*(8-y);\
00708 const int B=( x)*(8-y);\
00709 const int C=(8-x)*( y);\
00710 const int D=( x)*( y);\
00711 int i;\
00712 stride >>= sizeof(pixel)-1;\
00713 \
00714 assert(x<8 && y<8 && x>=0 && y>=0);\
00715 \
00716 if(D){\
00717 for(i=0; i<h; i++){\
00718 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
00719 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
00720 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
00721 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
00722 dst+= stride;\
00723 src+= stride;\
00724 }\
00725 }else{\
00726 const int E= B+C;\
00727 const int step= C ? stride : 1;\
00728 for(i=0; i<h; i++){\
00729 OP(dst[0], (A*src[0] + E*src[step+0]));\
00730 OP(dst[1], (A*src[1] + E*src[step+1]));\
00731 OP(dst[2], (A*src[2] + E*src[step+2]));\
00732 OP(dst[3], (A*src[3] + E*src[step+3]));\
00733 dst+= stride;\
00734 src+= stride;\
00735 }\
00736 }\
00737 }\
00738 \
00739 static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst, uint8_t *p_src, int stride, int h, int x, int y){\
00740 pixel *dst = (pixel*)p_dst;\
00741 pixel *src = (pixel*)p_src;\
00742 const int A=(8-x)*(8-y);\
00743 const int B=( x)*(8-y);\
00744 const int C=(8-x)*( y);\
00745 const int D=( x)*( y);\
00746 int i;\
00747 stride >>= sizeof(pixel)-1;\
00748 \
00749 assert(x<8 && y<8 && x>=0 && y>=0);\
00750 \
00751 if(D){\
00752 for(i=0; i<h; i++){\
00753 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
00754 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
00755 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
00756 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
00757 OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
00758 OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
00759 OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
00760 OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
00761 dst+= stride;\
00762 src+= stride;\
00763 }\
00764 }else{\
00765 const int E= B+C;\
00766 const int step= C ? stride : 1;\
00767 for(i=0; i<h; i++){\
00768 OP(dst[0], (A*src[0] + E*src[step+0]));\
00769 OP(dst[1], (A*src[1] + E*src[step+1]));\
00770 OP(dst[2], (A*src[2] + E*src[step+2]));\
00771 OP(dst[3], (A*src[3] + E*src[step+3]));\
00772 OP(dst[4], (A*src[4] + E*src[step+4]));\
00773 OP(dst[5], (A*src[5] + E*src[step+5]));\
00774 OP(dst[6], (A*src[6] + E*src[step+6]));\
00775 OP(dst[7], (A*src[7] + E*src[step+7]));\
00776 dst+= stride;\
00777 src+= stride;\
00778 }\
00779 }\
00780 }
00781
00782 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
00783 #define op_put(a, b) a = (((b) + 32)>>6)
00784
00785 H264_CHROMA_MC(put_ , op_put)
00786 H264_CHROMA_MC(avg_ , op_avg)
00787 #undef op_avg
00788 #undef op_put
00789
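/* H264_LOWPASS() generates the 6-tap (1,-5,20,20,-5,1) half-pel interpolation
 * filters used by the quarter-pel MC functions below.  The _h_/_v_ variants filter
 * horizontally/vertically and round with OP ((sum + 16) >> 5); the _hv_ variants
 * first filter horizontally into an int16_t tmp buffer and then vertically,
 * rounding with OP2 ((sum + 512) >> 10).  For bit depths above 9 the horizontal
 * intermediates can exceed int16_t range (up to 42 * 1023 = 42966 at 10 bits), so
 * the negative 'pad' bias is added when storing to tmp and removed when reading it
 * back, keeping the stored values within [-20*max, 32*max]. */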
00790 #define H264_LOWPASS(OPNAME, OP, OP2) \
00791 static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00792 const int h=2;\
00793 INIT_CLIP\
00794 int i;\
00795 pixel *dst = (pixel*)p_dst;\
00796 pixel *src = (pixel*)p_src;\
00797 dstStride >>= sizeof(pixel)-1;\
00798 srcStride >>= sizeof(pixel)-1;\
00799 for(i=0; i<h; i++)\
00800 {\
00801 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
00802 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
00803 dst+=dstStride;\
00804 src+=srcStride;\
00805 }\
00806 }\
00807 \
00808 static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00809 const int w=2;\
00810 INIT_CLIP\
00811 int i;\
00812 pixel *dst = (pixel*)p_dst;\
00813 pixel *src = (pixel*)p_src;\
00814 dstStride >>= sizeof(pixel)-1;\
00815 srcStride >>= sizeof(pixel)-1;\
00816 for(i=0; i<w; i++)\
00817 {\
00818 const int srcB= src[-2*srcStride];\
00819 const int srcA= src[-1*srcStride];\
00820 const int src0= src[0 *srcStride];\
00821 const int src1= src[1 *srcStride];\
00822 const int src2= src[2 *srcStride];\
00823 const int src3= src[3 *srcStride];\
00824 const int src4= src[4 *srcStride];\
00825 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
00826 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
00827 dst++;\
00828 src++;\
00829 }\
00830 }\
00831 \
00832 static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
00833 const int h=2;\
00834 const int w=2;\
00835 const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
00836 INIT_CLIP\
00837 int i;\
00838 pixel *dst = (pixel*)p_dst;\
00839 pixel *src = (pixel*)p_src;\
00840 dstStride >>= sizeof(pixel)-1;\
00841 srcStride >>= sizeof(pixel)-1;\
00842 src -= 2*srcStride;\
00843 for(i=0; i<h+5; i++)\
00844 {\
00845 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
00846 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
00847 tmp+=tmpStride;\
00848 src+=srcStride;\
00849 }\
00850 tmp -= tmpStride*(h+5-2);\
00851 for(i=0; i<w; i++)\
00852 {\
00853 const int tmpB= tmp[-2*tmpStride] - pad;\
00854 const int tmpA= tmp[-1*tmpStride] - pad;\
00855 const int tmp0= tmp[0 *tmpStride] - pad;\
00856 const int tmp1= tmp[1 *tmpStride] - pad;\
00857 const int tmp2= tmp[2 *tmpStride] - pad;\
00858 const int tmp3= tmp[3 *tmpStride] - pad;\
00859 const int tmp4= tmp[4 *tmpStride] - pad;\
00860 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
00861 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
00862 dst++;\
00863 tmp++;\
00864 }\
00865 }\
00866 static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00867 const int h=4;\
00868 INIT_CLIP\
00869 int i;\
00870 pixel *dst = (pixel*)p_dst;\
00871 pixel *src = (pixel*)p_src;\
00872 dstStride >>= sizeof(pixel)-1;\
00873 srcStride >>= sizeof(pixel)-1;\
00874 for(i=0; i<h; i++)\
00875 {\
00876 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
00877 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
00878 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
00879 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
00880 dst+=dstStride;\
00881 src+=srcStride;\
00882 }\
00883 }\
00884 \
00885 static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00886 const int w=4;\
00887 INIT_CLIP\
00888 int i;\
00889 pixel *dst = (pixel*)p_dst;\
00890 pixel *src = (pixel*)p_src;\
00891 dstStride >>= sizeof(pixel)-1;\
00892 srcStride >>= sizeof(pixel)-1;\
00893 for(i=0; i<w; i++)\
00894 {\
00895 const int srcB= src[-2*srcStride];\
00896 const int srcA= src[-1*srcStride];\
00897 const int src0= src[0 *srcStride];\
00898 const int src1= src[1 *srcStride];\
00899 const int src2= src[2 *srcStride];\
00900 const int src3= src[3 *srcStride];\
00901 const int src4= src[4 *srcStride];\
00902 const int src5= src[5 *srcStride];\
00903 const int src6= src[6 *srcStride];\
00904 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
00905 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
00906 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
00907 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
00908 dst++;\
00909 src++;\
00910 }\
00911 }\
00912 \
00913 static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
00914 const int h=4;\
00915 const int w=4;\
00916 const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
00917 INIT_CLIP\
00918 int i;\
00919 pixel *dst = (pixel*)p_dst;\
00920 pixel *src = (pixel*)p_src;\
00921 dstStride >>= sizeof(pixel)-1;\
00922 srcStride >>= sizeof(pixel)-1;\
00923 src -= 2*srcStride;\
00924 for(i=0; i<h+5; i++)\
00925 {\
00926 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
00927 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
00928 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
00929 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
00930 tmp+=tmpStride;\
00931 src+=srcStride;\
00932 }\
00933 tmp -= tmpStride*(h+5-2);\
00934 for(i=0; i<w; i++)\
00935 {\
00936 const int tmpB= tmp[-2*tmpStride] - pad;\
00937 const int tmpA= tmp[-1*tmpStride] - pad;\
00938 const int tmp0= tmp[0 *tmpStride] - pad;\
00939 const int tmp1= tmp[1 *tmpStride] - pad;\
00940 const int tmp2= tmp[2 *tmpStride] - pad;\
00941 const int tmp3= tmp[3 *tmpStride] - pad;\
00942 const int tmp4= tmp[4 *tmpStride] - pad;\
00943 const int tmp5= tmp[5 *tmpStride] - pad;\
00944 const int tmp6= tmp[6 *tmpStride] - pad;\
00945 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
00946 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
00947 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
00948 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
00949 dst++;\
00950 tmp++;\
00951 }\
00952 }\
00953 \
00954 static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00955 const int h=8;\
00956 INIT_CLIP\
00957 int i;\
00958 pixel *dst = (pixel*)p_dst;\
00959 pixel *src = (pixel*)p_src;\
00960 dstStride >>= sizeof(pixel)-1;\
00961 srcStride >>= sizeof(pixel)-1;\
00962 for(i=0; i<h; i++)\
00963 {\
00964 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
00965 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
00966 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
00967 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
00968 OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
00969 OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
00970 OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
00971 OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
00972 dst+=dstStride;\
00973 src+=srcStride;\
00974 }\
00975 }\
00976 \
00977 static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
00978 const int w=8;\
00979 INIT_CLIP\
00980 int i;\
00981 pixel *dst = (pixel*)p_dst;\
00982 pixel *src = (pixel*)p_src;\
00983 dstStride >>= sizeof(pixel)-1;\
00984 srcStride >>= sizeof(pixel)-1;\
00985 for(i=0; i<w; i++)\
00986 {\
00987 const int srcB= src[-2*srcStride];\
00988 const int srcA= src[-1*srcStride];\
00989 const int src0= src[0 *srcStride];\
00990 const int src1= src[1 *srcStride];\
00991 const int src2= src[2 *srcStride];\
00992 const int src3= src[3 *srcStride];\
00993 const int src4= src[4 *srcStride];\
00994 const int src5= src[5 *srcStride];\
00995 const int src6= src[6 *srcStride];\
00996 const int src7= src[7 *srcStride];\
00997 const int src8= src[8 *srcStride];\
00998 const int src9= src[9 *srcStride];\
00999 const int src10=src[10*srcStride];\
01000 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
01001 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
01002 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
01003 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
01004 OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
01005 OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
01006 OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
01007 OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
01008 dst++;\
01009 src++;\
01010 }\
01011 }\
01012 \
01013 static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
01014 const int h=8;\
01015 const int w=8;\
01016 const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
01017 INIT_CLIP\
01018 int i;\
01019 pixel *dst = (pixel*)p_dst;\
01020 pixel *src = (pixel*)p_src;\
01021 dstStride >>= sizeof(pixel)-1;\
01022 srcStride >>= sizeof(pixel)-1;\
01023 src -= 2*srcStride;\
01024 for(i=0; i<h+5; i++)\
01025 {\
01026 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
01027 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
01028 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
01029 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
01030 tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
01031 tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
01032 tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
01033 tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
01034 tmp+=tmpStride;\
01035 src+=srcStride;\
01036 }\
01037 tmp -= tmpStride*(h+5-2);\
01038 for(i=0; i<w; i++)\
01039 {\
01040 const int tmpB= tmp[-2*tmpStride] - pad;\
01041 const int tmpA= tmp[-1*tmpStride] - pad;\
01042 const int tmp0= tmp[0 *tmpStride] - pad;\
01043 const int tmp1= tmp[1 *tmpStride] - pad;\
01044 const int tmp2= tmp[2 *tmpStride] - pad;\
01045 const int tmp3= tmp[3 *tmpStride] - pad;\
01046 const int tmp4= tmp[4 *tmpStride] - pad;\
01047 const int tmp5= tmp[5 *tmpStride] - pad;\
01048 const int tmp6= tmp[6 *tmpStride] - pad;\
01049 const int tmp7= tmp[7 *tmpStride] - pad;\
01050 const int tmp8= tmp[8 *tmpStride] - pad;\
01051 const int tmp9= tmp[9 *tmpStride] - pad;\
01052 const int tmp10=tmp[10*tmpStride] - pad;\
01053 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
01054 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
01055 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
01056 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
01057 OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
01058 OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
01059 OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
01060 OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
01061 dst++;\
01062 tmp++;\
01063 }\
01064 }\
01065 \
01066 static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01067 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
01068 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
01069 src += 8*srcStride;\
01070 dst += 8*dstStride;\
01071 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
01072 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
01073 }\
01074 \
01075 static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01076 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
01077 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
01078 src += 8*srcStride;\
01079 dst += 8*dstStride;\
01080 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
01081 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
01082 }\
01083 \
01084 static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
01085 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
01086 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
01087 src += 8*srcStride;\
01088 dst += 8*dstStride;\
01089 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
01090 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
01091 }\
01092
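/* H264_MC() builds the 16 quarter-pel MC entry points for one block size.  In
 * OPNAME_h264_qpelSIZE_mcXY, X is the horizontal and Y the vertical quarter-pel
 * offset (0..3): mc00 is a plain copy/average, mc20 and mc02 are the horizontal
 * and vertical half-pel filters, mc22 is the 2-D half-pel filter, and the
 * remaining positions average two of these intermediate planes with the
 * pixelsN_l2 helpers. */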
01093 #define H264_MC(OPNAME, SIZE) \
01094 static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
01095 FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
01096 }\
01097 \
01098 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
01099 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
01100 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
01101 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
01102 }\
01103 \
01104 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
01105 FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
01106 }\
01107 \
01108 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
01109 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
01110 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
01111 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
01112 }\
01113 \
01114 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
01115 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01116 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01117 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
01118 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01119 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01120 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01121 }\
01122 \
01123 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
01124 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01125 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01126 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01127 FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
01128 }\
01129 \
01130 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
01131 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01132 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01133 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
01134 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01135 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01136 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01137 }\
01138 \
01139 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
01140 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01141 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01142 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01143 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01144 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
01145 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01146 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01147 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01148 }\
01149 \
01150 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
01151 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01152 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01153 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01154 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01155 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
01156 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
01157 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01158 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01159 }\
01160 \
01161 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
01162 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01163 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01164 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01165 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01166 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
01167 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01168 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01169 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01170 }\
01171 \
01172 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
01173 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01174 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01175 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01176 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01177 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
01178 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
01179 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01180 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01181 }\
01182 \
01183 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
01184 int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
01185 FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
01186 }\
01187 \
01188 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
01189 int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
01190 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01191 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
01192 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
01193 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
01194 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01195 }\
01196 \
01197 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
01198 int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
01199 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
01200 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
01201 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
01202 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
01203 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01204 }\
01205 \
01206 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
01207 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01208 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01209 int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
01210 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01211 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
01212 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
01213 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01214 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
01215 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01216 }\
01217 \
01218 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
01219 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
01220 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
01221 int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
01222 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
01223 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
01224 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
01225 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
01226 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
01227 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
01228 }\
01229
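/* The single-pass filters scale by 32 (the tap sum 20+20-5-5+1+1), hence the
 * (+16)>>5 rounding; the two-pass hv filter scales by 32*32 = 1024, hence
 * (+512)>>10.  op_avg further averages the result with the existing destination
 * pixel, rounding up. */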
01230 #define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
01231
01232 #define op_put(a, b) a = CLIP(((b) + 16)>>5)
01233 #define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
01234 #define op2_put(a, b) a = CLIP(((b) + 512)>>10)
01235
01236 H264_LOWPASS(put_ , op_put, op2_put)
01237 H264_LOWPASS(avg_ , op_avg, op2_avg)
01238 H264_MC(put_, 2)
01239 H264_MC(put_, 4)
01240 H264_MC(put_, 8)
01241 H264_MC(put_, 16)
01242 H264_MC(avg_, 4)
01243 H264_MC(avg_, 8)
01244 H264_MC(avg_, 16)
01245
01246 #undef op_avg
01247 #undef op_put
01248 #undef op2_avg
01249 #undef op2_put
01250
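/* The mc00 (integer-pel) cases are plain block copies/averages, so alias them to
 * the fixed-size pixel functions defined at the end of this file for each
 * supported bit depth. */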
01251 #if BIT_DEPTH == 8
01252 # define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
01253 # define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
01254 # define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
01255 # define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
01256 #elif BIT_DEPTH == 9
01257 # define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
01258 # define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
01259 # define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
01260 # define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
01261 #elif BIT_DEPTH == 10
01262 # define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
01263 # define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
01264 # define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
01265 # define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
01266 #endif
01267
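/* Fixed-size (8x8 and 16x16) copy/average wrappers around the generated put/avg
 * kernels; these provide the mc00 functions aliased above. */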
01268 void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
01269 FUNCC(put_pixels8)(dst, src, stride, 8);
01270 }
01271 void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
01272 FUNCC(avg_pixels8)(dst, src, stride, 8);
01273 }
01274 void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
01275 FUNCC(put_pixels16)(dst, src, stride, 16);
01276 }
01277 void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
01278 FUNCC(avg_pixels16)(dst, src, stride, 16);
01279 }
01280