#include "dsputil_mmx.h"

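/* Illustrative note (not used by the build): QPEL_H264V_MM and QPEL_H264HV_MM
 * apply H.264's 6-tap (1,-5,20,20,-5,1) luma half-pel filter to one row step.
 * A scalar sketch of what the V variant computes per pixel, assuming
 * av_clip_uint8() from libavutil:
 *
 *     int v = A - 5*B + 20*C + 20*D - 5*E + F;  // six vertically adjacent pixels
 *     dst  = av_clip_uint8((v + 16) >> 5);      // packuswb performs the clip
 *
 * The multiply by 20 is done as (x << 2) * 5 with ff_pw_5, and ff_pw_16 adds
 * the rounding term.  The HV variant stores the unshifted 16-bit sum
 * (including the +16) into a temporary buffer so a second, horizontal pass
 * can be applied later.
 */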
#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
"mov"#q" "#C", "#T" \n\t"\
"mov"#d" (%0), "#F" \n\t"\
"paddw "#D", "#T" \n\t"\
"psllw $2, "#T" \n\t"\
"psubw "#B", "#T" \n\t"\
"psubw "#E", "#T" \n\t"\
"punpcklbw "#Z", "#F" \n\t"\
"pmullw "MANGLE(ff_pw_5)", "#T"\n\t"\
"paddw "MANGLE(ff_pw_16)", "#A"\n\t"\
"add %2, %0 \n\t"\
"paddw "#F", "#A" \n\t"\
"paddw "#A", "#T" \n\t"\
"psraw $5, "#T" \n\t"\
"packuswb "#T", "#T" \n\t"\
OP(T, (%1), A, d)\
"add %3, %1 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
"mov"#q" "#C", "#T" \n\t"\
"mov"#d" (%0), "#F" \n\t"\
"paddw "#D", "#T" \n\t"\
"psllw $2, "#T" \n\t"\
"paddw "MANGLE(ff_pw_16)", "#A"\n\t"\
"psubw "#B", "#T" \n\t"\
"psubw "#E", "#T" \n\t"\
"punpcklbw "#Z", "#F" \n\t"\
"pmullw "MANGLE(ff_pw_5)", "#T"\n\t"\
"paddw "#F", "#A" \n\t"\
"add %2, %0 \n\t"\
"paddw "#A", "#T" \n\t"\
"mov"#q" "#T", "#OF"(%1) \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)

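/* QPEL_H264(OPNAME, OP, MMX) expands to the 4-, 8- and 16-pixel wide
 * horizontal, vertical and horizontal+vertical lowpass helpers for one
 * MMX-class instruction set.  OP is expected to be a store macro such as
 * PUT_OP (plain store) or one of the AVG_*_OP macros defined near the end of
 * this file, and PAVGB must name the packed byte-average instruction of the
 * target ISA before the macro is instantiated.
 */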
#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
int h=4;\
\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movq "MANGLE(ff_pw_5) ", %%mm4\n\t"\
"movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
"1: \n\t"\
"movd -1(%0), %%mm1 \n\t"\
"movd (%0), %%mm2 \n\t"\
"movd 1(%0), %%mm3 \n\t"\
"movd 2(%0), %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"paddw %%mm0, %%mm1 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"movd -2(%0), %%mm0 \n\t"\
"movd 3(%0), %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"paddw %%mm3, %%mm0 \n\t"\
"psllw $2, %%mm2 \n\t"\
"psubw %%mm1, %%mm2 \n\t"\
"pmullw %%mm4, %%mm2 \n\t"\
"paddw %%mm5, %%mm0 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"psraw $5, %%mm0 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm6, d)\
"add %3, %0 \n\t"\
"add %4, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(src), "+c"(dst), "+g"(h)\
: "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
: "memory"\
);\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
int h=4;\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movq %0, %%mm4 \n\t"\
"movq %1, %%mm5 \n\t"\
:: "m"(ff_pw_5), "m"(ff_pw_16)\
);\
do{\
__asm__ volatile(\
"movd -1(%0), %%mm1 \n\t"\
"movd (%0), %%mm2 \n\t"\
"movd 1(%0), %%mm3 \n\t"\
"movd 2(%0), %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"paddw %%mm0, %%mm1 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"movd -2(%0), %%mm0 \n\t"\
"movd 3(%0), %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"paddw %%mm3, %%mm0 \n\t"\
"psllw $2, %%mm2 \n\t"\
"psubw %%mm1, %%mm2 \n\t"\
"pmullw %%mm4, %%mm2 \n\t"\
"paddw %%mm5, %%mm0 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"movd (%2), %%mm3 \n\t"\
"psraw $5, %%mm0 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
PAVGB" %%mm3, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm6, d)\
"add %4, %0 \n\t"\
"add %4, %1 \n\t"\
"add %3, %2 \n\t"\
: "+a"(src), "+c"(dst), "+d"(src2)\
: "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
: "memory"\
);\
}while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
src -= 2*srcStride;\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movd (%0), %%mm0 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm1 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm2 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm3 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
: "+a"(src), "+c"(dst)\
: "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
: "memory"\
);\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
int h=4;\
int w=3;\
src -= 2*srcStride+2;\
while(w--){\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movd (%0), %%mm0 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm1 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm2 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm3 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
: "+a"(src)\
: "c"(tmp), "S"((x86_reg)srcStride)\
: "memory"\
);\
tmp += 4;\
src += 4 - 9*srcStride;\
}\
tmp -= 3*4;\
__asm__ volatile(\
"1: \n\t"\
"movq (%0), %%mm0 \n\t"\
"paddw 10(%0), %%mm0 \n\t"\
"movq 2(%0), %%mm1 \n\t"\
"paddw 8(%0), %%mm1 \n\t"\
"movq 4(%0), %%mm2 \n\t"\
"paddw 6(%0), %%mm2 \n\t"\
"psubw %%mm1, %%mm0 \n\t"\
"psraw $2, %%mm0 \n\t"\
"psubw %%mm1, %%mm0 \n\t"\
"paddsw %%mm2, %%mm0 \n\t"\
"psraw $2, %%mm0 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"psraw $6, %%mm0 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm7, d)\
"add $24, %0 \n\t"\
"add %3, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(tmp), "+c"(dst), "+g"(h)\
: "S"((x86_reg)dstStride)\
: "memory"\
);\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
int h=8;\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
"1: \n\t"\
"movq (%0), %%mm0 \n\t"\
"movq 1(%0), %%mm2 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm3, %%mm1 \n\t"\
"psllw $2, %%mm0 \n\t"\
"psllw $2, %%mm1 \n\t"\
"movq -1(%0), %%mm2 \n\t"\
"movq 2(%0), %%mm4 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"movq %%mm4, %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm3, %%mm5 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm5, %%mm1 \n\t"\
"pmullw %%mm6, %%mm0 \n\t"\
"pmullw %%mm6, %%mm1 \n\t"\
"movd -2(%0), %%mm2 \n\t"\
"movd 7(%0), %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm5 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
"paddw %%mm5, %%mm2 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm4, %%mm1 \n\t"\
"psraw $5, %%mm0 \n\t"\
"psraw $5, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm5, q)\
"add %3, %0 \n\t"\
"add %4, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(src), "+c"(dst), "+g"(h)\
: "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
: "memory"\
);\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
int h=8;\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
"1: \n\t"\
"movq (%0), %%mm0 \n\t"\
"movq 1(%0), %%mm2 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm3, %%mm1 \n\t"\
"psllw $2, %%mm0 \n\t"\
"psllw $2, %%mm1 \n\t"\
"movq -1(%0), %%mm2 \n\t"\
"movq 2(%0), %%mm4 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"movq %%mm4, %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm3, %%mm5 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm5, %%mm1 \n\t"\
"pmullw %%mm6, %%mm0 \n\t"\
"pmullw %%mm6, %%mm1 \n\t"\
"movd -2(%0), %%mm2 \n\t"\
"movd 7(%0), %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm5 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
"paddw %%mm5, %%mm2 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm4, %%mm1 \n\t"\
"psraw $5, %%mm0 \n\t"\
"psraw $5, %%mm1 \n\t"\
"movq (%2), %%mm4 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
PAVGB" %%mm4, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm5, q)\
"add %5, %0 \n\t"\
"add %5, %1 \n\t"\
"add %4, %2 \n\t"\
"decl %3 \n\t"\
"jg 1b \n\t"\
: "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
: "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
: "memory"\
);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
int w= 2;\
src -= 2*srcStride;\
\
while(w--){\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movd (%0), %%mm0 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm1 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm2 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm3 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
"cmpl $16, %4 \n\t"\
"jne 2f \n\t"\
QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
"2: \n\t"\
\
: "+a"(src), "+c"(dst)\
: "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "rm"(h)\
: "memory"\
);\
src += 4-(h+5)*srcStride;\
dst += 4-h*dstStride;\
}\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
int w = (size+8)>>2;\
src -= 2*srcStride+2;\
while(w--){\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movd (%0), %%mm0 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm1 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm2 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm3 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
"cmpl $16, %3 \n\t"\
"jne 2f \n\t"\
QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
"2: \n\t"\
: "+a"(src)\
: "c"(tmp), "S"((x86_reg)srcStride), "rm"(size)\
: "memory"\
);\
tmp += 4;\
src += 4 - (size+5)*srcStride;\
}\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
int w = size>>4;\
do{\
int h = size;\
__asm__ volatile(\
"1: \n\t"\
"movq (%0), %%mm0 \n\t"\
"movq 8(%0), %%mm3 \n\t"\
"movq 2(%0), %%mm1 \n\t"\
"movq 10(%0), %%mm4 \n\t"\
"paddw %%mm4, %%mm0 \n\t"\
"paddw %%mm3, %%mm1 \n\t"\
"paddw 18(%0), %%mm3 \n\t"\
"paddw 16(%0), %%mm4 \n\t"\
"movq 4(%0), %%mm2 \n\t"\
"movq 12(%0), %%mm5 \n\t"\
"paddw 6(%0), %%mm2 \n\t"\
"paddw 14(%0), %%mm5 \n\t"\
"psubw %%mm1, %%mm0 \n\t"\
"psubw %%mm4, %%mm3 \n\t"\
"psraw $2, %%mm0 \n\t"\
"psraw $2, %%mm3 \n\t"\
"psubw %%mm1, %%mm0 \n\t"\
"psubw %%mm4, %%mm3 \n\t"\
"paddsw %%mm2, %%mm0 \n\t"\
"paddsw %%mm5, %%mm3 \n\t"\
"psraw $2, %%mm0 \n\t"\
"psraw $2, %%mm3 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm5, %%mm3 \n\t"\
"psraw $6, %%mm0 \n\t"\
"psraw $6, %%mm3 \n\t"\
"packuswb %%mm3, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm7, q)\
"add $48, %0 \n\t"\
"add %3, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(tmp), "+c"(dst), "+g"(h)\
: "S"((x86_reg)dstStride)\
: "memory"\
);\
tmp += 8 - size*24;\
dst += 8 - size*dstStride;\
}while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
src += 8*dstStride;\
dst += 8*dstStride;\
src2 += 8*src2Stride;\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
__asm__ volatile(\
"movq (%1), %%mm0 \n\t"\
"movq 24(%1), %%mm1 \n\t"\
"psraw $5, %%mm0 \n\t"\
"psraw $5, %%mm1 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
"packuswb %%mm1, %%mm1 \n\t"\
PAVGB" (%0), %%mm0 \n\t"\
PAVGB" (%0,%3), %%mm1 \n\t"\
OP(%%mm0, (%2), %%mm4, d)\
OP(%%mm1, (%2,%4), %%mm5, d)\
"lea (%0,%3,2), %0 \n\t"\
"lea (%2,%4,2), %2 \n\t"\
"movq 48(%1), %%mm0 \n\t"\
"movq 72(%1), %%mm1 \n\t"\
"psraw $5, %%mm0 \n\t"\
"psraw $5, %%mm1 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
"packuswb %%mm1, %%mm1 \n\t"\
PAVGB" (%0), %%mm0 \n\t"\
PAVGB" (%0,%3), %%mm1 \n\t"\
OP(%%mm0, (%2), %%mm4, d)\
OP(%%mm1, (%2,%4), %%mm5, d)\
:"+a"(src8), "+c"(src16), "+d"(dst)\
:"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
:"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
do{\
__asm__ volatile(\
"movq (%1), %%mm0 \n\t"\
"movq 8(%1), %%mm1 \n\t"\
"movq 48(%1), %%mm2 \n\t"\
"movq 8+48(%1), %%mm3 \n\t"\
"psraw $5, %%mm0 \n\t"\
"psraw $5, %%mm1 \n\t"\
"psraw $5, %%mm2 \n\t"\
"psraw $5, %%mm3 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
"packuswb %%mm3, %%mm2 \n\t"\
PAVGB" (%0), %%mm0 \n\t"\
PAVGB" (%0,%3), %%mm2 \n\t"\
OP(%%mm0, (%2), %%mm5, q)\
OP(%%mm2, (%2,%4), %%mm5, q)\
::"a"(src8), "c"(src16), "d"(dst),\
"r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
:"memory");\
src8 += 2L*src8Stride;\
src16 += 48;\
dst += 2L*dstStride;\
}while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\

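/* On x86-64 the 16-wide horizontal+l2 SSSE3 filter below can keep all of its
 * temporaries in registers by using %xmm8-%xmm15; those registers do not
 * exist on 32-bit x86, so the fallback simply performs two 8-wide calls per
 * half of the block.
 */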
#if ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
int h=16;\
__asm__ volatile(\
"pxor %%xmm15, %%xmm15 \n\t"\
"movdqa %6, %%xmm14 \n\t"\
"movdqa %7, %%xmm13 \n\t"\
"1: \n\t"\
"lddqu 6(%0), %%xmm1 \n\t"\
"lddqu -2(%0), %%xmm7 \n\t"\
"movdqa %%xmm1, %%xmm0 \n\t"\
"punpckhbw %%xmm15, %%xmm1 \n\t"\
"punpcklbw %%xmm15, %%xmm0 \n\t"\
"punpcklbw %%xmm15, %%xmm7 \n\t"\
"movdqa %%xmm1, %%xmm2 \n\t"\
"movdqa %%xmm0, %%xmm6 \n\t"\
"movdqa %%xmm1, %%xmm3 \n\t"\
"movdqa %%xmm0, %%xmm8 \n\t"\
"movdqa %%xmm1, %%xmm4 \n\t"\
"movdqa %%xmm0, %%xmm9 \n\t"\
"movdqa %%xmm0, %%xmm12 \n\t"\
"movdqa %%xmm1, %%xmm11 \n\t"\
"palignr $10,%%xmm0, %%xmm11\n\t"\
"palignr $10,%%xmm7, %%xmm12\n\t"\
"palignr $2, %%xmm0, %%xmm4 \n\t"\
"palignr $2, %%xmm7, %%xmm9 \n\t"\
"palignr $4, %%xmm0, %%xmm3 \n\t"\
"palignr $4, %%xmm7, %%xmm8 \n\t"\
"palignr $6, %%xmm0, %%xmm2 \n\t"\
"palignr $6, %%xmm7, %%xmm6 \n\t"\
"paddw %%xmm0 ,%%xmm11 \n\t"\
"palignr $8, %%xmm0, %%xmm1 \n\t"\
"palignr $8, %%xmm7, %%xmm0 \n\t"\
"paddw %%xmm12,%%xmm7 \n\t"\
"paddw %%xmm3, %%xmm2 \n\t"\
"paddw %%xmm8, %%xmm6 \n\t"\
"paddw %%xmm4, %%xmm1 \n\t"\
"paddw %%xmm9, %%xmm0 \n\t"\
"psllw $2, %%xmm2 \n\t"\
"psllw $2, %%xmm6 \n\t"\
"psubw %%xmm1, %%xmm2 \n\t"\
"psubw %%xmm0, %%xmm6 \n\t"\
"paddw %%xmm13,%%xmm11 \n\t"\
"paddw %%xmm13,%%xmm7 \n\t"\
"pmullw %%xmm14,%%xmm2 \n\t"\
"pmullw %%xmm14,%%xmm6 \n\t"\
"lddqu (%2), %%xmm3 \n\t"\
"paddw %%xmm11,%%xmm2 \n\t"\
"paddw %%xmm7, %%xmm6 \n\t"\
"psraw $5, %%xmm2 \n\t"\
"psraw $5, %%xmm6 \n\t"\
"packuswb %%xmm2,%%xmm6 \n\t"\
"pavgb %%xmm3, %%xmm6 \n\t"\
OP(%%xmm6, (%1), %%xmm4, dqa)\
"add %5, %0 \n\t"\
"add %5, %1 \n\t"\
"add %4, %2 \n\t"\
"decl %3 \n\t"\
"jg 1b \n\t"\
: "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
: "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
"m"(ff_pw_5), "m"(ff_pw_16)\
: XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , \
"%xmm4" , "%xmm5" , "%xmm6" , "%xmm7" , \
"%xmm8" , "%xmm9" , "%xmm10", "%xmm11", \
"%xmm12", "%xmm13", "%xmm14", "%xmm15",)\
"memory"\
);\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
src += 8*dstStride;\
dst += 8*dstStride;\
src2 += 8*src2Stride;\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64

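/* The SSSE3 horizontal filters load 16 source bytes once and then use
 * PALIGNR to build the shifted copies needed for the six filter taps,
 * instead of reloading from memory at each offset as the MMX versions do.
 * Since the data has been unpacked to 16-bit words, a PALIGNR byte shift of
 * 2*n corresponds to a shift of n pixels.
 */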
#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
int h=8;\
__asm__ volatile(\
"pxor %%xmm7, %%xmm7 \n\t"\
"movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\
"1: \n\t"\
"lddqu -2(%0), %%xmm1 \n\t"\
"movdqa %%xmm1, %%xmm0 \n\t"\
"punpckhbw %%xmm7, %%xmm1 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"movdqa %%xmm1, %%xmm2 \n\t"\
"movdqa %%xmm1, %%xmm3 \n\t"\
"movdqa %%xmm1, %%xmm4 \n\t"\
"movdqa %%xmm1, %%xmm5 \n\t"\
"palignr $2, %%xmm0, %%xmm4 \n\t"\
"palignr $4, %%xmm0, %%xmm3 \n\t"\
"palignr $6, %%xmm0, %%xmm2 \n\t"\
"palignr $8, %%xmm0, %%xmm1 \n\t"\
"palignr $10,%%xmm0, %%xmm5 \n\t"\
"paddw %%xmm5, %%xmm0 \n\t"\
"paddw %%xmm3, %%xmm2 \n\t"\
"paddw %%xmm4, %%xmm1 \n\t"\
"psllw $2, %%xmm2 \n\t"\
"movq (%2), %%xmm3 \n\t"\
"psubw %%xmm1, %%xmm2 \n\t"\
"paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\
"pmullw %%xmm6, %%xmm2 \n\t"\
"paddw %%xmm0, %%xmm2 \n\t"\
"psraw $5, %%xmm2 \n\t"\
"packuswb %%xmm2, %%xmm2 \n\t"\
"pavgb %%xmm3, %%xmm2 \n\t"\
OP(%%xmm2, (%1), %%xmm4, q)\
"add %5, %0 \n\t"\
"add %5, %1 \n\t"\
"add %4, %2 \n\t"\
"decl %3 \n\t"\
"jg 1b \n\t"\
: "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
: "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
"memory"\
);\
}\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
int h=8;\
__asm__ volatile(\
"pxor %%xmm7, %%xmm7 \n\t"\
"movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\
"1: \n\t"\
"lddqu -2(%0), %%xmm1 \n\t"\
"movdqa %%xmm1, %%xmm0 \n\t"\
"punpckhbw %%xmm7, %%xmm1 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"movdqa %%xmm1, %%xmm2 \n\t"\
"movdqa %%xmm1, %%xmm3 \n\t"\
"movdqa %%xmm1, %%xmm4 \n\t"\
"movdqa %%xmm1, %%xmm5 \n\t"\
"palignr $2, %%xmm0, %%xmm4 \n\t"\
"palignr $4, %%xmm0, %%xmm3 \n\t"\
"palignr $6, %%xmm0, %%xmm2 \n\t"\
"palignr $8, %%xmm0, %%xmm1 \n\t"\
"palignr $10,%%xmm0, %%xmm5 \n\t"\
"paddw %%xmm5, %%xmm0 \n\t"\
"paddw %%xmm3, %%xmm2 \n\t"\
"paddw %%xmm4, %%xmm1 \n\t"\
"psllw $2, %%xmm2 \n\t"\
"psubw %%xmm1, %%xmm2 \n\t"\
"paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\
"pmullw %%xmm6, %%xmm2 \n\t"\
"paddw %%xmm0, %%xmm2 \n\t"\
"psraw $5, %%xmm2 \n\t"\
"packuswb %%xmm2, %%xmm2 \n\t"\
OP(%%xmm2, (%1), %%xmm4, q)\
"add %3, %0 \n\t"\
"add %4, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(src), "+c"(dst), "+g"(h)\
: "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
"memory"\
);\
}\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

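/* The SSE2/SSSE3 vertical filter is the same algorithm as the MMX version,
 * but QPEL_H264V_XMM works on 8 pixels per row at a time, so a 16-wide block
 * is handled as two 8-wide column passes by the qpel16 wrapper below.
 */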
#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
src -= 2*srcStride;\
\
__asm__ volatile(\
"pxor %%xmm7, %%xmm7 \n\t"\
"movq (%0), %%xmm0 \n\t"\
"add %2, %0 \n\t"\
"movq (%0), %%xmm1 \n\t"\
"add %2, %0 \n\t"\
"movq (%0), %%xmm2 \n\t"\
"add %2, %0 \n\t"\
"movq (%0), %%xmm3 \n\t"\
"add %2, %0 \n\t"\
"movq (%0), %%xmm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"punpcklbw %%xmm7, %%xmm1 \n\t"\
"punpcklbw %%xmm7, %%xmm2 \n\t"\
"punpcklbw %%xmm7, %%xmm3 \n\t"\
"punpcklbw %%xmm7, %%xmm4 \n\t"\
QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
"cmpl $16, %4 \n\t"\
"jne 2f \n\t"\
QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
"2: \n\t"\
\
: "+a"(src), "+c"(dst)\
: "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "rm"(h)\
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
"memory"\
);\
}\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}

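/* The hv ("centre") filter is separable: the hv1 pass applies the vertical
 * 6-tap filter and stores unscaled 16-bit sums into tmp (one tmp row is 48
 * bytes, i.e. 24 int16_t), and the hv2 pass applies the horizontal 6-tap
 * filter to tmp and performs the final shift.  hv2 uses shift/add identities
 * instead of pmullw so that the 16-bit intermediates do not overflow.
 * A rough scalar sketch, where filter6() is an assumed helper computing
 * A - 5*B + 20*C + 20*D - 5*E + F:
 *
 *     tmp[y*24 + x] = filter6(s[-2*stride], s[-stride], s[0],
 *                             s[stride], s[2*stride], s[3*stride]) + 16;
 *     dst[x] = av_clip_uint8(filter6(t[-2], t[-1], t[0], t[1], t[2], t[3]) >> 10);
 *
 * (the +16 in each tmp element sums to the usual +512 rounding term under
 * the second pass, since the filter taps add up to 32).
 */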
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
int w = (size+8)>>3;
src -= 2*srcStride+2;
while(w--){
__asm__ volatile(
"pxor %%xmm7, %%xmm7 \n\t"
"movq (%0), %%xmm0 \n\t"
"add %2, %0 \n\t"
"movq (%0), %%xmm1 \n\t"
"add %2, %0 \n\t"
"movq (%0), %%xmm2 \n\t"
"add %2, %0 \n\t"
"movq (%0), %%xmm3 \n\t"
"add %2, %0 \n\t"
"movq (%0), %%xmm4 \n\t"
"add %2, %0 \n\t"
"punpcklbw %%xmm7, %%xmm0 \n\t"
"punpcklbw %%xmm7, %%xmm1 \n\t"
"punpcklbw %%xmm7, %%xmm2 \n\t"
"punpcklbw %%xmm7, %%xmm3 \n\t"
"punpcklbw %%xmm7, %%xmm4 \n\t"
QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48)
QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48)
QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48)
QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48)
QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48)
QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48)
QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48)
QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48)
"cmpl $16, %3 \n\t"
"jne 2f \n\t"
QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48)
QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48)
QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
"2: \n\t"
: "+a"(src)
: "c"(tmp), "S"((x86_reg)srcStride), "rm"(size)
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)
"memory"
);
tmp += 8;
src += 8 - (size+5)*srcStride;
}
}

#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
int h = size;\
if(size == 16){\
__asm__ volatile(\
"1: \n\t"\
"movdqa 32(%0), %%xmm4 \n\t"\
"movdqa 16(%0), %%xmm5 \n\t"\
"movdqa (%0), %%xmm7 \n\t"\
"movdqa %%xmm4, %%xmm3 \n\t"\
"movdqa %%xmm4, %%xmm2 \n\t"\
"movdqa %%xmm4, %%xmm1 \n\t"\
"movdqa %%xmm4, %%xmm0 \n\t"\
"palignr $10, %%xmm5, %%xmm0 \n\t"\
"palignr $8, %%xmm5, %%xmm1 \n\t"\
"palignr $6, %%xmm5, %%xmm2 \n\t"\
"palignr $4, %%xmm5, %%xmm3 \n\t"\
"palignr $2, %%xmm5, %%xmm4 \n\t"\
"paddw %%xmm5, %%xmm0 \n\t"\
"paddw %%xmm4, %%xmm1 \n\t"\
"paddw %%xmm3, %%xmm2 \n\t"\
"movdqa %%xmm5, %%xmm6 \n\t"\
"movdqa %%xmm5, %%xmm4 \n\t"\
"movdqa %%xmm5, %%xmm3 \n\t"\
"palignr $8, %%xmm7, %%xmm4 \n\t"\
"palignr $2, %%xmm7, %%xmm6 \n\t"\
"palignr $10, %%xmm7, %%xmm3 \n\t"\
"paddw %%xmm6, %%xmm4 \n\t"\
"movdqa %%xmm5, %%xmm6 \n\t"\
"palignr $6, %%xmm7, %%xmm5 \n\t"\
"palignr $4, %%xmm7, %%xmm6 \n\t"\
"paddw %%xmm7, %%xmm3 \n\t"\
"paddw %%xmm6, %%xmm5 \n\t"\
\
"psubw %%xmm1, %%xmm0 \n\t"\
"psubw %%xmm4, %%xmm3 \n\t"\
"psraw $2, %%xmm0 \n\t"\
"psraw $2, %%xmm3 \n\t"\
"psubw %%xmm1, %%xmm0 \n\t"\
"psubw %%xmm4, %%xmm3 \n\t"\
"paddw %%xmm2, %%xmm0 \n\t"\
"paddw %%xmm5, %%xmm3 \n\t"\
"psraw $2, %%xmm0 \n\t"\
"psraw $2, %%xmm3 \n\t"\
"paddw %%xmm2, %%xmm0 \n\t"\
"paddw %%xmm5, %%xmm3 \n\t"\
"psraw $6, %%xmm0 \n\t"\
"psraw $6, %%xmm3 \n\t"\
"packuswb %%xmm0, %%xmm3 \n\t"\
OP(%%xmm3, (%1), %%xmm7, dqa)\
"add $48, %0 \n\t"\
"add %3, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(tmp), "+c"(dst), "+g"(h)\
: "S"((x86_reg)dstStride)\
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
"memory"\
);\
}else{\
__asm__ volatile(\
"1: \n\t"\
"movdqa 16(%0), %%xmm1 \n\t"\
"movdqa (%0), %%xmm0 \n\t"\
"movdqa %%xmm1, %%xmm2 \n\t"\
"movdqa %%xmm1, %%xmm3 \n\t"\
"movdqa %%xmm1, %%xmm4 \n\t"\
"movdqa %%xmm1, %%xmm5 \n\t"\
"palignr $10, %%xmm0, %%xmm5 \n\t"\
"palignr $8, %%xmm0, %%xmm4 \n\t"\
"palignr $6, %%xmm0, %%xmm3 \n\t"\
"palignr $4, %%xmm0, %%xmm2 \n\t"\
"palignr $2, %%xmm0, %%xmm1 \n\t"\
"paddw %%xmm5, %%xmm0 \n\t"\
"paddw %%xmm4, %%xmm1 \n\t"\
"paddw %%xmm3, %%xmm2 \n\t"\
"psubw %%xmm1, %%xmm0 \n\t"\
"psraw $2, %%xmm0 \n\t"\
"psubw %%xmm1, %%xmm0 \n\t"\
"paddw %%xmm2, %%xmm0 \n\t"\
"psraw $2, %%xmm0 \n\t"\
"paddw %%xmm2, %%xmm0 \n\t"\
"psraw $6, %%xmm0 \n\t"\
"packuswb %%xmm0, %%xmm0 \n\t"\
OP(%%xmm0, (%1), %%xmm7, q)\
"add $48, %0 \n\t"\
"add %3, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(tmp), "+c"(dst), "+g"(h)\
: "S"((x86_reg)dstStride)\
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
"memory"\
);\
}\
}

#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

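/* No dedicated SSE2/SSSE3 versions of these small averaging helpers exist,
 * so the SSE2/SSSE3 paths simply reuse the MMX2 implementations.
 */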
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2

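/* H264_MC() stamps out the full set of sixteen quarter-pel motion
 * compensation entry points (mc00 .. mc33) for one block size and one
 * instruction set, built from the C/H/V/HV sub-macros defined below.
 */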
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

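/* The mixed sub-pel positions are built from intermediate half-pel planes:
 * mc11/mc31/mc13/mc33 filter vertically into a temporary buffer and then run
 * the _l2 horizontal filter, which averages its own result with that buffer;
 * mc21/mc23 average the horizontal filter with the hv ("centre") result; and
 * mc12/mc32 average the hv result with the 16-bit vertical intermediate
 * shifted down by 5 (the pixels*_l2_shift5 helpers).
 */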
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\

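/* OP macros used when instantiating the templates above: PUT_OP (a plain
 * store, defined elsewhere in the dsputil MMX code) writes the result, while
 * the AVG variants below first average with the existing destination, using
 * pavgusb on 3DNow! and pavgb on MMX2/SSE.
 */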
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
#if HAVE_SSSE3
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

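/* Instantiate the public MC functions: 4/8/16 for 3DNow! and MMX2, 8/16
 * vertical and hv for SSE2, and 8/16 horizontal and hv for SSSE3 (the SSSE3
 * build also reuses the SSE2 vertical code via the aliases above).
 */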
H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#if HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif

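/* Prototypes for the 10-bit-depth quarter-pel functions, presumably
 * implemented in external (yasm) assembly; only declarations are needed here
 * so they can be plugged into the function tables.
 */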
#define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \
void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, uint8_t *src, int stride);

#define LUMA_MC_ALL(DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)

#define LUMA_MC_816(DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)

LUMA_MC_ALL(10, mc00, mmxext)
LUMA_MC_ALL(10, mc10, mmxext)
LUMA_MC_ALL(10, mc20, mmxext)
LUMA_MC_ALL(10, mc30, mmxext)
LUMA_MC_ALL(10, mc01, mmxext)
LUMA_MC_ALL(10, mc11, mmxext)
LUMA_MC_ALL(10, mc21, mmxext)
LUMA_MC_ALL(10, mc31, mmxext)
LUMA_MC_ALL(10, mc02, mmxext)
LUMA_MC_ALL(10, mc12, mmxext)
LUMA_MC_ALL(10, mc22, mmxext)
LUMA_MC_ALL(10, mc32, mmxext)
LUMA_MC_ALL(10, mc03, mmxext)
LUMA_MC_ALL(10, mc13, mmxext)
LUMA_MC_ALL(10, mc23, mmxext)
LUMA_MC_ALL(10, mc33, mmxext)

LUMA_MC_816(10, mc00, sse2)
LUMA_MC_816(10, mc10, sse2)
LUMA_MC_816(10, mc10, sse2_cache64)
LUMA_MC_816(10, mc10, ssse3_cache64)
LUMA_MC_816(10, mc20, sse2)
LUMA_MC_816(10, mc20, sse2_cache64)
LUMA_MC_816(10, mc20, ssse3_cache64)
LUMA_MC_816(10, mc30, sse2)
LUMA_MC_816(10, mc30, sse2_cache64)
LUMA_MC_816(10, mc30, ssse3_cache64)
LUMA_MC_816(10, mc01, sse2)
LUMA_MC_816(10, mc11, sse2)
LUMA_MC_816(10, mc21, sse2)
LUMA_MC_816(10, mc31, sse2)
LUMA_MC_816(10, mc02, sse2)
LUMA_MC_816(10, mc12, sse2)
LUMA_MC_816(10, mc22, sse2)
LUMA_MC_816(10, mc32, sse2)
LUMA_MC_816(10, mc03, sse2)
LUMA_MC_816(10, mc13, sse2)
LUMA_MC_816(10, mc23, sse2)
LUMA_MC_816(10, mc33, sse2)

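/* Build 16x16 10-bit MC from four 8x8 calls.  Pixels are 16 bits wide at
 * this depth, so an offset of +16 bytes advances by 8 pixels.  The wrappers
 * are only needed on 32-bit x86, since x86-64 always has the SSE2 versions
 * available (see the #if at the end of this file).
 */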
#define QPEL16_OPMC(OP, MC, MMX)\
void ff_ ## OP ## _h264_qpel16_ ## MC ## _10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\
src += 8*stride;\
dst += 8*stride;\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\
}

#define QPEL16_OP(MC, MMX)\
QPEL16_OPMC(put, MC, MMX)\
QPEL16_OPMC(avg, MC, MMX)

#define QPEL16(MMX)\
QPEL16_OP(mc00, MMX)\
QPEL16_OP(mc01, MMX)\
QPEL16_OP(mc02, MMX)\
QPEL16_OP(mc03, MMX)\
QPEL16_OP(mc10, MMX)\
QPEL16_OP(mc11, MMX)\
QPEL16_OP(mc12, MMX)\
QPEL16_OP(mc13, MMX)\
QPEL16_OP(mc20, MMX)\
QPEL16_OP(mc21, MMX)\
QPEL16_OP(mc22, MMX)\
QPEL16_OP(mc23, MMX)\
QPEL16_OP(mc30, MMX)\
QPEL16_OP(mc31, MMX)\
QPEL16_OP(mc32, MMX)\
QPEL16_OP(mc33, MMX)

#if ARCH_X86_32 && HAVE_YASM // ARCH_X86_64 implies sse2+
QPEL16(mmxext)
#endif