/*
 * VC-1 and WMV3 decoder - DSP functions, MMX/MMX2-optimized.
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
#include "libavcodec/vc1dsp.h"

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

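/** Add the bias stored in %%mm7 to %%mm3/%%mm4, then arithmetic-shift both right by SHIFT. */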
#define NORMALIZE_MMX(SHIFT)        \
    "paddw     %%mm7, %%mm3   \n\t" \
    "paddw     %%mm7, %%mm4   \n\t" \
    "psraw     "SHIFT", %%mm3 \n\t" \
    "psraw     "SHIFT", %%mm4 \n\t"

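/**
 * Clamp-pack %%mm3/%%mm4 to 8 bits and store (or average) them at (%2);
 * the DONT_PACK variant transfers both registers as raw 16-bit data instead.
 */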
#define TRANSFER_DO_PACK(OP)        \
    "packuswb  %%mm4, %%mm3   \n\t" \
    OP((%2), %%mm3)                 \
    "movq      %%mm3, (%2)    \n\t"

#define TRANSFER_DONT_PACK(OP)      \
    OP(0(%2), %%mm3)                \
    OP(8(%2), %%mm4)                \
    "movq      %%mm3, 0(%2)   \n\t" \
    "movq      %%mm4, 8(%2)   \n\t"

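/** Unpack 8-bit pixels to 16 bits against the zero register %%mm0, or leave them as-is. */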
#define DO_UNPACK(reg)   "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

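/** Broadcast a 16-bit rounder into all four words of %%mm7. */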
#define LOAD_ROUNDER_MMX(ROUND)     \
    "movd      "ROUND", %%mm7 \n\t" \
    "punpcklwd %%mm7, %%mm7   \n\t" \
    "punpckldq %%mm7, %%mm7   \n\t"

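/**
 * One line of the vertical 1/2-shift filter: apply the (-1,9,9,-1) taps over
 * four consecutive source rows (center rows in R1/R2, outer taps loaded
 * through %3 and %2), add the rounder in %%mm7, shift right by %4 and store
 * the 16-bit result at OFF(%1). Loads for the next iteration are interleaved
 * with the arithmetic.
 */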
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"       \n\t" \
    "movd      (%0,%3), %%mm"#R0"         \n\t" \
    "pmullw    %%mm6, %%mm"#R1"           \n\t" \
    "punpcklbw %%mm0, %%mm"#R0"           \n\t" \
    "movd      (%0,%2), %%mm"#R3"         \n\t" \
    "psubw     %%mm"#R0", %%mm"#R1"       \n\t" \
    "punpcklbw %%mm0, %%mm"#R3"           \n\t" \
    "paddw     %%mm7, %%mm"#R1"           \n\t" \
    "psubw     %%mm"#R3", %%mm"#R1"       \n\t" \
    "psraw     %4, %%mm"#R1"              \n\t" \
    "movq      %%mm"#R1", "#OFF"(%1)      \n\t" \
    "add       %2, %0                     \n\t"

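/**
 * Vertical 1/2-shift filter: applies (-1,9,9,-1) vertically and stores
 * unclamped 16-bit intermediates to dst, 8 rows of 12 columns (row stride
 * 24 bytes), in three passes of 4 columns each. Expects %%mm0 == 0;
 * keeping the constant 9 resident in %%mm6 lets the loads from src be
 * pipelined with the arithmetic.
 */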
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"            \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                 \n\t"
        "movd      (%0), %%mm2              \n\t"
        "add       %2, %0                   \n\t"
        "movd      (%0), %%mm3              \n\t"
        "punpcklbw %%mm0, %%mm2             \n\t"
        "punpcklbw %%mm0, %%mm3             \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                   \n\t"
        "add       $8, %1                   \n\t"
        "dec       %%"REG_c"                \n\t"
        "jnz 1b                             \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}

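/**
 * Horizontal 1/2-shift filter on the 16-bit intermediates. The data is
 * already unpacked, so the outer-tap adds can work directly from memory.
 */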
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5  \n\t"\
        "1:                        \n\t"\
        "movq      2*0+0(%1), %%mm1\n\t"\
        "movq      2*0+8(%1), %%mm2\n\t"\
        "movq      2*1+0(%1), %%mm3\n\t"\
        "movq      2*1+8(%1), %%mm4\n\t"\
        "paddw     2*3+0(%1), %%mm1\n\t"\
        "paddw     2*3+8(%1), %%mm2\n\t"\
        "paddw     2*2+0(%1), %%mm3\n\t"\
        "paddw     2*2+8(%1), %%mm4\n\t"\
        "pmullw    %%mm5, %%mm3    \n\t"\
        "pmullw    %%mm5, %%mm4    \n\t"\
        "psubw     %%mm1, %%mm3    \n\t"\
        "psubw     %%mm2, %%mm4    \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3    \n\t"\
        "paddw     %%mm6, %%mm4    \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1         \n\t"\
        "add       %3, %2          \n\t"\
        "decl      %0              \n\t"\
        "jnz 1b                    \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)

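/**
 * Purely vertical or horizontal 1/2-shift interpolation: applies the
 * (-1,9,9,-1)/16 filter directly on 8-bit pixels over an 8x8 block.
 * offset selects the direction (1: horizontal, stride: vertical).
 * Expects %%mm0 == 0.
 */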
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"   \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                        \n\t"\
        "movd      0(%0   ), %%mm3 \n\t"\
        "movd      4(%0   ), %%mm4 \n\t"\
        "movd      0(%0,%2), %%mm1 \n\t"\
        "movd      4(%0,%2), %%mm2 \n\t"\
        "add       %2, %0          \n\t"\
        "punpcklbw %%mm0, %%mm3    \n\t"\
        "punpcklbw %%mm0, %%mm4    \n\t"\
        "punpcklbw %%mm0, %%mm1    \n\t"\
        "punpcklbw %%mm0, %%mm2    \n\t"\
        "paddw     %%mm1, %%mm3    \n\t"\
        "paddw     %%mm2, %%mm4    \n\t"\
        "movd      0(%0,%3), %%mm1 \n\t"\
        "movd      4(%0,%3), %%mm2 \n\t"\
        "pmullw    %%mm6, %%mm3    \n\t" /* 0,9,9,0 */\
        "pmullw    %%mm6, %%mm4    \n\t" /* 0,9,9,0 */\
        "punpcklbw %%mm0, %%mm1    \n\t"\
        "punpcklbw %%mm0, %%mm2    \n\t"\
        "psubw     %%mm1, %%mm3    \n\t" /* -1,9,9,0 */\
        "psubw     %%mm2, %%mm4    \n\t" /* -1,9,9,0 */\
        "movd      0(%0,%2), %%mm1 \n\t"\
        "movd      4(%0,%2), %%mm2 \n\t"\
        "punpcklbw %%mm0, %%mm1    \n\t"\
        "punpcklbw %%mm0, %%mm2    \n\t"\
        "psubw     %%mm1, %%mm3    \n\t" /* -1,9,9,-1 */\
        "psubw     %%mm2, %%mm4    \n\t" /* -1,9,9,-1 */\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3    \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)     \n\t"\
        "add       %6, %0          \n\t"\
        "add       %4, %1          \n\t"\
        "dec       %%"REG_c"       \n\t"\
        "jnz 1b                    \n\t"\
        : "+r"(src), "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)

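/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation: accumulates
 * 53*A3 + 18*A2 - 3*A1 - 4*A4 into %%mm3/%%mm4 (the caller preloads
 * %%mm5 = 53 and %%mm6 = 18).
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty)
 * @param MOVQ    "movd 1" or "movq 2": the load instruction and the offset
 *                scale, depending on whether the data is still packed
 * @param A1      Address of 1st tap
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */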
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)     \
    MOVQ "*0+"A1", %%mm1       \n\t"                          \
    MOVQ "*4+"A1", %%mm2       \n\t"                          \
    UNPACK("%%mm1")                                           \
    UNPACK("%%mm2")                                           \
    "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                  \
    "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                  \
    MOVQ "*0+"A2", %%mm3       \n\t"                          \
    MOVQ "*4+"A2", %%mm4       \n\t"                          \
    UNPACK("%%mm3")                                           \
    UNPACK("%%mm4")                                           \
    "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                \
    "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                \
    "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */              \
    "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */              \
    MOVQ "*0+"A4", %%mm1       \n\t"                          \
    MOVQ "*4+"A4", %%mm2       \n\t"                          \
    UNPACK("%%mm1")                                           \
    UNPACK("%%mm2")                                           \
    "psllw     $2, %%mm1       \n\t" /* 4* */                 \
    "psllw     $2, %%mm2       \n\t" /* 4* */                 \
    "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */           \
    "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */           \
    MOVQ "*0+"A3", %%mm1       \n\t"                          \
    MOVQ "*4+"A3", %%mm2       \n\t"                          \
    UNPACK("%%mm1")                                           \
    UNPACK("%%mm2")                                           \
    "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                \
    "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                \
    "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */        \
    "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */

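/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here offset = src_stride: A1-A4 must address the taps through
 * %3 (src_stride) and %4 (3*src_stride). Produces an 8-row, 12-column
 * block of unclamped 16-bit intermediates (row stride 24 bytes).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for info on A1-A4
 */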
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,     \
                                 x86_reg src_stride,                   \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK(OP_PUT)                                      \
        /* Compute the last 4 (of 12) columns of the row */             \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t"                                \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t"                                \
        "paddw     %%mm1, %%mm3    \n\t"                                \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t"                                \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
        : "memory"                                                      \
    );                                                                  \
}

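/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * The input is 16 bits wide, so A1-A4 are plain offsets from %1; the
 * result is packed, clamped and stored as 8x8 pixels.
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for info on A1-A4
 */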
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void                                                             \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,   \
                                       const int16_t *src, int rnd)    \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK(OP)                                            \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
        : "memory"                                                      \
    );                                                                  \
}

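/**
 * Macro to build the 8-bit, any-direction version of vc1_put_shift[13],
 * filtering 8x8 pixels in a single pass. Here offset is a parameter:
 * A1-A4 must address the taps through %3 (offset) and %4 (3*offset).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for info on A1-A4
 */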
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
static void                                                             \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,        \
                              x86_reg stride, int rnd, x86_reg offset)  \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                  \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK(OP)                                            \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
        : "memory"                                                      \
    );                                                                  \
}

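/* 1/4 shift */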
MSPEL_FILTER13_8B     (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

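/* 3/4 shift */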
MSPEL_FILTER13_8B     (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

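/**
 * Interpolate fractional pel values by applying the proper vertical then
 * horizontal filter. %%mm0 is zeroed up front: all helpers use it as the
 * zero register for unpacking.
 *
 * @param dst    Destination buffer for interpolated pels
 * @param src    Source buffer
 * @param stride Stride for both src and dst buffers
 * @param hmode  Horizontal filter mode (0: none, 1-3: quarter-pel shift)
 * @param vmode  Vertical filter mode (0: none, 1-3: quarter-pel shift)
 * @param rnd    Rounding bias
 */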
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int    shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int    r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)

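/** Declare the put (MMX) and avg (MMX2) wrappers for one (hmode, vmode) pair. */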
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

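/**
 * DC-only inverse transforms: scale the DC coefficient, splat its positive
 * value into %%mm0 and its negation into %%mm1 (both packed to bytes), then
 * paddusb/psubusb against the destination rows so both signs are added with
 * unsigned saturation.
 */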
static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        /* movq stores 8 bytes per row, so declare 64-bit outputs: with
         * uint32_t the compiler may assume only 4 bytes are modified */
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        /* movq stores 8 bytes per row, so declare 64-bit outputs */
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

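/** Build the 16-pixel loop filters from two calls to the external 8-pixel ones. */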
#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_YASM
LOOP_FILTER(mmx2)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}

#endif

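/* Prototypes for the no-rounding chroma MC functions implemented in yasm. */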
void ff_put_vc1_chroma_mc8_mmx_nornd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

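/** Set the VC-1 DSP function pointers according to the detected CPU flags. */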
void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
        dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

        dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
        dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

        dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
        dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
        dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

        dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
        dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
        dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
        dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;

        if (HAVE_YASM)
            dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_mmx_nornd;
    }

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;

        dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
        dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
        dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
        dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;

        if (HAVE_YASM)
            dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_mmx2_nornd;
    } else if (HAVE_YASM && mm_flags & AV_CPU_FLAG_3DNOW) {
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_3dnow_nornd;
    }

    if (HAVE_YASM && mm_flags & AV_CPU_FLAG_SSSE3) {
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_ssse3_nornd;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_ssse3_nornd;
    }

#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        ASSIGN_LF(mmx2);
    }

    if (mm_flags & AV_CPU_FLAG_SSE2) {
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
    }
    if (mm_flags & AV_CPU_FLAG_SSSE3) {
        ASSIGN_LF(ssse3);
    }
    if (mm_flags & AV_CPU_FLAG_SSE4) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif
}