#include "../swscale_internal.h"
#include "libavutil/x86/asm.h"   /* x86_reg and the FF_REG_* register-name macros */
#define RET      0xC3 // near return opcode for x86
#define PREFETCH "prefetchnta"
58 "movq (%%"FF_REG_d
", %%"FF_REG_a
"), %%mm3 \n\t"
59 "movd (%%"FF_REG_c
", %%"FF_REG_S
"), %%mm0 \n\t"
60 "movd 1(%%"FF_REG_c
", %%"FF_REG_S
"), %%mm1 \n\t"
61 "punpcklbw %%mm7, %%mm1 \n\t"
62 "punpcklbw %%mm7, %%mm0 \n\t"
63 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
65 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
67 "psubw %%mm1, %%mm0 \n\t"
68 "movl 8(%%"FF_REG_b
", %%"FF_REG_a
"), %%esi \n\t"
69 "pmullw %%mm3, %%mm0 \n\t"
70 "psllw $7, %%mm1 \n\t"
71 "paddw %%mm1, %%mm0 \n\t"
73 "movq %%mm0, (%%"FF_REG_D
", %%"FF_REG_a
") \n\t"
75 "add $8, %%"FF_REG_a
" \n\t"
89 :
"=r" (fragmentA),
"=r" (imm8OfPShufW1A),
"=r" (imm8OfPShufW2A),
90 "=r" (fragmentLengthA)
97 "movq (%%"FF_REG_d
", %%"FF_REG_a
"), %%mm3 \n\t"
98 "movd (%%"FF_REG_c
", %%"FF_REG_S
"), %%mm0 \n\t"
99 "punpcklbw %%mm7, %%mm0 \n\t"
100 "pshufw $0xFF, %%mm0, %%mm1 \n\t"
102 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
104 "psubw %%mm1, %%mm0 \n\t"
105 "movl 8(%%"FF_REG_b
", %%"FF_REG_a
"), %%esi \n\t"
106 "pmullw %%mm3, %%mm0 \n\t"
107 "psllw $7, %%mm1 \n\t"
108 "paddw %%mm1, %%mm0 \n\t"
110 "movq %%mm0, (%%"FF_REG_D
", %%"FF_REG_a
") \n\t"
112 "add $8, %%"FF_REG_a
" \n\t"
126 :
"=r" (fragmentB),
"=r" (imm8OfPShufW1B),
"=r" (imm8OfPShufW2B),
127 "=r" (fragmentLengthB)
    for (i = 0; i < dstW / numSplits; i++) {
            int b                  = ((xpos + xInc)     >> 16) - xx;
            int c                  = ((xpos + xInc * 2) >> 16) - xx;
            int d                  = ((xpos + xInc * 3) >> 16) - xx;
            int inc                = (d + 1 < 4);
            uint8_t *fragment      = inc ? fragmentB : fragmentA;
            x86_reg imm8OfPShufW1  = inc ? imm8OfPShufW1B : imm8OfPShufW1A;
            x86_reg imm8OfPShufW2  = inc ? imm8OfPShufW2B : imm8OfPShufW2A;
            x86_reg fragmentLength = inc ? fragmentLengthB : fragmentLengthA;
            int maxShift           = 3 - (d + inc);
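            /* 7-bit bilinear weights: the inverted fractional part of the
             * source position, scaled to 0..127 to match the psllw $7 in the
             * code fragments */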
                filter[i]        = ((xpos              & 0xFFFF) ^ 0xFFFF) >> 9;
                filter[i + 1]    = (((xpos + xInc)     & 0xFFFF) ^ 0xFFFF) >> 9;
                filter[i + 2]    = (((xpos + xInc * 2) & 0xFFFF) ^ 0xFFFF) >> 9;
                filter[i + 3]    = (((xpos + xInc * 3) & 0xFFFF) ^ 0xFFFF) >> 9;
                filterPos[i / 2] = xx;

                memcpy(filterCode + fragmentPos, fragment, fragmentLength);
                filterCode[fragmentPos + imm8OfPShufW1] =
                    (a + inc) | ((b + inc) << 2) | ((c + inc) << 4) | ((d + inc) << 6);
                filterCode[fragmentPos + imm8OfPShufW2] =
                    a | (b << 2) | (c << 4) | (d << 6);
                if (i + 4 - inc >= dstW)
                    shift = maxShift;             // avoid overread past the end of src
                else if ((filterPos[i / 2] & 3) <= maxShift)
                    shift = filterPos[i / 2] & 3; // align the reads
                if (shift && i >= shift) {
                    filterCode[fragmentPos + imm8OfPShufW1] += 0x55 * shift;
                    filterCode[fragmentPos + imm8OfPShufW2] += 0x55 * shift;
                    filterPos[i / 2]                        -= shift;
            fragmentPos += fragmentLength;
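            /* keep a near-return opcode at the end of the code emitted so far;
             * it is overwritten if another fragment follows, so the finished
             * routine ends in exactly one RET */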
                filterCode[fragmentPos] = RET;
        filterPos[((i / 2) + 1) & (~1)] = xpos >> 16;
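    /* size of the generated code in bytes, including the trailing RET */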
    return fragmentPos + 1;
#if !HAVE_EBX_AVAILABLE
        "mov       -8(%%rsp), %%"FF_REG_a"  \n\t"
        "mov     %%"FF_REG_a", %5           \n\t"
#if !HAVE_EBX_AVAILABLE
        "mov     %%"FF_REG_b", %5           \n\t"
        "pxor          %%mm7, %%mm7         \n\t"
        "mov              %0, %%"FF_REG_c"  \n\t"
        "mov              %1, %%"FF_REG_D"  \n\t"
        "mov              %2, %%"FF_REG_d"  \n\t"
        "mov              %3, %%"FF_REG_b"  \n\t"
        "xor    %%"FF_REG_a", %%"FF_REG_a"  \n\t"
#define CALL_MMXEXT_FILTER_CODE \
    "movl (%%"FF_REG_b"), %%esi                  \n\t"\
    "call *%4                                    \n\t"\
    "movl (%%"FF_REG_b", %%"FF_REG_a"), %%esi    \n\t"\
    "add %%"FF_REG_S", %%"FF_REG_c"              \n\t"\
    "add %%"FF_REG_a", %%"FF_REG_D"              \n\t"\
    "xor %%"FF_REG_a", %%"FF_REG_a"              \n\t"\

#define CALL_MMXEXT_FILTER_CODE \
    "movl (%%"FF_REG_b"), %%esi                       \n\t"\
    "call *%4                                         \n\t"\
    "addl (%%"FF_REG_b", %%"FF_REG_a"), %%"FF_REG_c"  \n\t"\
    "add %%"FF_REG_a", %%"FF_REG_D"                   \n\t"\
    "xor %%"FF_REG_a", %%"FF_REG_a"                   \n\t"\

        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
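        /* eight calls into the runtime-generated code; the pointers are
         * advanced between calls so each call continues where the previous
         * one stopped */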
255 "mov %5, %%"FF_REG_a
" \n\t"
256 "mov %%"FF_REG_a
", -8(%%rsp) \n\t"
258 #if !HAVE_EBX_AVAILABLE
259 "mov %5, %%"FF_REG_b
" \n\t"
262 ::
"m" (
src),
"m" (dst),
"m" (
filter),
"m" (filterPos),
263 "m" (mmxextFilterCode)
#if !HAVE_EBX_AVAILABLE
271 :
"%"FF_REG_a,
"%"FF_REG_c,
"%"FF_REG_d,
"%"FF_REG_S,
"%"FF_REG_D
272 #
if ARCH_X86_64 || HAVE_EBX_AVAILABLE
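    /* Output pixels whose source position reaches the last input pixel cannot
     * be filtered without reading past the end of src; replicate the last
     * source pixel for them (*128 matches the 7-bit filter scale). */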
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
                            const uint8_t *src2, int srcW, int xInc)
#if !HAVE_EBX_AVAILABLE
        "mov       -8(%%rsp), %%"FF_REG_a"  \n\t"
        "mov     %%"FF_REG_a", %7           \n\t"
#if !HAVE_EBX_AVAILABLE
        "mov     %%"FF_REG_b", %7           \n\t"
        "pxor          %%mm7, %%mm7         \n\t"
        "mov              %0, %%"FF_REG_c"  \n\t"
        "mov              %1, %%"FF_REG_D"  \n\t"
        "mov              %2, %%"FF_REG_d"  \n\t"
        "mov              %3, %%"FF_REG_b"  \n\t"
        "xor    %%"FF_REG_a", %%"FF_REG_a"  \n\t"
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
319 "xor %%"FF_REG_a
", %%"FF_REG_a
" \n\t"
320 "mov %5, %%"FF_REG_c
" \n\t"
321 "mov %6, %%"FF_REG_D
" \n\t"
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
332 "mov %7, %%"FF_REG_a
" \n\t"
333 "mov %%"FF_REG_a
", -8(%%rsp) \n\t"
335 #if !HAVE_EBX_AVAILABLE
336 "mov %7, %%"FF_REG_b
" \n\t"
339 ::
"m" (
src1),
"m" (dst1),
"m" (
filter),
"m" (filterPos),
340 "m" (mmxextFilterCode),
"m" (src2),
"m"(dst2)
#if !HAVE_EBX_AVAILABLE
        : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D
#if ARCH_X86_64 || HAVE_EBX_AVAILABLE
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
    }
}
#endif //HAVE_INLINE_ASM