/*
 * NOTE(review): truncated declaration fragments -- each function's name
 * and leading parameters were lost in extraction (stray original-file
 * line numbers "55".."65" are fused into the text).  Only the trailing
 * "const int16_t *window, unsigned int len);" of six prototypes
 * survives; presumably a family of per-CPU-flavor SIMD apply-window
 * routines -- confirm against the pristine file before relying on this.
 */
55 const int16_t *window,
unsigned int len);
57 const int16_t *window,
unsigned int len);
59 const int16_t *window,
unsigned int len);
61 const int16_t *window,
unsigned int len);
63 const int16_t *window,
unsigned int len);
65 const int16_t *window,
unsigned int len);
/*
 * NOTE(review): this chunk is a corrupted extraction.  Stray
 * original-file line numbers ("67", "72", "77", ...) are fused into the
 * code, and the jumps in those numbers (77->79, 84->86, 102->107,
 * 113->117) show that several macro continuation lines are missing:
 * there is no "__asm__ volatile(" opener, no loop label/branch, and no
 * output-operand list before the inputs below.  Code is kept
 * byte-identical; restore from the pristine source before building.
 */
67 #if ARCH_X86_32 && defined(__INTEL_COMPILER)
72 #if HAVE_SSE_INLINE && HAVE_7REGS
/*
 * MIX5(mono, stereo): SSE inline-asm kernel downmixing 5 float channels
 * in place into 1 (mono) or 2 (stereo) channels.  Three matrix
 * coefficients are loaded once (movss at offsets 0, 8, 24 from what is
 * apparently the matrix pointer, operand %1) and broadcast across all
 * four lanes (shufps $0, reg, reg); each iteration then
 * multiply-accumulates 4 floats (one movaps) per channel.  The
 * mono()/stereo() macro parameters expand their argument only in the
 * respective variant, selecting the extra adds and the second store.
 * Channel pointers are biased by +len (see the samples[n] + len input
 * operands) so they can be indexed with a negative, upward-counting
 * offset -- a common negative-index loop idiom; TODO confirm against
 * the missing loop-control lines.
 */
77 #define MIX5(mono, stereo) \
79 "movss 0(%1), %%xmm5 \n" \
80 "movss 8(%1), %%xmm6 \n" \
81 "movss 24(%1), %%xmm7 \n" \
82 "shufps $0, %%xmm5, %%xmm5 \n" /* splat coeff into all 4 lanes */ \
83 "shufps $0, %%xmm6, %%xmm6 \n" \
84 "shufps $0, %%xmm7, %%xmm7 \n" \
86 "movaps (%0, %2), %%xmm0 \n" /* load 4 floats from each of the 5 channels */ \
87 "movaps (%0, %3), %%xmm1 \n" \
88 "movaps (%0, %4), %%xmm2 \n" \
89 "movaps (%0, %5), %%xmm3 \n" \
90 "movaps (%0, %6), %%xmm4 \n" \
91 "mulps %%xmm5, %%xmm0 \n" \
92 "mulps %%xmm6, %%xmm1 \n" \
93 "mulps %%xmm5, %%xmm2 \n" \
94 "mulps %%xmm7, %%xmm3 \n" \
95 "mulps %%xmm7, %%xmm4 \n" \
96 stereo("addps %%xmm1, %%xmm0 \n") \
97 "addps %%xmm1, %%xmm2 \n" \
98 "addps %%xmm3, %%xmm0 \n" \
99 "addps %%xmm4, %%xmm2 \n" \
100 mono("addps %%xmm2, %%xmm0 \n") /* mono folds both accumulators */ \
101 "movaps %%xmm0, (%0, %2) \n" /* store over channel 0 ... */ \
102 stereo("movaps %%xmm2, (%0, %3) \n") /* ... and channel 1 for stereo */ \
107 "r"(samples[0] + len), \
108 "r"(samples[1] + len), \
109 "r"(samples[2] + len), \
110 "r"(samples[3] + len), \
111 "r"(samples[4] + len) \
112 : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
113 "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
/*
 * MIX_MISC(stereo): generic SSE downmix kernel used when the channel
 * count / matrix shape does not match a specialised fast path.  It
 * walks what is apparently a table of channel pointers (operand %6,
 * indexed by a channel counter scaled by %c8 == sizeof(float *)) and
 * multiply-accumulates each 4-float group against per-channel
 * coefficients from the SIMD matrix table (operand %4, stride 8).  The
 * stereo() parameter expands a second accumulator (xmm1/xmm3, second
 * coefficient row at +16) and a second store for the 2-channel case.
 * NOTE(review): corrupted extraction -- stray original line numbers are
 * fused in, and the number jumps (117->121, 125->127, 133->137,
 * 139->142, 144->146) show the asm opener, the loop label/branch and
 * part of the operand list are missing.  Code kept byte-identical;
 * restore from the pristine source before building.
 */
117 #define MIX_MISC(stereo) \
121 "mov -%c7(%6, %2, %c8), %3 \n" /* first channel pointer (offset -%c7 from table end) */ \
122 "movaps (%3, %0), %%xmm0 \n" \
123 stereo("movaps %%xmm0, %%xmm1 \n") \
124 "mulps %%xmm4, %%xmm0 \n" \
125 stereo("mulps %%xmm5, %%xmm1 \n") \
127 "mov (%6, %2, %c8), %1 \n" /* next channel pointer */ \
128 "movaps (%1, %0), %%xmm2 \n" \
129 stereo("movaps %%xmm2, %%xmm3 \n") \
130 "mulps (%4, %2, 8), %%xmm2 \n" /* per-channel SIMD matrix coefficients */ \
131 stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
132 "addps %%xmm2, %%xmm0 \n" \
133 stereo("addps %%xmm3, %%xmm1 \n") \
137 stereo("mov (%6, %2, %c8), %1 \n") \
138 "movaps %%xmm0, (%3, %0) \n" /* write back accumulated output */ \
139 stereo("movaps %%xmm1, (%1, %0) \n") \
142 : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
143 : "r"(matrix_simd + in_ch), \
144 "g"((intptr_t) - 4 * (in_ch - 1)), \
146 "i"(sizeof(float *)), "i"(sizeof(float *)/4) \
/*
 * ac3_downmix_sse(): AC-3 float downmix entry point using the SSE
 * kernels above.  It bitwise-compares matrix coefficients to detect the
 * symmetric 5->2 and 5->1 fast paths (which the MIX5 kernel handles --
 * presumably; the invocations are missing from this extraction), and
 * otherwise builds a lane-splatted coefficient table for the generic
 * MIX_MISC path.
 *
 * NOTE(review): corrupted extraction -- stray original line numbers are
 * fused into the code and the jumps (153->156, 161->163, 165->171,
 * 174->178, 186->238->252) show that local variable declarations
 * (i, j, samp, matrix_simd, cpu_flags), the kernel invocations, loop
 * labels and closing braces are all missing.  Code kept byte-identical;
 * it must be restored from the pristine source before it can compile.
 */
150 static void ac3_downmix_sse(
float **samples,
float (*matrix)[2],
151 int out_ch,
int in_ch,
int len)
/* Reinterpret the float matrix as ints for exact bitwise equality
 * tests.  NOTE(review): this is a strict-aliasing type pun (UB under
 * -fstrict-aliasing) -- presumably the build uses -fno-strict-aliasing;
 * confirm, or route the comparison through memcpy/union. */
153 int (*matrix_cmp)[2] = (int(*)[2])matrix;
/* Negative byte offset counting up toward 0: pairs with the
 * "samples[n] + len" biased pointers used by the asm kernels. */
156 i = -len *
sizeof(float);
/* 5->2 fast path: bitwise check that the cross terms are zero and the
 * paired coefficients match exactly. */
157 if (in_ch == 5 && out_ch == 2 &&
158 !(matrix_cmp[0][1] | matrix_cmp[2][0] |
159 matrix_cmp[3][1] | matrix_cmp[4][0] |
160 (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
161 (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
163 }
/* 5->1 fast path: front pair and surround pair each share one
 * coefficient (bitwise equality again). */
else if (in_ch == 5 && out_ch == 1 &&
164 matrix_cmp[0][0] == matrix_cmp[2][0] &&
165 matrix_cmp[3][0] == matrix_cmp[4][0]) {
/* Generic path: capture biased end pointers for every input channel
 * (samp[] is declared in a missing portion of this function). */
171 for (j = 0; j < in_ch; j++)
172 samp[j] = samples[j] + len;
174 j = 2 * in_ch *
sizeof(float);
/* Splat each pair of matrix coefficients across 4 lanes into the
 * matrix_simd table consumed by MIX_MISC (asm opener and most of the
 * operand list are missing from this extraction). */
178 "movss (%2, %0), %%xmm4 \n"
179 "movss 4(%2, %0), %%xmm5 \n"
180 "shufps $0, %%xmm4, %%xmm4 \n"
181 "shufps $0, %%xmm5, %%xmm5 \n"
182 "movaps %%xmm4, (%1, %0, 4) \n"
183 "movaps %%xmm5, 16(%1, %0, 4) \n"
186 :
"r"(matrix_simd),
"r"(matrix)
238 }
/* NOTE(review): this branch appears to belong to a different (init?)
 * function -- cpu_flags is not declared in anything visible here. */
else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
252 #if HAVE_SSE_INLINE && HAVE_7REGS