#include <wasm_simd128.h>
    64, 83, 64, 36, 89, 75, 50, 18,
    90, 87, 80, 70, 57, 43, 25, 9,
    90, 90, 88, 85, 82, 78, 73, 67,
    61, 54, 46, 38, 31, 22, 13, 4,
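    /*
     * Rows of the shared coefficient table: 64, 83, 64, 36 drive the 4-point
     * inverse transform, 89, 75, 50, 18 the odd half of the 8-point, and the
     * remaining 24 values the odd halves of the 16- and 32-point transforms.
     */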
    v128_t t0 = wasm_i16x8_shuffle(src[0], src[1], 0, 8, 2, 10, 4, 12, 6, 14);
    v128_t t1 = wasm_i16x8_shuffle(src[0], src[1], 1, 9, 3, 11, 5, 13, 7, 15);
    v128_t t2 = wasm_i16x8_shuffle(src[2], src[3], 0, 8, 2, 10, 4, 12, 6, 14);
    v128_t t3 = wasm_i16x8_shuffle(src[2], src[3], 1, 9, 3, 11, 5, 13, 7, 15);

    src[0] = wasm_i32x4_shuffle(t0, t2, 0, 4, 2, 6);
    src[2] = wasm_i32x4_shuffle(t0, t2, 1, 5, 3, 7);
    src[1] = wasm_i32x4_shuffle(t1, t3, 0, 4, 2, 6);
    src[3] = wasm_i32x4_shuffle(t1, t3, 1, 5, 3, 7);
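    /* The i16x8 shuffles interleave even/odd 16-bit lanes of row pairs, and
     * the i32x4 shuffles interleave the resulting lane pairs; together they
     * transpose the 4x4 block held in src[0..3]. */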
    v128_t add = wasm_i32x4_splat(1 << (shift - 1));
    v128_t e0 = wasm_i32x4_extmul_low_i16x8(src[0], trans[0]); /* 64*s0 */
    v128_t e1 = wasm_i32x4_extmul_low_i16x8(src[0], trans[0]); /* 64*s0 */
    v128_t o0 = wasm_i32x4_extmul_low_i16x8(src[1], trans[1]); /* 83*s1 */
    v128_t o1 = wasm_i32x4_extmul_low_i16x8(src[1], trans[3]); /* 36*s1 */

    tmp[0] = wasm_i32x4_extmul_low_i16x8(src[2], trans[0]); /* 64*s2 */
    tmp[1] = wasm_i32x4_extmul_low_i16x8(src[2], trans[0]); /* 64*s2 */
    tmp[2] = wasm_i32x4_extmul_low_i16x8(src[3], trans[3]); /* 36*s3 */
    tmp[3] = wasm_i32x4_extmul_low_i16x8(src[3], trans[1]); /* 83*s3 */
    e0 = wasm_i32x4_add(e0, tmp[0]); /* E0 = 64*s0 + 64*s2 */
    e1 = wasm_i32x4_sub(e1, tmp[1]); /* E1 = 64*s0 - 64*s2 */
    o0 = wasm_i32x4_add(o0, tmp[2]); /* O0 = 83*s1 + 36*s3 */
    o1 = wasm_i32x4_sub(o1, tmp[3]); /* O1 = 36*s1 - 83*s3 */
    tmp[0] = wasm_i32x4_add(e0, o0);
    tmp[1] = wasm_i32x4_sub(e0, o0);
    tmp[2] = wasm_i32x4_add(e1, o1);
    tmp[3] = wasm_i32x4_sub(e1, o1);
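    /* Even/odd recombination: row0 = E0 + O0, row3 = E0 - O0,
     * row1 = E1 + O1, row2 = E1 - O1. The bias and arithmetic shift below
     * implement HEVC's (x + (1 << (shift - 1))) >> shift rounding before
     * the saturating narrow back to 16 bits. */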
    tmp[0] = wasm_i32x4_add(tmp[0], add);
    tmp[1] = wasm_i32x4_add(tmp[1], add);
    tmp[2] = wasm_i32x4_add(tmp[2], add);
    tmp[3] = wasm_i32x4_add(tmp[3], add);
    tmp[0] = wasm_i32x4_shr(tmp[0], shift);
    tmp[1] = wasm_i32x4_shr(tmp[1], shift);
    tmp[2] = wasm_i32x4_shr(tmp[2], shift);
    tmp[3] = wasm_i32x4_shr(tmp[3], shift);
    src[0] = wasm_i16x8_narrow_i32x4(tmp[0], tmp[0]);
    src[3] = wasm_i16x8_narrow_i32x4(tmp[1], tmp[1]);
    src[1] = wasm_i16x8_narrow_i32x4(tmp[2], tmp[2]);
    src[2] = wasm_i16x8_narrow_i32x4(tmp[3], tmp[3]);
    src[0] = wasm_v128_load64_zero(&coeffs[0]);
    src[1] = wasm_v128_load64_zero(&coeffs[4]);
    src[2] = wasm_v128_load64_zero(&coeffs[8]);
    src[3] = wasm_v128_load64_zero(&coeffs[12]);
    trans[0] = wasm_i16x8_splat(transform[0]);
    trans[1] = wasm_i16x8_splat(transform[1]);
    trans[2] = wasm_i16x8_splat(transform[2]);
    trans[3] = wasm_i16x8_splat(transform[3]);
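    /* trans[0..3] now hold splatted copies of 64, 83, 64 and 36, the
     * 4-point inverse-transform coefficients. */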
    src[0] = wasm_i64x2_shuffle(src[0], src[1], 0, 2);
    src[2] = wasm_i64x2_shuffle(src[2], src[3], 0, 2);
    wasm_v128_store(&coeffs[0], src[0]);
    wasm_v128_store(&coeffs[8], src[2]);
    src = wasm_i32x4_add(src, add);
    src = wasm_i32x4_shr(src, shift);
    *dst = wasm_i64x2_shuffle(wasm_i16x8_narrow_i32x4(src, src), *dst, 0, 3);
    src = wasm_i32x4_add(src, add);
    src = wasm_i32x4_shr(src, shift);
    *dst = wasm_i64x2_shuffle(wasm_i16x8_narrow_i32x4(src, src), *dst, 2, 0);
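/*
 * shift_narrow_low/high round, shift and saturate a 32-bit accumulator back
 * to four 16-bit lanes, then merge the result into the low or high 64-bit
 * half of *dst.
 *
 * tr_4x4_8 below is the shared 4-point kernel of the larger transforms:
 *   E0 = 64*s0 + 64*s2    O0 = 83*s1 + 36*s3
 *   E1 = 64*s0 - 64*s2    O1 = 36*s1 - 83*s3
 *   dst0..dst3 = E0+O0, E1+O1, E1-O1, E0-O0
 * half0/half1 select the low or high i16x8 half of each input row.
 */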
#define tr_4x4_8(in0, in1, in2, in3, dst0, dst1, dst2, dst3, trans, half0, half1) \
    v128_t e0, e1, o0, o1; \
    e0 = wasm_i32x4_extmul_ ## half0 ## _i16x8(in0, trans[0]); \
    e1 = wasm_i32x4_extmul_ ## half0 ## _i16x8(in0, trans[0]); \
    o0 = wasm_i32x4_extmul_ ## half0 ## _i16x8(in1, trans[1]); \
    o1 = wasm_i32x4_extmul_ ## half0 ## _i16x8(in1, trans[3]); \
    tmp[0] = wasm_i32x4_extmul_ ## half1 ## _i16x8(in2, trans[0]); \
    tmp[1] = wasm_i32x4_extmul_ ## half1 ## _i16x8(in2, trans[0]); \
    tmp[2] = wasm_i32x4_extmul_ ## half1 ## _i16x8(in3, trans[3]); \
    tmp[3] = wasm_i32x4_extmul_ ## half1 ## _i16x8(in3, trans[1]); \
    e0 = wasm_i32x4_add(e0, tmp[0]); \
    e1 = wasm_i32x4_sub(e1, tmp[1]); \
    o0 = wasm_i32x4_add(o0, tmp[2]); \
    o1 = wasm_i32x4_sub(o1, tmp[3]); \
    dst0 = wasm_i32x4_add(e0, o0); \
    dst1 = wasm_i32x4_add(e1, o1); \
    dst2 = wasm_i32x4_sub(e1, o1); \
    dst3 = wasm_i32x4_sub(e0, o0); \
/*
 * tr_8x4: 8-point inverse transform over a 4-column strip. The even half
 * reuses tr_4x4_8; the odd half accumulates products with trans[4..7]
 * (89, 75, 50, 18) from the odd rows of both register files src0/src1.
 */
#define tr_8x4(src0, src1, half0, half1, trans, shift) \
    v128_t v24, v25, v26, v27, v28, v29, v30, v31; \
    v128_t add = wasm_i32x4_splat(1 << (shift - 1)); \
    tr_4x4_8(src0[0], src0[2], src1[0], src1[2], v24, v25, v26, v27, trans, half0, half1); \
    v30 = wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[1], trans[6]); \
    v28 = wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[1], trans[4]); \
    v29 = wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[1], trans[5]); \
    v30 = wasm_i32x4_sub(v30, wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[3], trans[4])); \
    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[3], trans[5])); \
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[3], trans[7])); \
    v30 = wasm_i32x4_add(v30, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[1], trans[7])); \
    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[1], trans[6])); \
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[1], trans[4])); \
    v30 = wasm_i32x4_add(v30, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[3], trans[5])); \
    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[3], trans[7])); \
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[3], trans[6])); \
    v31 = wasm_i32x4_add(v26, v30); \
    v26 = wasm_i32x4_sub(v26, v30); \
    shift_narrow_ ## half0 (v31, &src0[2], add, shift); \
    v31 = wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[1], trans[7]); \
    v31 = wasm_i32x4_sub(v31, wasm_i32x4_extmul_ ## half0 ## _i16x8(src0[3], trans[6])); \
    v31 = wasm_i32x4_add(v31, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[1], trans[5])); \
    v31 = wasm_i32x4_sub(v31, wasm_i32x4_extmul_ ## half1 ## _i16x8(src1[3], trans[4])); \
    shift_narrow_ ## half1 (v26, &src1[1], add, shift); \
    v26 = wasm_i32x4_add(v24, v28); \
    v24 = wasm_i32x4_sub(v24, v28); \
    v28 = wasm_i32x4_add(v25, v29); \
    v25 = wasm_i32x4_sub(v25, v29); \
    v30 = wasm_i32x4_add(v27, v31); \
    v27 = wasm_i32x4_sub(v27, v31); \
    shift_narrow_ ## half0 (v26, &src0[0], add, shift); \
    shift_narrow_ ## half1 (v24, &src1[3], add, shift); \
    shift_narrow_ ## half0 (v28, &src0[1], add, shift); \
    shift_narrow_ ## half1 (v25, &src1[2], add, shift); \
    shift_narrow_ ## half0 (v30, &src0[3], add, shift); \
    shift_narrow_ ## half1 (v27, &src1[0], add, shift); \
    src[0] = wasm_v128_load(coeffs + 0 * 8);
    src[1] = wasm_v128_load(coeffs + 1 * 8);
    src[2] = wasm_v128_load(coeffs + 2 * 8);
    src[3] = wasm_v128_load(coeffs + 3 * 8);
    src[4] = wasm_v128_load(coeffs + 4 * 8);
    src[5] = wasm_v128_load(coeffs + 5 * 8);
    src[6] = wasm_v128_load(coeffs + 6 * 8);
    src[7] = wasm_v128_load(coeffs + 7 * 8);
    trans[0] = wasm_i16x8_splat(transform[0]);
    trans[1] = wasm_i16x8_splat(transform[1]);
    trans[2] = wasm_i16x8_splat(transform[2]);
    trans[3] = wasm_i16x8_splat(transform[3]);
    trans[4] = wasm_i16x8_splat(transform[4]);
    trans[5] = wasm_i16x8_splat(transform[5]);
    trans[6] = wasm_i16x8_splat(transform[6]);
    trans[7] = wasm_i16x8_splat(transform[7]);
    wasm_v128_store(&coeffs[0 * 8], src[0]);
    wasm_v128_store(&coeffs[1 * 8], src[1]);
    wasm_v128_store(&coeffs[2 * 8], src[2]);
    wasm_v128_store(&coeffs[3 * 8], src[3]);
    wasm_v128_store(&coeffs[4 * 8], src[4]);
    wasm_v128_store(&coeffs[5 * 8], src[5]);
    wasm_v128_store(&coeffs[6 * 8], src[6]);
    wasm_v128_store(&coeffs[7 * 8], src[7]);
/* Load four rows through two cursors; x1 and x3 are assumed to advance by
 * the byte step x2 between rows. */
#define load16(x1, x3, x2, in0, in1, in2, in3) \
    in0 = wasm_v128_load64_zero(x1); \
    in0 = wasm_v128_load64_lane(x3, in0, 1); \
    x1 += x2; \
    x3 += x2; \
    in1 = wasm_v128_load64_zero(x1); \
    in1 = wasm_v128_load64_lane(x3, in1, 1); \
    x1 += x2; \
    x3 += x2; \
    in2 = wasm_v128_load64_zero(x1); \
    in2 = wasm_v128_load64_lane(x3, in2, 1); \
    x1 += x2; \
    x3 += x2; \
    in3 = wasm_v128_load64_zero(x1); \
    in3 = wasm_v128_load64_lane(x3, in3, 1); \
/* Plain butterfly: p = e + o, m = e - o. */
#define bufferfly(e, o, p, m) \
    p = wasm_i32x4_add(e, o); \
    m = wasm_i32x4_sub(e, o); \
static void tr16_8x4(v128_t in0, v128_t in1, v128_t in2, v128_t in3,
                     const v128_t *trans, char *sp, int offset)
{
    v128_t v16, v17, v18, v19, v20, v21, v22, v23,
           v24, v25, v26, v27, v28, v29, v30, v31;

    tr_4x4_8(in0, in1, in2, in3, v24, v25, v26, v27, trans, low, low);
    /* Odd half: accumulate the four odd outputs from the high input halves
     * with the 89/75/50/18 coefficients held in trans[4..7]. */
    v28 = wasm_i32x4_extmul_high_i16x8(in0, trans[4]);
    v29 = wasm_i32x4_extmul_high_i16x8(in0, trans[5]);
    v30 = wasm_i32x4_extmul_high_i16x8(in0, trans[6]);
    v31 = wasm_i32x4_extmul_high_i16x8(in0, trans[7]);
    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_high_i16x8(in1, trans[5]));
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_high_i16x8(in1, trans[7]));
    v30 = wasm_i32x4_sub(v30, wasm_i32x4_extmul_high_i16x8(in1, trans[4]));
    v31 = wasm_i32x4_sub(v31, wasm_i32x4_extmul_high_i16x8(in1, trans[6]));

    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_high_i16x8(in2, trans[6]));
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_high_i16x8(in2, trans[4]));
    v30 = wasm_i32x4_add(v30, wasm_i32x4_extmul_high_i16x8(in2, trans[7]));
    v31 = wasm_i32x4_add(v31, wasm_i32x4_extmul_high_i16x8(in2, trans[5]));

    v28 = wasm_i32x4_add(v28, wasm_i32x4_extmul_high_i16x8(in3, trans[7]));
    v29 = wasm_i32x4_sub(v29, wasm_i32x4_extmul_high_i16x8(in3, trans[6]));
    v30 = wasm_i32x4_add(v30, wasm_i32x4_extmul_high_i16x8(in3, trans[5]));
    v31 = wasm_i32x4_sub(v31, wasm_i32x4_extmul_high_i16x8(in3, trans[4]));
    wasm_v128_store(sp, v16); sp += 16;
    wasm_v128_store(sp, v17); sp += 16;
    wasm_v128_store(sp, v18); sp += 16;
    wasm_v128_store(sp, v19); sp += 16;
    wasm_v128_store(sp, v20); sp += 16;
    wasm_v128_store(sp, v21); sp += 16;
    wasm_v128_store(sp, v22); sp += 16;
    wasm_v128_store(sp, v23);
}
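/*
 * tr16_8x4 thus produces an 8x4 slice of the 16-point transform: the even
 * half comes from tr_4x4_8, the odd half from the accumulations above, and
 * the combined rows v16..v23 are spilled to the scratch area sp for the
 * next pass.
 */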
static void scale(v128_t *out0, v128_t *out1, v128_t *out2, v128_t *out3,
                  v128_t in0, v128_t in1, v128_t in2, v128_t in3,
                  v128_t in4, v128_t in5, v128_t in6, v128_t in7,
                  int shift)
{
    v128_t add = wasm_i32x4_splat(1 << (shift - 1));
    in0 = wasm_i32x4_add(in0, add);
    in1 = wasm_i32x4_add(in1, add);
    in2 = wasm_i32x4_add(in2, add);
    in3 = wasm_i32x4_add(in3, add);
    in4 = wasm_i32x4_add(in4, add);
    in5 = wasm_i32x4_add(in5, add);
    in6 = wasm_i32x4_add(in6, add);
    in7 = wasm_i32x4_add(in7, add);
    in0 = wasm_i32x4_shr(in0, shift);
    in1 = wasm_i32x4_shr(in1, shift);
    in2 = wasm_i32x4_shr(in2, shift);
    in3 = wasm_i32x4_shr(in3, shift);
    in4 = wasm_i32x4_shr(in4, shift);
    in5 = wasm_i32x4_shr(in5, shift);
    in6 = wasm_i32x4_shr(in6, shift);
    in7 = wasm_i32x4_shr(in7, shift);
    *out0 = wasm_i16x8_narrow_i32x4(in0, in1);
    *out1 = wasm_i16x8_narrow_i32x4(in2, in3);
    *out2 = wasm_i16x8_narrow_i32x4(in4, in5);
    *out3 = wasm_i16x8_narrow_i32x4(in6, in7);
}
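/*
 * scale() applies the rounding rule (x + (1 << (shift - 1))) >> shift to
 * eight accumulators and packs each pair into a saturated 16-bit row.
 */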
    v128_t t0, t1, t2, t3, t4, t5;

    /* transpose the low 64-bit halves of r0..r3 */
    t0 = wasm_i16x8_shuffle(*r0, *r1, 0, 8, 2, 10, 4, 12, 6, 14);
    t1 = wasm_i16x8_shuffle(*r0, *r1, 1, 9, 3, 11, 5, 13, 7, 15);
    t2 = wasm_i16x8_shuffle(*r2, *r3, 0, 8, 2, 10, 4, 12, 6, 14);
    t3 = wasm_i16x8_shuffle(*r2, *r3, 1, 9, 3, 11, 5, 13, 7, 15);
    t4 = wasm_i32x4_shuffle(t0, t2, 0, 4, 2, 6);
    t5 = wasm_i32x4_shuffle(t0, t2, 1, 5, 3, 7);
    t0 = wasm_i32x4_shuffle(t1, t3, 0, 4, 2, 6);
    t2 = wasm_i32x4_shuffle(t1, t3, 1, 5, 3, 7);
    *r0 = wasm_i64x2_shuffle(t4, *r0, 0, 3);
    *r2 = wasm_i64x2_shuffle(t5, *r2, 0, 3);
    *r1 = wasm_i64x2_shuffle(t0, *r1, 0, 3);
    *r3 = wasm_i64x2_shuffle(t2, *r3, 0, 3);

    /* transpose the high 64-bit halves, in reversed row order */
    t0 = wasm_i16x8_shuffle(*r3, *r2, 0, 8, 2, 10, 4, 12, 6, 14);
    t1 = wasm_i16x8_shuffle(*r3, *r2, 1, 9, 3, 11, 5, 13, 7, 15);
    t2 = wasm_i16x8_shuffle(*r1, *r0, 0, 8, 2, 10, 4, 12, 6, 14);
    t3 = wasm_i16x8_shuffle(*r1, *r0, 1, 9, 3, 11, 5, 13, 7, 15);
    t4 = wasm_i32x4_shuffle(t0, t2, 0, 4, 2, 6);
    t5 = wasm_i32x4_shuffle(t0, t2, 1, 5, 3, 7);
    t0 = wasm_i32x4_shuffle(t1, t3, 0, 4, 2, 6);
    t2 = wasm_i32x4_shuffle(t1, t3, 1, 5, 3, 7);
    *r3 = wasm_i64x2_shuffle(*r3, t4, 0, 3);
    *r1 = wasm_i64x2_shuffle(*r1, t5, 0, 3);
    *r2 = wasm_i64x2_shuffle(*r2, t0, 0, 3);
    *r0 = wasm_i64x2_shuffle(*r0, t2, 0, 3);
static void store16(v128_t in0, v128_t in1, v128_t in2, v128_t in3,
                    char *x1, char *x3, int x1_step, int x3_step)
{
    /* both cursors are assumed to advance by the caller-supplied byte steps
     * between rows */
    wasm_v128_store64_lane(x1, in0, 0);
    wasm_v128_store64_lane(x3, in0, 1);
    x1 += x1_step;
    x3 += x3_step;
    wasm_v128_store64_lane(x1, in1, 0);
    wasm_v128_store64_lane(x3, in1, 1);
    x1 += x1_step;
    x3 += x3_step;
    wasm_v128_store64_lane(x1, in2, 0);
    wasm_v128_store64_lane(x3, in2, 1);
    x1 += x1_step;
    x3 += x3_step;
    wasm_v128_store64_lane(x1, in3, 0);
    wasm_v128_store64_lane(x3, in3, 1);
}
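/*
 * store16 writes the two 64-bit halves of each row through separate cursors;
 * callers pass mirrored steps (such as 32 and -32) so the second half of the
 * butterfly output is laid down in reverse order.
 */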
                           v128_t in0, v128_t in2, v128_t in4, v128_t in6,
                           v128_t in7, v128_t in5, v128_t in3, v128_t in1)
{
    char *x1 = sp + off1;
    char *x3 = sp + off2;

    /* x1 is assumed to walk forward and x3 backward in 16-byte steps,
     * mirroring the butterfly output order */
    wasm_v128_store(x1, in0);
    wasm_v128_store(x3, in1);
    x1 += 16;
    x3 -= 16;
    wasm_v128_store(x1, in2);
    wasm_v128_store(x3, in3);
    x1 += 16;
    x3 -= 16;
    wasm_v128_store(x1, in4);
    wasm_v128_store(x3, in5);
    x1 += 16;
    x3 -= 16;
    wasm_v128_store(x1, in6);
    wasm_v128_store(x3, in7);
}
/* Multiply-accumulate helper: operation selects add/sub, half selects the
 * low or high i16x8 half of in0 for the widening multiply by the splatted
 * coefficient in1. */
#define sum_sub(out, in0, in1, operation, half) \
    out = wasm_i32x4_ ## operation (out, wasm_i32x4_extmul_ ## half ## _i16x8(in0, in1));
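/*
 * add_member folds one more input row into the eight odd-part accumulators
 * v21..v28: each tN/opN pair picks the coefficient and the sign with which
 * this row contributes to one output.
 */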
#define add_member(in, t0, t1, t2, t3, t4, t5, t6, t7, op0, op1, op2, op3, op4, op5, op6, op7, half) \
    sum_sub(v21, in, t0, op0, half) \
    sum_sub(v22, in, t1, op1, half) \
    sum_sub(v23, in, t2, op2, half) \
    sum_sub(v24, in, t3, op3, half) \
    sum_sub(v25, in, t4, op4, half) \
    sum_sub(v26, in, t5, op5, half) \
    sum_sub(v27, in, t6, op6, half) \
    sum_sub(v28, in, t7, op7, half) \
/* Four even/odd butterflies across consecutive pairs; v20 receives the
 * first sum because in0 is overwritten with the first difference. */
#define butterfly16(in0, in1, in2, in3, in4, in5, in6, in7) \
    v20 = wasm_i32x4_add(in0, in1); \
    in0 = wasm_i32x4_sub(in0, in1); \
    in1 = wasm_i32x4_add(in2, in3); \
    in2 = wasm_i32x4_sub(in2, in3); \
    in3 = wasm_i32x4_add(in4, in5); \
    in4 = wasm_i32x4_sub(in4, in5); \
    in5 = wasm_i32x4_add(in6, in7); \
    in6 = wasm_i32x4_sub(in6, in7); \
    v128_t v16, v17, v18, v19, v20, v21, v22, v23,
           v24, v25, v26, v27, v28, v29, v30, v31;
    trans[0] = wasm_i16x8_splat(transform[0]);
    trans[1] = wasm_i16x8_splat(transform[1]);
    trans[2] = wasm_i16x8_splat(transform[2]);
    trans[3] = wasm_i16x8_splat(transform[3]);
    trans[4] = wasm_i16x8_splat(transform[4]);
    trans[5] = wasm_i16x8_splat(transform[5]);
    trans[6] = wasm_i16x8_splat(transform[6]);
    trans[7] = wasm_i16x8_splat(transform[7]);
    load16(x1, x3, x2, v16, v17, v18, v19);
    load16(x1, x3, x2, v20, v17, v18, v19);
    trans[0] = wasm_i16x8_splat(transform[0 + 8]);
    trans[1] = wasm_i16x8_splat(transform[1 + 8]);
    trans[2] = wasm_i16x8_splat(transform[2 + 8]);
    trans[3] = wasm_i16x8_splat(transform[3 + 8]);
    trans[4] = wasm_i16x8_splat(transform[4 + 8]);
    trans[5] = wasm_i16x8_splat(transform[5 + 8]);
    trans[6] = wasm_i16x8_splat(transform[6 + 8]);
    trans[7] = wasm_i16x8_splat(transform[7 + 8]);
    v21 = wasm_i32x4_extmul_low_i16x8(v20, trans[0]);
    v22 = wasm_i32x4_extmul_low_i16x8(v20, trans[1]);
    v23 = wasm_i32x4_extmul_low_i16x8(v20, trans[2]);
    v24 = wasm_i32x4_extmul_low_i16x8(v20, trans[3]);
    v25 = wasm_i32x4_extmul_low_i16x8(v20, trans[4]);
    v26 = wasm_i32x4_extmul_low_i16x8(v20, trans[5]);
    v27 = wasm_i32x4_extmul_low_i16x8(v20, trans[6]);
    v28 = wasm_i32x4_extmul_low_i16x8(v20, trans[7]);
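    /* v21..v28 now hold the first odd row's products with transform[8..15];
     * the add_member chain below accumulates the remaining seven rows with
     * the sign patterns of the 16-point odd basis. */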
    add_member(v20, trans[1], trans[4], trans[7], trans[5],
               trans[2], trans[0], trans[3], trans[6],
               add, add, add, sub, sub, sub, sub, sub, high);
    add_member(v17, trans[2], trans[7], trans[3], trans[1],
               trans[6], trans[4], trans[0], trans[5],
               add, add, sub, sub, sub, add, add, add, low);
    add_member(v17, trans[3], trans[5], trans[1], trans[7],
               trans[0], trans[6], trans[2], trans[4],
               add, sub, sub, add, add, add, sub, sub, high);
    add_member(v18, trans[4], trans[2], trans[6], trans[0],
               trans[7], trans[1], trans[5], trans[3],
               add, sub, sub, add, sub, sub, add, add, low);
    add_member(v18, trans[5], trans[0], trans[4], trans[6],
               trans[1], trans[3], trans[7], trans[2],
               add, sub, add, add, sub, add, add, sub, high);
    add_member(v19, trans[6], trans[3], trans[0], trans[2],
               trans[5], trans[7], trans[4], trans[1],
               add, sub, add, sub, add, add, sub, add, low);
    add_member(v19, trans[7], trans[6], trans[5], trans[4],
               trans[3], trans[2], trans[1], trans[0],
               add, sub, add, sub, add, sub, add, sub, high);
    v16 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v17 = wasm_v128_load(x4);
    x4 += 16;
    v18 = wasm_v128_load(x4);
    x4 += 16;
    v19 = wasm_v128_load(x4);
    butterfly16(v16, v21, v17, v22, v18, v23, v19, v24);
    scale(&v29, &v30, &v31, &v24,
          v20, v16, v21, v17, v22, v18, v23, v19,
          shift); /* trailing shift argument assumed from scale()'s signature */
    x3 = &buf[24 + 3 * 32];
    store16(v29, v30, v31, v24, x1, x3, 32, -32);
                   v20, v21, v22, v23, v19, v18, v17, v16);
    v16 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v17 = wasm_v128_load(x4);
    x4 += 16;
    v18 = wasm_v128_load(x4);
    x4 += 16;
    v19 = wasm_v128_load(x4);
    butterfly16(v16, v25, v17, v26, v18, v27, v19, v28);
    scale(&v29, &v30, &v31, &v20,
          v20, v16, v25, v17, v26, v18, v27, v19,
          shift); /* trailing shift argument assumed from scale()'s signature */
    x3 = &buf[16 + 3 * 32];
    store16(v29, v30, v31, v20, x1, x3, 32, -32);
                   v20, v25, v26, v27, v19, v18, v17, v16);
    for (int i = 0; i < 4; i++) {
        char *x5 = &coeffs[8 * i];
        char *x6 = &sp[8 * i * 16];
        tr_16x4(x5, x6, sp, 7, 512, 1);
    }
    for (int i = 0; i < 4; i++) {
        char *x5 = &sp[8 * i];
        char *x6 = &coeffs[8 * i * 16];
/* Same as add_member, but for the four 32-point odd accumulators v24..v27. */
#define add_member32(in, t0, t1, t2, t3, op0, op1, op2, op3, half) \
    sum_sub(v24, in, t0, op0, half) \
    sum_sub(v25, in, t1, op1, half) \
    sum_sub(v26, in, t2, op2, half) \
    sum_sub(v27, in, t3, op3, half) \
/* Two even/odd butterflies; the first sum lands in out, while in0 keeps the
 * first difference. */
#define butterfly32(in0, in1, in2, in3, out) \
    out = wasm_i32x4_add(in0, in1); \
    in0 = wasm_i32x4_sub(in0, in1); \
    in1 = wasm_i32x4_add(in2, in3); \
    in2 = wasm_i32x4_sub(in2, in3); \
    v128_t v4, v5, v6, v7, v16, v17, v18, v19,
           v20, v21, v22, v23, v24, v25, v26, v27,
           v28, v29, v30, v31, v32, v33;
    tr_16x4(x5, x11, sp, 0, 2048, 4);
    /* the cursors are assumed to advance by the byte stride x2 between row
     * pairs, matching the load16 pattern */
    v4 = wasm_v128_load64_zero(x1);
    v4 = wasm_v128_load64_lane(x3, v4, 1);
    x1 += x2;
    x3 += x2;
    v5 = wasm_v128_load64_zero(x1);
    v5 = wasm_v128_load64_lane(x3, v5, 1);
    x1 += x2;
    x3 += x2;
    v6 = wasm_v128_load64_zero(x1);
    v6 = wasm_v128_load64_lane(x3, v6, 1);
    x1 += x2;
    x3 += x2;
    v7 = wasm_v128_load64_zero(x1);
    v7 = wasm_v128_load64_lane(x3, v7, 1);
    x1 += x2;
    x3 += x2;
    v16 = wasm_v128_load64_zero(x1);
    v16 = wasm_v128_load64_lane(x3, v16, 1);
    x1 += x2;
    x3 += x2;
    v17 = wasm_v128_load64_zero(x1);
    v17 = wasm_v128_load64_lane(x3, v17, 1);
    x1 += x2;
    x3 += x2;
    v18 = wasm_v128_load64_zero(x1);
    v18 = wasm_v128_load64_lane(x3, v18, 1);
    x1 += x2;
    x3 += x2;
    v19 = wasm_v128_load64_zero(x1);
    v19 = wasm_v128_load64_lane(x3, v19, 1);
    v0[0] = wasm_i16x8_splat(transform[16 + 0]);
    v0[1] = wasm_i16x8_splat(transform[16 + 1]);
    v0[2] = wasm_i16x8_splat(transform[16 + 2]);
    v0[3] = wasm_i16x8_splat(transform[16 + 3]);
    v1[0] = wasm_i16x8_splat(transform[16 + 4]);
    v1[1] = wasm_i16x8_splat(transform[16 + 5]);
    v1[2] = wasm_i16x8_splat(transform[16 + 6]);
    v1[3] = wasm_i16x8_splat(transform[16 + 7]);
    v2[0] = wasm_i16x8_splat(transform[16 + 8]);
    v2[1] = wasm_i16x8_splat(transform[16 + 9]);
    v2[2] = wasm_i16x8_splat(transform[16 + 10]);
    v2[3] = wasm_i16x8_splat(transform[16 + 11]);
    v3[0] = wasm_i16x8_splat(transform[16 + 12]);
    v3[1] = wasm_i16x8_splat(transform[16 + 13]);
    v3[2] = wasm_i16x8_splat(transform[16 + 14]);
    v3[3] = wasm_i16x8_splat(transform[16 + 15]);
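    /* v0..v3 hold the sixteen odd-half coefficients of the 32-point
     * transform (transform[16..31]), splatted one per vector so that
     * add_member32 can pick them positionally. */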
    v24 = wasm_i32x4_extmul_low_i16x8(v4, v0[0]);
    v25 = wasm_i32x4_extmul_low_i16x8(v4, v0[1]);
    v26 = wasm_i32x4_extmul_low_i16x8(v4, v0[2]);
    v27 = wasm_i32x4_extmul_low_i16x8(v4, v0[3]);
    add_member32(v5, v0[2], v1[3], v3[0], v3[2], add, add, add, sub, low);
    add_member32(v6, v1[0], v3[1], v2[1], v0[0], add, add, sub, sub, low);
    add_member32(v7, v1[2], v3[0], v0[0], v3[1], add, sub, sub, sub, low);
    add_member32(v16, v2[0], v1[2], v2[2], v1[0], add, sub, sub, add, low);
    add_member32(v17, v2[2], v0[1], v2[3], v2[1], add, sub, add, add, low);
    add_member32(v18, v3[0], v1[1], v0[1], v2[0], add, sub, add, sub, low);
    add_member32(v19, v3[2], v2[3], v2[0], v1[1], add, sub, add, sub, low);
    v28 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v29 = wasm_v128_load(x4);
    x4 += 16;
    v30 = wasm_v128_load(x4);
    x4 += 16;
    v31 = wasm_v128_load(x4);
    scale(&v20, &v21, &v22, &v23,
          v32, v28, v24, v29, v33, v30, v26, v31, shift);
    x3 = &x11[56 + 3 * 64];
    store16(v20, v21, v22, v23, x1, x3, 64, -64);
    v24 = wasm_i32x4_extmul_low_i16x8(v4, v1[0]);
    v25 = wasm_i32x4_extmul_low_i16x8(v4, v1[1]);
    v26 = wasm_i32x4_extmul_low_i16x8(v4, v1[2]);
    v27 = wasm_i32x4_extmul_low_i16x8(v4, v1[3]);
    add_member32(v5, v2[1], v1[0], v0[0], v1[1], sub, sub, sub, sub, low);
    add_member32(v6, v2[0], v3[2], v1[1], v0[3], sub, add, add, add, low);
    add_member32(v7, v1[1], v1[3], v2[3], v0[0], add, add, sub, sub, low);
    add_member32(v16, v3[0], v0[2], v3[2], v0[1], add, sub, sub, add, low);
    add_member32(v17, v0[1], v3[0], v2[0], v0[2], sub, add, add, sub, low);
    add_member32(v18, v3[3], v2[1], v0[2], v1[0], add, add, sub, add, low);
    add_member32(v19, v0[2], v0[1], v0[3], v1[2], add, sub, add, sub, low);
    v28 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v29 = wasm_v128_load(x4);
    x4 += 16;
    v30 = wasm_v128_load(x4);
    x4 += 16;
    v31 = wasm_v128_load(x4);
    scale(&v20, &v21, &v22, &v23,
          v32, v28, v24, v29, v33, v30, v26, v31, shift);
    x3 = &x11[48 + 3 * 64];
    store16(v20, v21, v22, v23, x1, x3, 64, -64);
    v24 = wasm_i32x4_extmul_low_i16x8(v4, v2[0]);
    v25 = wasm_i32x4_extmul_low_i16x8(v4, v2[1]);
    v26 = wasm_i32x4_extmul_low_i16x8(v4, v2[2]);
    v27 = wasm_i32x4_extmul_low_i16x8(v4, v2[3]);
    add_member32(v5, v2[2], v3[3], v2[3], v1[2], sub, sub, add, add, low);
    add_member32(v6, v3[0], v2[2], v0[1], v1[3], add, sub, sub, sub, low);
    add_member32(v7, v3[2], v1[0], v2[0], v2[2], sub, add, add, sub, low);
    add_member32(v16, v3[3], v0[1], v3[1], v0[3], sub, sub, add, add, low);
    add_member32(v17, v3[1], v1[3], v0[3], v3[2], add, add, sub, add, low);
    add_member32(v18, v2[3], v3[1], v1[2], v0[1], sub, sub, add, sub, low);
    add_member32(v19, v2[1], v3[0], v3[3], v3[1], add, sub, add, add, low);
    v28 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v29 = wasm_v128_load(x4);
    x4 += 16;
    v30 = wasm_v128_load(x4);
    x4 += 16;
    v31 = wasm_v128_load(x4);
    scale(&v20, &v21, &v22, &v23,
          v32, v28, v24, v29, v33, v30, v26, v31, shift);
    x3 = &x11[40 + 3 * 64];
    store16(v20, v21, v22, v23, x1, x3, 64, -64);
    v24 = wasm_i32x4_extmul_low_i16x8(v4, v3[0]);
    v25 = wasm_i32x4_extmul_low_i16x8(v4, v3[1]);
    v26 = wasm_i32x4_extmul_low_i16x8(v4, v3[2]);
    v27 = wasm_i32x4_extmul_low_i16x8(v4, v3[3]);
    add_member32(v5, v0[0], v0[3], v2[0], v3[1], add, add, add, add, low);
    add_member32(v6, v3[3], v1[2], v0[2], v2[3], add, add, add, add, low);
    add_member32(v7, v0[2], v3[3], v0[3], v2[1], sub, sub, add, add, low);
    add_member32(v16, v2[3], v1[1], v2[1], v1[3], sub, sub, add, add, low);
    add_member32(v17, v1[2], v1[0], v3[3], v1[1], add, sub, add, add, low);
    add_member32(v18, v1[3], v3[2], v2[2], v0[3], add, sub, sub, add, low);
    add_member32(v19, v2[2], v1[3], v1[0], v0[1], sub, add, sub, add, low);
    v28 = wasm_v128_load(x4);
    x4 += 16; /* assumed 16-byte advance between consecutive loads */
    v29 = wasm_v128_load(x4);
    x4 += 16;
    v30 = wasm_v128_load(x4);
    x4 += 16;
    v31 = wasm_v128_load(x4);
    scale(&v20, &v21, &v22, &v23,
          v32, v28, v24, v29, v33, v30, v26, v31, shift);
    x3 = &x11[32 + 3 * 64];
    store16(v20, v21, v22, v23, x1, x3, 64, -64);
    for (int i = 0; i < 8; i++) {
        x11 = &sp[8 * i * 32];
    for (int i = 0; i < 8; i++) {
        x11 = &coeffs[8 * i * 32];
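/*
 * Like the 16x16 case, the 32x32 inverse transform runs as two passes over
 * eight 8-column strips: the first pass works out of coeffs into the scratch
 * buffer sp, and the second pass reads the intermediate rows back and writes
 * the final result over coeffs.
 */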