#ifndef AVUTIL_MIPS_GENERIC_MACROS_MSA_H
#define AVUTIL_MIPS_GENERIC_MACROS_MSA_H

#define ALLOC_ALIGNED(align) __attribute__ ((aligned((align) << 1)))

#define LD_V(RTYPE, psrc) *((RTYPE *)(psrc))
#define LD_UB(...) LD_V(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_V(v16i8, __VA_ARGS__)
#define LD_UH(...) LD_V(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
#define LD_UW(...) LD_V(v4u32, __VA_ARGS__)
#define LD_SW(...) LD_V(v4i32, __VA_ARGS__)

#define ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_UB(...) ST_V(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_V(v16i8, __VA_ARGS__)
#define ST_UH(...) ST_V(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
#define ST_UW(...) ST_V(v4u32, __VA_ARGS__)
#define ST_SW(...) ST_V(v4i32, __VA_ARGS__)
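
/* Example : Copying one 16-byte vector with the wrappers above. A minimal
             usage sketch, assuming an MSA-enabled toolchain (-mmsa) and
             <msa.h>; the function name is illustrative, not part of this
             header.

                 static void copy16(const uint8_t *src, uint8_t *dst)
                 {
                     v16u8 vec = LD_UB(src);

                     ST_UB(vec, dst);
                 }
*/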
#if (__mips_isa_rev >= 6)
    #define LH(psrc)                             \
    ( {                                          \
        uint16_t val_lh_m = *(uint16_t *)(psrc); \
        val_lh_m;                                \
    } )

    #define LW(psrc)                             \
    ( {                                          \
        uint32_t val_lw_m = *(uint32_t *)(psrc); \
        val_lw_m;                                \
    } )

    #if (__mips == 64)
        #define LD(psrc)                             \
        ( {                                          \
            uint64_t val_ld_m = *(uint64_t *)(psrc); \
            val_ld_m;                                \
        } )
    #else // !(__mips == 64)
        #define LD(psrc)                                                   \
        ( {                                                                \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                       \
            uint32_t val0_ld_m, val1_ld_m;                                 \
            uint64_t val_ld_m = 0;                                         \
            val0_ld_m = LW(psrc_ld_m);                                     \
            val1_ld_m = LW(psrc_ld_m + 4);                                 \
            val_ld_m = (uint64_t) (val1_ld_m);                             \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000); \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);       \
            val_ld_m;                                                      \
        } )
    #endif // (__mips == 64)

    #define SH(val, pdst) *(uint16_t *)(pdst) = (val);
    #define SW(val, pdst) *(uint32_t *)(pdst) = (val);
    #define SD(val, pdst) *(uint64_t *)(pdst) = (val);
#else // !(__mips_isa_rev >= 6)
    #define LH(psrc)                                \
    ( {                                             \
        uint8_t *psrc_lh_m = (uint8_t *) (psrc);    \
        uint16_t val_lh_m;                          \
        __asm__ volatile (                          \
            "ulh %[val_lh_m], %[psrc_lh_m] \n\t"    \
            : [val_lh_m] "=r" (val_lh_m)            \
            : [psrc_lh_m] "m" (*psrc_lh_m)          \
        );                                          \
        val_lh_m;                                   \
    } )

    #define LW(psrc)                                \
    ( {                                             \
        uint8_t *psrc_lw_m = (uint8_t *) (psrc);    \
        uint32_t val_lw_m;                          \
        __asm__ volatile (                          \
            "ulw %[val_lw_m], %[psrc_lw_m] \n\t"    \
            : [val_lw_m] "=r" (val_lw_m)            \
            : [psrc_lw_m] "m" (*psrc_lw_m)          \
        );                                          \
        val_lw_m;                                   \
    } )

    #if (__mips == 64)
        #define LD(psrc)                                \
        ( {                                             \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);    \
            uint64_t val_ld_m = 0;                      \
            __asm__ volatile (                          \
                "uld %[val_ld_m], %[psrc_ld_m] \n\t"    \
                : [val_ld_m] "=r" (val_ld_m)            \
                : [psrc_ld_m] "m" (*psrc_ld_m)          \
            );                                          \
            val_ld_m;                                   \
        } )
    #else // !(__mips == 64)
        #define LD(psrc)                                                   \
        ( {                                                                \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                       \
            uint32_t val0_ld_m, val1_ld_m;                                 \
            uint64_t val_ld_m = 0;                                         \
            val0_ld_m = LW(psrc_ld_m);                                     \
            val1_ld_m = LW(psrc_ld_m + 4);                                 \
            val_ld_m = (uint64_t) (val1_ld_m);                             \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000); \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);       \
            val_ld_m;                                                      \
        } )
    #endif // (__mips == 64)

    #define SH(val, pdst)                           \
    {                                               \
        uint8_t *pdst_sh_m = (uint8_t *) (pdst);    \
        uint16_t val_sh_m = (val);                  \
        __asm__ volatile (                          \
            "ush %[val_sh_m], %[pdst_sh_m] \n\t"    \
            : [pdst_sh_m] "=m" (*pdst_sh_m)         \
            : [val_sh_m] "r" (val_sh_m)             \
        );                                          \
    }

    #define SW(val, pdst)                           \
    {                                               \
        uint8_t *pdst_sw_m = (uint8_t *) (pdst);    \
        uint32_t val_sw_m = (val);                  \
        __asm__ volatile (                          \
            "usw %[val_sw_m], %[pdst_sw_m] \n\t"    \
            : [pdst_sw_m] "=m" (*pdst_sw_m)         \
            : [val_sw_m] "r" (val_sw_m)             \
        );                                          \
    }

    #define SD(val, pdst)                                            \
    {                                                                \
        uint8_t *pdst_sd_m = (uint8_t *) (pdst);                     \
        uint32_t val0_sd_m, val1_sd_m;                               \
        val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);         \
        val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \
        SW(val0_sd_m, pdst_sd_m);                                    \
        SW(val1_sd_m, pdst_sd_m + 4);                                \
    }
#endif // (__mips_isa_rev >= 6)
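
/* Example : Copying 8 bytes between possibly unaligned addresses with the
             scalar helpers above; LD/SD expand to plain loads/stores on
             MIPSr6 and to ulw/usw style sequences before r6, so callers
             need not care about alignment. A minimal sketch; the function
             name is illustrative.

                 static void copy8_unaligned(const uint8_t *src, uint8_t *dst)
                 {
                     uint64_t val = LD(src);

                     SD(val, dst);
                 }
*/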
#define LW4(psrc, stride, out0, out1, out2, out3) \
    out0 = LW((psrc)); \
    out1 = LW((psrc) + stride); \
    out2 = LW((psrc) + 2 * stride); \
    out3 = LW((psrc) + 3 * stride); \

#define LW2(psrc, stride, out0, out1) \
    out0 = LW((psrc)); \
    out1 = LW((psrc) + stride); \

#define LD2(psrc, stride, out0, out1) \
    out0 = LD((psrc)); \
    out1 = LD((psrc) + stride); \

#define LD4(psrc, stride, out0, out1, out2, out3) \
    LD2((psrc), stride, out0, out1); \
    LD2((psrc) + 2 * stride, stride, out2, out3); \

#define SW4(in0, in1, in2, in3, pdst, stride) \
    SW(in0, (pdst)); \
    SW(in1, (pdst) + stride); \
    SW(in2, (pdst) + 2 * stride); \
    SW(in3, (pdst) + 3 * stride); \

#define SD4(in0, in1, in2, in3, pdst, stride) \
    SD(in0, (pdst)); \
    SD(in1, (pdst) + stride); \
    SD(in2, (pdst) + 2 * stride); \
    SD(in3, (pdst) + 3 * stride); \
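
/* Example : Copying an 8x4 byte block row by row with the strided helpers.
             A minimal sketch; all names are illustrative.

                 static void copy_8x4(const uint8_t *src, int32_t src_stride,
                                      uint8_t *dst, int32_t dst_stride)
                 {
                     uint64_t row0, row1, row2, row3;

                     LD4(src, src_stride, row0, row1, row2, row3);
                     SD4(row0, row1, row2, row3, dst, dst_stride);
                 }
*/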
#define LD_V2(RTYPE, psrc, stride, out0, out1) \
    out0 = LD_V(RTYPE, (psrc)); \
    out1 = LD_V(RTYPE, (psrc) + stride); \
#define LD_UB2(...) LD_V2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
#define LD_UH2(...) LD_V2(v8u16, __VA_ARGS__)
#define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
#define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)

#define LD_V3(RTYPE, psrc, stride, out0, out1, out2) \
    LD_V2(RTYPE, (psrc), stride, out0, out1); \
    out2 = LD_V(RTYPE, (psrc) + 2 * stride); \
#define LD_UB3(...) LD_V3(v16u8, __VA_ARGS__)
#define LD_SB3(...) LD_V3(v16i8, __VA_ARGS__)

#define LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3) \
    LD_V2(RTYPE, (psrc), stride, out0, out1); \
    LD_V2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
#define LD_UB4(...) LD_V4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
#define LD_UH4(...) LD_V4(v8u16, __VA_ARGS__)
#define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
#define LD_SW4(...) LD_V4(v4i32, __VA_ARGS__)

#define LD_V5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
    out4 = LD_V(RTYPE, (psrc) + 4 * stride); \
#define LD_UB5(...) LD_V5(v16u8, __VA_ARGS__)
#define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)

#define LD_V6(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5) \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
    LD_V2(RTYPE, (psrc) + 4 * stride, stride, out4, out5); \
#define LD_UB6(...) LD_V6(v16u8, __VA_ARGS__)
#define LD_SB6(...) LD_V6(v16i8, __VA_ARGS__)
#define LD_UH6(...) LD_V6(v8u16, __VA_ARGS__)
#define LD_SH6(...) LD_V6(v8i16, __VA_ARGS__)

#define LD_V7(RTYPE, psrc, stride, \
              out0, out1, out2, out3, out4, out5, out6) \
    LD_V5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4); \
    LD_V2(RTYPE, (psrc) + 5 * stride, stride, out5, out6); \
#define LD_UB7(...) LD_V7(v16u8, __VA_ARGS__)
#define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)

#define LD_V8(RTYPE, psrc, stride, \
              out0, out1, out2, out3, out4, out5, out6, out7) \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
    LD_V4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
#define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
#define LD_UH8(...) LD_V8(v8u16, __VA_ARGS__)
#define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
#define LD_SW8(...) LD_V8(v4i32, __VA_ARGS__)

#define LD_V16(RTYPE, psrc, stride, \
               out0, out1, out2, out3, out4, out5, out6, out7, \
               out8, out9, out10, out11, out12, out13, out14, out15) \
    LD_V8(RTYPE, (psrc), stride, \
          out0, out1, out2, out3, out4, out5, out6, out7); \
    LD_V8(RTYPE, (psrc) + 8 * stride, stride, \
          out8, out9, out10, out11, out12, out13, out14, out15); \
#define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
#define ST_V2(RTYPE, in0, in1, pdst, stride) \
    ST_V(RTYPE, in0, (pdst)); \
    ST_V(RTYPE, in1, (pdst) + stride); \
#define ST_UB2(...) ST_V2(v16u8, __VA_ARGS__)
#define ST_SB2(...) ST_V2(v16i8, __VA_ARGS__)
#define ST_UH2(...) ST_V2(v8u16, __VA_ARGS__)
#define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
#define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)

#define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \
    ST_V2(RTYPE, in0, in1, (pdst), stride); \
    ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
#define ST_UB4(...) ST_V4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_V4(v16i8, __VA_ARGS__)
#define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
#define ST_SW4(...) ST_V4(v4i32, __VA_ARGS__)

#define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride) \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
    ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride); \
#define ST_SH6(...) ST_V6(v8i16, __VA_ARGS__)

#define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
    ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
#define ST_UB8(...) ST_V8(v16u8, __VA_ARGS__)
#define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)
#define ST_SW8(...) ST_V8(v4i32, __VA_ARGS__)
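
/* Example : Copying a 16x8 pixel block with the strided vector helpers.
             A minimal sketch; all names are illustrative.

                 static void copy_16x8(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride)
                 {
                     v16u8 r0, r1, r2, r3, r4, r5, r6, r7;

                     LD_UB8(src, src_stride, r0, r1, r2, r3, r4, r5, r6, r7);
                     ST_UB8(r0, r1, r2, r3, r4, r5, r6, r7, dst, dst_stride);
                 }
*/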
#define ST_H1(in, idx, pdst) \
    uint16_t out0_m; \
    out0_m = __msa_copy_u_h((v8i16) in, idx); \
    SH(out0_m, (pdst)); \

#define ST_H2(in, idx0, idx1, pdst, stride) \
    uint16_t out0_m, out1_m; \
    out0_m = __msa_copy_u_h((v8i16) in, idx0); \
    out1_m = __msa_copy_u_h((v8i16) in, idx1); \
    SH(out0_m, (pdst)); \
    SH(out1_m, (pdst) + stride); \

#define ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \
    uint16_t out0_m, out1_m, out2_m, out3_m; \
    out0_m = __msa_copy_u_h((v8i16) in, idx0); \
    out1_m = __msa_copy_u_h((v8i16) in, idx1); \
    out2_m = __msa_copy_u_h((v8i16) in, idx2); \
    out3_m = __msa_copy_u_h((v8i16) in, idx3); \
    SH(out0_m, (pdst)); \
    SH(out1_m, (pdst) + stride); \
    SH(out2_m, (pdst) + 2 * stride); \
    SH(out3_m, (pdst) + 3 * stride); \

#define ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5, \
              idx6, idx7, pdst, stride) \
    ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \
    ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride) \

#define ST_W1(in, idx, pdst) \
    uint32_t out0_m; \
    out0_m = __msa_copy_u_w((v4i32) in, idx); \
    SW(out0_m, (pdst)); \

#define ST_W2(in, idx0, idx1, pdst, stride) \
    uint32_t out0_m, out1_m; \
    out0_m = __msa_copy_u_w((v4i32) in, idx0); \
    out1_m = __msa_copy_u_w((v4i32) in, idx1); \
    SW(out0_m, (pdst)); \
    SW(out1_m, (pdst) + stride); \

#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
    uint32_t out0_m, out1_m, out2_m, out3_m; \
    out0_m = __msa_copy_u_w((v4i32) in, idx0); \
    out1_m = __msa_copy_u_w((v4i32) in, idx1); \
    out2_m = __msa_copy_u_w((v4i32) in, idx2); \
    out3_m = __msa_copy_u_w((v4i32) in, idx3); \
    SW(out0_m, (pdst)); \
    SW(out1_m, (pdst) + stride); \
    SW(out2_m, (pdst) + 2 * stride); \
    SW(out3_m, (pdst) + 3 * stride); \

#define ST_W8(in0, in1, idx0, idx1, idx2, idx3, \
              idx4, idx5, idx6, idx7, pdst, stride) \
    ST_W4(in0, idx0, idx1, idx2, idx3, pdst, stride) \
    ST_W4(in1, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride) \

#define ST_D1(in, idx, pdst) \
    uint64_t out0_m; \
    out0_m = __msa_copy_u_d((v2i64) in, idx); \
    SD(out0_m, (pdst)); \

#define ST_D2(in, idx0, idx1, pdst, stride) \
    uint64_t out0_m, out1_m; \
    out0_m = __msa_copy_u_d((v2i64) in, idx0); \
    out1_m = __msa_copy_u_d((v2i64) in, idx1); \
    SD(out0_m, (pdst)); \
    SD(out1_m, (pdst) + stride); \

#define ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
    uint64_t out0_m, out1_m, out2_m, out3_m; \
    out0_m = __msa_copy_u_d((v2i64) in0, idx0); \
    out1_m = __msa_copy_u_d((v2i64) in0, idx1); \
    out2_m = __msa_copy_u_d((v2i64) in1, idx2); \
    out3_m = __msa_copy_u_d((v2i64) in1, idx3); \
    SD(out0_m, (pdst)); \
    SD(out1_m, (pdst) + stride); \
    SD(out2_m, (pdst) + 2 * stride); \
    SD(out3_m, (pdst) + 3 * stride); \

#define ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3, \
              idx4, idx5, idx6, idx7, pdst, stride) \
    ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
    ST_D4(in2, in3, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride) \
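
/* Example : Writing an 8-pixel wide, 4-row result held in two vectors, one
             row per doubleword lane. A minimal sketch; names are
             illustrative.

                 static void store_8x4(v16u8 row01, v16u8 row23,
                                       uint8_t *dst, int32_t stride)
                 {
                     ST_D4(row01, row23, 0, 1, 0, 1, dst, stride);
                 }
*/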
#define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    uint64_t out0_m, out1_m, out2_m, out3_m; \
    uint64_t out4_m, out5_m, out6_m, out7_m; \
    uint32_t out8_m, out9_m, out10_m, out11_m; \
    uint32_t out12_m, out13_m, out14_m, out15_m; \
    uint8_t *pblk_12x8_m = (uint8_t *) (pdst); \
    out0_m = __msa_copy_u_d((v2i64) in0, 0); \
    out1_m = __msa_copy_u_d((v2i64) in1, 0); \
    out2_m = __msa_copy_u_d((v2i64) in2, 0); \
    out3_m = __msa_copy_u_d((v2i64) in3, 0); \
    out4_m = __msa_copy_u_d((v2i64) in4, 0); \
    out5_m = __msa_copy_u_d((v2i64) in5, 0); \
    out6_m = __msa_copy_u_d((v2i64) in6, 0); \
    out7_m = __msa_copy_u_d((v2i64) in7, 0); \
    out8_m = __msa_copy_u_w((v4i32) in0, 2); \
    out9_m = __msa_copy_u_w((v4i32) in1, 2); \
    out10_m = __msa_copy_u_w((v4i32) in2, 2); \
    out11_m = __msa_copy_u_w((v4i32) in3, 2); \
    out12_m = __msa_copy_u_w((v4i32) in4, 2); \
    out13_m = __msa_copy_u_w((v4i32) in5, 2); \
    out14_m = __msa_copy_u_w((v4i32) in6, 2); \
    out15_m = __msa_copy_u_w((v4i32) in7, 2); \
    SD(out0_m, pblk_12x8_m); \
    SW(out8_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out1_m, pblk_12x8_m); \
    SW(out9_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out2_m, pblk_12x8_m); \
    SW(out10_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out3_m, pblk_12x8_m); \
    SW(out11_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out4_m, pblk_12x8_m); \
    SW(out12_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out5_m, pblk_12x8_m); \
    SW(out13_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out6_m, pblk_12x8_m); \
    SW(out14_m, pblk_12x8_m + 8); \
    pblk_12x8_m += stride; \
    SD(out7_m, pblk_12x8_m); \
    SW(out15_m, pblk_12x8_m + 8); \
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_aver_u_b((v16u8) in0, (v16u8) in1); \
    out1 = (RTYPE) __msa_aver_u_b((v16u8) in2, (v16u8) in3); \
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)

#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3) \
    AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
    AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
#define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
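
/* Example : Rounding average of two 16x2 pixel blocks, as used for
             bi-prediction style averaging. A minimal sketch; names are
             illustrative.

                 static void avg_16x2(const uint8_t *src0, const uint8_t *src1,
                                      uint8_t *dst, int32_t stride)
                 {
                     v16u8 s0, s1, p0, p1;

                     LD_UB2(src0, stride, s0, s1);
                     LD_UB2(src1, stride, p0, p1);
                     AVER_UB2_UB(s0, p0, s1, p1, s0, s1);
                     ST_UB2(s0, s1, dst, stride);
                 }
*/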
#define SLDI_B(RTYPE, d, s, slide_val, out) \
    out = (RTYPE) __msa_sldi_b((v16i8) d, (v16i8) s, slide_val); \

#define SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
    SLDI_B(RTYPE, d0, s0, slide_val, out0) \
    SLDI_B(RTYPE, d1, s1, slide_val, out1) \
#define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
#define SLDI_B2_SB(...) SLDI_B2(v16i8, __VA_ARGS__)
#define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)
#define SLDI_B2_SW(...) SLDI_B2(v4i32, __VA_ARGS__)

#define SLDI_B3(RTYPE, d0, s0, d1, s1, d2, s2, slide_val, \
                out0, out1, out2) \
    SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
    SLDI_B(RTYPE, d2, s2, slide_val, out2) \
#define SLDI_B3_UB(...) SLDI_B3(v16u8, __VA_ARGS__)
#define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)

#define SLDI_B4(RTYPE, d0, s0, d1, s1, d2, s2, d3, s3, \
                slide_val, out0, out1, out2, out3) \
    SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
    SLDI_B2(RTYPE, d2, s2, d3, s3, slide_val, out2, out3) \
#define SLDI_B4_UB(...) SLDI_B4(v16u8, __VA_ARGS__)
#define SLDI_B4_SB(...) SLDI_B4(v16i8, __VA_ARGS__)
#define SLDI_B4_SH(...) SLDI_B4(v8i16, __VA_ARGS__)

#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
    out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0); \
    out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2); \
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)

#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
                out0, out1, out2) \
    VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
    out2 = (RTYPE) __msa_vshf_b((v16i8) mask2, (v16i8) in5, (v16i8) in4); \
#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)

#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, \
                out0, out1, out2, out3) \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1); \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3); \
#define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
#define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)
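
/* Example : Byte shuffle with an index vector: mask values 0..15 select
             from the first data operand, 16..31 from the second. The mask
             below interleaves two vectors byte by byte; a minimal sketch,
             and all names are illustrative.

                 static v16i8 interleave_bytes(v16i8 a, v16i8 b)
                 {
                     v16i8 mask = { 0, 16, 1, 17, 2, 18, 3, 19,
                                    4, 20, 5, 21, 6, 22, 7, 23 };

                     return __msa_vshf_b(mask, b, a);
                 }
*/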
#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
    out0 = (RTYPE) __msa_vshf_h((v8i16) mask0, (v8i16) in1, (v8i16) in0); \
    out1 = (RTYPE) __msa_vshf_h((v8i16) mask1, (v8i16) in3, (v8i16) in2); \
#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)

#define VSHF_H3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
                out0, out1, out2) \
    VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
    out2 = (RTYPE) __msa_vshf_h((v8i16) mask2, (v8i16) in5, (v8i16) in4); \
#define VSHF_H3_SH(...) VSHF_H3(v8i16, __VA_ARGS__)

#define VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
    out0 = (RTYPE) __msa_vshf_w((v4i32) mask0, (v4i32) in1, (v4i32) in0); \
    out1 = (RTYPE) __msa_vshf_w((v4i32) mask1, (v4i32) in3, (v4i32) in2); \
#define VSHF_W2_SB(...) VSHF_W2(v16i8, __VA_ARGS__)
#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dotp_u_h((v16u8) mult0, (v16u8) cnst0); \
    out1 = (RTYPE) __msa_dotp_u_h((v16u8) mult1, (v16u8) cnst1); \
#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)

#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, \
                 cnst0, cnst1, cnst2, cnst3, \
                 out0, out1, out2, out3) \
    DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)

#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dotp_s_h((v16i8) mult0, (v16i8) cnst0); \
    out1 = (RTYPE) __msa_dotp_s_h((v16i8) mult1, (v16i8) cnst1); \
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)

#define DOTP_SB3(RTYPE, mult0, mult1, mult2, cnst0, cnst1, cnst2, \
                 out0, out1, out2) \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    out2 = (RTYPE) __msa_dotp_s_h((v16i8) mult2, (v16i8) cnst2); \
#define DOTP_SB3_SH(...) DOTP_SB3(v8i16, __VA_ARGS__)

#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, \
                 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)

#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dotp_s_w((v8i16) mult0, (v8i16) cnst0); \
    out1 = (RTYPE) __msa_dotp_s_w((v8i16) mult1, (v8i16) cnst1); \
#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)

#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, \
                 cnst0, cnst1, cnst2, cnst3, \
                 out0, out1, out2, out3) \
    DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)

#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dpadd_s_h((v8i16) out0, \
                                   (v16i8) mult0, (v16i8) cnst0); \
    out1 = (RTYPE) __msa_dpadd_s_h((v8i16) out1, \
                                   (v16i8) mult1, (v16i8) cnst1); \
#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)

#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
    DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)

#define DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dpadd_u_h((v8u16) out0, \
                                   (v16u8) mult0, (v16u8) cnst0); \
    out1 = (RTYPE) __msa_dpadd_u_h((v8u16) out1, \
                                   (v16u8) mult1, (v16u8) cnst1); \
#define DPADD_UB2_UH(...) DPADD_UB2(v8u16, __VA_ARGS__)

#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
    out0 = (RTYPE) __msa_dpadd_s_w((v4i32) out0, \
                                   (v8i16) mult0, (v8i16) cnst0); \
    out1 = (RTYPE) __msa_dpadd_s_w((v4i32) out1, \
                                   (v8i16) mult1, (v8i16) cnst1); \
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)

#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
    DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
    DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
#define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)
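
/* Example : Widening multiply-accumulate: each dotp/dpadd step multiplies
             adjacent signed halfword pairs, sums each pair and accumulates
             into 32-bit lanes, e.g. one column of a 4-tap filter. A minimal
             sketch; names are illustrative.

                 static v4i32 filt_4tap(v8i16 px01, v8i16 px23,
                                        v8i16 coef01, v8i16 coef23)
                 {
                     v4i32 acc;

                     acc = __msa_dotp_s_w(px01, coef01);
                     acc = __msa_dpadd_s_w(acc, px23, coef23);

                     return acc;
                 }
*/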
#define MIN_UH2(RTYPE, in0, in1, min_vec) \
    in0 = (RTYPE) __msa_min_u_h((v8u16) in0, min_vec); \
    in1 = (RTYPE) __msa_min_u_h((v8u16) in1, min_vec); \
#define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)

#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \
    MIN_UH2(RTYPE, in0, in1, min_vec); \
    MIN_UH2(RTYPE, in2, in3, min_vec); \
#define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)

#define CLIP_SH(in, min, max) \
    in = __msa_max_s_h((v8i16) min, (v8i16) in); \
    in = __msa_min_s_h((v8i16) max, (v8i16) in); \

#define CLIP_SH_0_255(in) \
    in = __msa_maxi_s_h((v8i16) in, 0); \
    in = (v8i16) __msa_sat_u_h((v8u16) in, 7); \

#define CLIP_SH2_0_255(in0, in1) \
    CLIP_SH_0_255(in0); \
    CLIP_SH_0_255(in1); \

#define CLIP_SH4_0_255(in0, in1, in2, in3) \
    CLIP_SH2_0_255(in0, in1); \
    CLIP_SH2_0_255(in2, in3); \

#define CLIP_SH8_0_255(in0, in1, in2, in3, \
                       in4, in5, in6, in7) \
    CLIP_SH4_0_255(in0, in1, in2, in3); \
    CLIP_SH4_0_255(in4, in5, in6, in7); \

#define CLIP_SW_0_255(in) \
    in = __msa_maxi_s_w((v4i32) in, 0); \
    in = (v4i32) __msa_sat_u_w((v4u32) in, 7); \

#define CLIP_SW2_0_255(in0, in1) \
    CLIP_SW_0_255(in0); \
    CLIP_SW_0_255(in1); \

#define CLIP_SW4_0_255(in0, in1, in2, in3) \
    CLIP_SW2_0_255(in0, in1); \
    CLIP_SW2_0_255(in2, in3); \

#define CLIP_SW8_0_255(in0, in1, in2, in3, \
                       in4, in5, in6, in7) \
    CLIP_SW4_0_255(in0, in1, in2, in3); \
    CLIP_SW4_0_255(in4, in5, in6, in7); \
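
/* Example : Clamping filtered 16-bit results to pixel range before packing
             back to bytes. A minimal sketch; names are illustrative.

                 static v16u8 round_pixels(v8i16 res0, v8i16 res1)
                 {
                     CLIP_SH2_0_255(res0, res1);

                     return (v16u8) __msa_pckev_b((v16i8) res1, (v16i8) res0);
                 }
*/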
#define HADD_SW_S32(in) \
    v2i64 res0_m, res1_m; \
    int32_t sum_m; \
    res0_m = __msa_hadd_s_d((v4i32) in, (v4i32) in); \
    res1_m = __msa_splati_d(res0_m, 1); \
    res0_m = res0_m + res1_m; \
    sum_m = __msa_copy_s_w((v4i32) res0_m, 0); \
    sum_m; \

#define HADD_UH_U32(in) \
    v4u32 res_m; \
    v2u64 res0_m, res1_m; \
    uint32_t sum_m; \
    res_m = __msa_hadd_u_w((v8u16) in, (v8u16) in); \
    res0_m = __msa_hadd_u_d(res_m, res_m); \
    res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1); \
    res0_m = res0_m + res1_m; \
    sum_m = __msa_copy_u_w((v4i32) res0_m, 0); \
    sum_m; \
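
/* Example : Reducing a vector of unsigned halfwords to a scalar sum; the
             macro widens pairwise (h -> w -> d), folds the two doubleword
             halves and extracts word 0. A minimal sketch; names are
             illustrative.

                 static uint32_t sum_uh(v8u16 vec)
                 {
                     return HADD_UH_U32(vec);
                 }
*/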
#define HADD_SB2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_hadd_s_h((v16i8) in0, (v16i8) in0); \
    out1 = (RTYPE) __msa_hadd_s_h((v16i8) in1, (v16i8) in1); \
#define HADD_SB2_SH(...) HADD_SB2(v8i16, __VA_ARGS__)

#define HADD_SB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
    HADD_SB2(RTYPE, in0, in1, out0, out1); \
    HADD_SB2(RTYPE, in2, in3, out2, out3); \
#define HADD_SB4_UH(...) HADD_SB4(v8u16, __VA_ARGS__)
#define HADD_SB4_SH(...) HADD_SB4(v8i16, __VA_ARGS__)

#define HADD_UB2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_hadd_u_h((v16u8) in0, (v16u8) in0); \
    out1 = (RTYPE) __msa_hadd_u_h((v16u8) in1, (v16u8) in1); \
#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)

#define HADD_UB3(RTYPE, in0, in1, in2, out0, out1, out2) \
    HADD_UB2(RTYPE, in0, in1, out0, out1); \
    out2 = (RTYPE) __msa_hadd_u_h((v16u8) in2, (v16u8) in2); \
#define HADD_UB3_UH(...) HADD_UB3(v8u16, __VA_ARGS__)

#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
    HADD_UB2(RTYPE, in0, in1, out0, out1); \
    HADD_UB2(RTYPE, in2, in3, out2, out3); \
#define HADD_UB4_UB(...) HADD_UB4(v16u8, __VA_ARGS__)
#define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
#define HADD_UB4_SH(...) HADD_UB4(v8i16, __VA_ARGS__)

#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0); \
    out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1); \
#define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)

#define HSUB_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
    HSUB_UB2(RTYPE, in0, in1, out0, out1); \
    HSUB_UB2(RTYPE, in2, in3, out2, out3); \
#define HSUB_UB4_UH(...) HSUB_UB4(v8u16, __VA_ARGS__)
#define HSUB_UB4_SH(...) HSUB_UB4(v8i16, __VA_ARGS__)
#if HAVE_MSA2
#define SAD_UB2_UH(in0, in1, ref0, ref1) \
    v8u16 sad_m = { 0 }; \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in0, (v16u8) ref0); \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in1, (v16u8) ref1); \
    sad_m; \
#else // !HAVE_MSA2
#define SAD_UB2_UH(in0, in1, ref0, ref1) \
    v16u8 diff0_m, diff1_m; \
    v8u16 sad_m = { 0 }; \
    diff0_m = __msa_asub_u_b((v16u8) in0, (v16u8) ref0); \
    diff1_m = __msa_asub_u_b((v16u8) in1, (v16u8) ref1); \
    sad_m += __msa_hadd_u_h((v16u8) diff0_m, (v16u8) diff0_m); \
    sad_m += __msa_hadd_u_h((v16u8) diff1_m, (v16u8) diff1_m); \
    sad_m; \
#endif // #if HAVE_MSA2
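
/* Example : Accumulating the SAD of a 16x4 block two rows at a time, then
             reducing with HADD_UH_U32 above. A minimal sketch; names are
             illustrative.

                 static uint32_t sad_16x4(const uint8_t *src, int32_t s_stride,
                                          const uint8_t *ref, int32_t r_stride)
                 {
                     v16u8 s0, s1, s2, s3, r0, r1, r2, r3;
                     v8u16 sad;

                     LD_UB4(src, s_stride, s0, s1, s2, s3);
                     LD_UB4(ref, r_stride, r0, r1, r2, r3);
                     sad  = SAD_UB2_UH(s0, s1, r0, r1);
                     sad += SAD_UB2_UH(s2, s3, r2, r3);

                     return HADD_UH_U32(sad);
                 }
*/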
#define INSERT_W2(RTYPE, in0, in1, out) \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0); \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1); \
#define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)

#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0); \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1); \
    out = (RTYPE) __msa_insert_w((v4i32) out, 2, in2); \
    out = (RTYPE) __msa_insert_w((v4i32) out, 3, in3); \
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
#define INSERT_W4_SH(...) INSERT_W4(v8i16, __VA_ARGS__)
#define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)

#define INSERT_D2(RTYPE, in0, in1, out) \
    out = (RTYPE) __msa_insert_d((v2i64) out, 0, in0); \
    out = (RTYPE) __msa_insert_d((v2i64) out, 1, in1); \
#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
#define INSERT_D2_SH(...) INSERT_D2(v8i16, __VA_ARGS__)
#define INSERT_D2_SD(...) INSERT_D2(v2i64, __VA_ARGS__)
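
/* Example : Building one 16-byte vector from two unaligned 8-byte rows, a
             common pattern for width-8 processing. A minimal sketch; names
             are illustrative.

                 static v16u8 load_8x2(const uint8_t *src, int32_t stride)
                 {
                     uint64_t r0 = LD(src);
                     uint64_t r1 = LD(src + stride);
                     v16u8 out = { 0 };

                     INSERT_D2_UB(r0, r1, out);

                     return out;
                 }
*/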
#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvev_b((v16i8) in1, (v16i8) in0); \
    out1 = (RTYPE) __msa_ilvev_b((v16i8) in3, (v16i8) in2); \
#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
#define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)

#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvev_h((v8i16) in1, (v8i16) in0); \
    out1 = (RTYPE) __msa_ilvev_h((v8i16) in3, (v8i16) in2); \
#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)

#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvev_w((v4i32) in1, (v4i32) in0); \
    out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2); \
#define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
#define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)

#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvev_d((v2i64) in1, (v2i64) in0); \
    out1 = (RTYPE) __msa_ilvev_d((v2i64) in3, (v2i64) in2); \
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
#define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
#define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)

#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
    out1 = (RTYPE) __msa_ilvl_b((v16i8) in2, (v16i8) in3); \
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)

#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVL_B4_UB(...) ILVL_B4(v16u8, __VA_ARGS__)
#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
#define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)

#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
    out1 = (RTYPE) __msa_ilvl_h((v8i16) in2, (v8i16) in3); \
#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)

#define ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVL_H4_SH(...) ILVL_H4(v8i16, __VA_ARGS__)
#define ILVL_H4_SW(...) ILVL_H4(v4i32, __VA_ARGS__)

#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
    out1 = (RTYPE) __msa_ilvl_w((v4i32) in2, (v4i32) in3); \
#define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
#define ILVL_W2_SB(...) ILVL_W2(v16i8, __VA_ARGS__)
#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)

#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
    out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3); \
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)

#define ILVR_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
    ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
    out2 = (RTYPE) __msa_ilvr_b((v16i8) in4, (v16i8) in5); \
#define ILVR_B3_UB(...) ILVR_B3(v16u8, __VA_ARGS__)
#define ILVR_B3_SB(...) ILVR_B3(v16i8, __VA_ARGS__)
#define ILVR_B3_UH(...) ILVR_B3(v8u16, __VA_ARGS__)
#define ILVR_B3_SH(...) ILVR_B3(v8i16, __VA_ARGS__)

#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)

#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                in8, in9, in10, in11, in12, in13, in14, in15, \
                out0, out1, out2, out3, out4, out5, out6, out7) \
    ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
            out0, out1, out2, out3); \
    ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, \
            out4, out5, out6, out7); \
#define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
#define ILVR_B8_SW(...) ILVR_B8(v4i32, __VA_ARGS__)

#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
    out1 = (RTYPE) __msa_ilvr_h((v8i16) in2, (v8i16) in3); \
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)

#define ILVR_H3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
    ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
    out2 = (RTYPE) __msa_ilvr_h((v8i16) in4, (v8i16) in5); \
#define ILVR_H3_SH(...) ILVR_H3(v8i16, __VA_ARGS__)

#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)

#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
    out1 = (RTYPE) __msa_ilvr_w((v4i32) in2, (v4i32) in3); \
#define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
#define ILVR_W2_SB(...) ILVR_W2(v16i8, __VA_ARGS__)
#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)

#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVR_W4_SB(...) ILVR_W4(v16i8, __VA_ARGS__)
#define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)

#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1); \
    out1 = (RTYPE) __msa_ilvr_d((v2i64) in2, (v2i64) in3); \
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)

#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
    out2 = (RTYPE) __msa_ilvr_d((v2i64) in4, (v2i64) in5); \
#define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)

#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                out0, out1, out2, out3) \
    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)

#define ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1); \
    out1 = (RTYPE) __msa_ilvl_d((v2i64) in2, (v2i64) in3); \
#define ILVL_D2_UB(...) ILVL_D2(v16u8, __VA_ARGS__)
#define ILVL_D2_SB(...) ILVL_D2(v16i8, __VA_ARGS__)
#define ILVL_D2_SH(...) ILVL_D2(v8i16, __VA_ARGS__)

#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
    out1 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)

#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
    out1 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)

#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
    out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
    out1 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)

#define MAXI_SH2(RTYPE, in0, in1, max_val) \
    in0 = (RTYPE) __msa_maxi_s_h((v8i16) in0, max_val); \
    in1 = (RTYPE) __msa_maxi_s_h((v8i16) in1, max_val); \
#define MAXI_SH2_UH(...) MAXI_SH2(v8u16, __VA_ARGS__)
#define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)

#define MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val) \
    MAXI_SH2(RTYPE, in0, in1, max_val); \
    MAXI_SH2(RTYPE, in2, in3, max_val); \
#define MAXI_SH4_UH(...) MAXI_SH4(v8u16, __VA_ARGS__)
#define MAXI_SH4_SH(...) MAXI_SH4(v8i16, __VA_ARGS__)

#define MAXI_SH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, max_val) \
    MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val); \
    MAXI_SH4(RTYPE, in4, in5, in6, in7, max_val); \
#define MAXI_SH8_UH(...) MAXI_SH8(v8u16, __VA_ARGS__)
#define MAXI_SH8_SH(...) MAXI_SH8(v8i16, __VA_ARGS__)

#define SAT_UH2(RTYPE, in0, in1, sat_val) \
    in0 = (RTYPE) __msa_sat_u_h((v8u16) in0, sat_val); \
    in1 = (RTYPE) __msa_sat_u_h((v8u16) in1, sat_val); \
#define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
#define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)

#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \
    SAT_UH2(RTYPE, in0, in1, sat_val); \
    SAT_UH2(RTYPE, in2, in3, sat_val); \
#define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
#define SAT_UH4_SH(...) SAT_UH4(v8i16, __VA_ARGS__)

#define SAT_UH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, sat_val) \
    SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val); \
    SAT_UH4(RTYPE, in4, in5, in6, in7, sat_val); \
#define SAT_UH8_UH(...) SAT_UH8(v8u16, __VA_ARGS__)
#define SAT_UH8_SH(...) SAT_UH8(v8i16, __VA_ARGS__)

#define SAT_SH2(RTYPE, in0, in1, sat_val) \
    in0 = (RTYPE) __msa_sat_s_h((v8i16) in0, sat_val); \
    in1 = (RTYPE) __msa_sat_s_h((v8i16) in1, sat_val); \
#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)

#define SAT_SH3(RTYPE, in0, in1, in2, sat_val) \
    SAT_SH2(RTYPE, in0, in1, sat_val); \
    in2 = (RTYPE) __msa_sat_s_h((v8i16) in2, sat_val); \
#define SAT_SH3_SH(...) SAT_SH3(v8i16, __VA_ARGS__)

#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
    SAT_SH2(RTYPE, in0, in1, sat_val); \
    SAT_SH2(RTYPE, in2, in3, sat_val); \
#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)

#define SAT_SW2(RTYPE, in0, in1, sat_val) \
    in0 = (RTYPE) __msa_sat_s_w((v4i32) in0, sat_val); \
    in1 = (RTYPE) __msa_sat_s_w((v4i32) in1, sat_val); \
#define SAT_SW2_SW(...) SAT_SW2(v4i32, __VA_ARGS__)

#define SAT_SW4(RTYPE, in0, in1, in2, in3, sat_val) \
    SAT_SW2(RTYPE, in0, in1, sat_val); \
    SAT_SW2(RTYPE, in2, in3, sat_val); \
#define SAT_SW4_SW(...) SAT_SW4(v4i32, __VA_ARGS__)

#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
    out0 = (RTYPE) __msa_splati_h((v8i16) in, idx0); \
    out1 = (RTYPE) __msa_splati_h((v8i16) in, idx1); \
#define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, \
                  out0, out1, out2) \
    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
    out2 = (RTYPE) __msa_splati_h((v8i16) in, idx2); \
#define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
#define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)

#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, \
                  out0, out1, out2, out3) \
    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
    SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \
#define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
#define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)

#define SPLATI_W2(RTYPE, in, stidx, out0, out1) \
    out0 = (RTYPE) __msa_splati_w((v4i32) in, stidx); \
    out1 = (RTYPE) __msa_splati_w((v4i32) in, (stidx+1)); \
#define SPLATI_W2_SH(...) SPLATI_W2(v8i16, __VA_ARGS__)
#define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)

#define SPLATI_W4(RTYPE, in, out0, out1, out2, out3) \
    SPLATI_W2(RTYPE, in, 0, out0, out1); \
    SPLATI_W2(RTYPE, in, 2, out2, out3); \
#define SPLATI_W4_SH(...) SPLATI_W4(v8i16, __VA_ARGS__)
#define SPLATI_W4_SW(...) SPLATI_W4(v4i32, __VA_ARGS__)

#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1); \
    out1 = (RTYPE) __msa_pckev_b((v16i8) in2, (v16i8) in3); \
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)

#define PCKEV_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
    PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
    out2 = (RTYPE) __msa_pckev_b((v16i8) in4, (v16i8) in5); \
#define PCKEV_B3_UB(...) PCKEV_B3(v16u8, __VA_ARGS__)
#define PCKEV_B3_SB(...) PCKEV_B3(v16i8, __VA_ARGS__)

#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3) \
    PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
    PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
#define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
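
/* Example : Narrowing four halfword vectors back to bytes with the even
             pick; inputs must already be within 0..255 (see the CLIP and
             SAT helpers above). A minimal sketch; names are illustrative.

                 static void pack_store_16x2(v8i16 in0, v8i16 in1, v8i16 in2,
                                             v8i16 in3, uint8_t *dst,
                                             int32_t stride)
                 {
                     v16u8 out0, out1;

                     PCKEV_B2_UB(in1, in0, in3, in2, out0, out1);
                     ST_UB2(out0, out1, dst, stride);
                 }
*/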
#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1); \
    out1 = (RTYPE) __msa_pckev_h((v8i16) in2, (v8i16) in3); \
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)

#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3) \
    PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
    PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
#define PCKEV_H4_SW(...) PCKEV_H4(v4i32, __VA_ARGS__)

#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1); \
    out1 = (RTYPE) __msa_pckev_d((v2i64) in2, (v2i64) in3); \
#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
#define PCKEV_D2_SB(...) PCKEV_D2(v16i8, __VA_ARGS__)
#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)

#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3) \
    PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
    PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)

#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1); \
    out1 = (RTYPE) __msa_pckod_d((v2i64) in2, (v2i64) in3); \
#define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
#define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
#define PCKOD_D2_SD(...) PCKOD_D2(v2i64, __VA_ARGS__)

#define XORI_B2_128(RTYPE, in0, in1) \
    in0 = (RTYPE) __msa_xori_b((v16u8) in0, 128); \
    in1 = (RTYPE) __msa_xori_b((v16u8) in1, 128); \
#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
#define XORI_B2_128_SH(...) XORI_B2_128(v8i16, __VA_ARGS__)

#define XORI_B3_128(RTYPE, in0, in1, in2) \
    XORI_B2_128(RTYPE, in0, in1); \
    in2 = (RTYPE) __msa_xori_b((v16u8) in2, 128); \
#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)

#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
    XORI_B2_128(RTYPE, in0, in1); \
    XORI_B2_128(RTYPE, in2, in3); \
#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
#define XORI_B4_128_SH(...) XORI_B4_128(v8i16, __VA_ARGS__)

#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \
    XORI_B3_128(RTYPE, in0, in1, in2); \
    XORI_B2_128(RTYPE, in3, in4); \
#define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)

#define XORI_B6_128(RTYPE, in0, in1, in2, in3, in4, in5) \
    XORI_B4_128(RTYPE, in0, in1, in2, in3); \
    XORI_B2_128(RTYPE, in4, in5); \
#define XORI_B6_128_SB(...) XORI_B6_128(v16i8, __VA_ARGS__)

#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \
    XORI_B4_128(RTYPE, in0, in1, in2, in3); \
    XORI_B3_128(RTYPE, in4, in5, in6); \
#define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)

#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \
    XORI_B4_128(RTYPE, in0, in1, in2, in3); \
    XORI_B4_128(RTYPE, in4, in5, in6, in7); \
#define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
#define XORI_B8_128_UB(...) XORI_B8_128(v16u8, __VA_ARGS__)
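
/* Example : Flipping unsigned pixels into the signed domain before a signed
             dot product; xori with 128 toggles the bias, and the caller
             compensates for it in its rounding constant. A minimal sketch;
             names are illustrative.

                 static v8i16 dotp_biased(v16u8 pix, v16i8 filt)
                 {
                     v16i8 src = (v16i8) __msa_xori_b(pix, 128);

                     return __msa_dotp_s_h(src, filt);
                 }
*/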
#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \
    out0 = (RTYPE) __msa_adds_s_h((v8i16) in0, (v8i16) in1); \
    out1 = (RTYPE) __msa_adds_s_h((v8i16) in2, (v8i16) in3); \
#define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)

#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3) \
    ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \
    ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \
#define ADDS_SH4_UH(...) ADDS_SH4(v8u16, __VA_ARGS__)
#define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)

#define SLLI_2V(in0, in1, shift) \
    in0 = in0 << shift; \
    in1 = in1 << shift; \

#define SLLI_4V(in0, in1, in2, in3, shift) \
    in0 = in0 << shift; \
    in1 = in1 << shift; \
    in2 = in2 << shift; \
    in3 = in3 << shift; \

#define SRA_4V(in0, in1, in2, in3, shift) \
    in0 = in0 >> shift; \
    in1 = in1 >> shift; \
    in2 = in2 >> shift; \
    in3 = in3 >> shift; \

#define SRL_H4(RTYPE, in0, in1, in2, in3, shift) \
    in0 = (RTYPE) __msa_srl_h((v8i16) in0, (v8i16) shift); \
    in1 = (RTYPE) __msa_srl_h((v8i16) in1, (v8i16) shift); \
    in2 = (RTYPE) __msa_srl_h((v8i16) in2, (v8i16) shift); \
    in3 = (RTYPE) __msa_srl_h((v8i16) in3, (v8i16) shift); \
#define SRL_H4_UH(...) SRL_H4(v8u16, __VA_ARGS__)

#define SRLR_H4(RTYPE, in0, in1, in2, in3, shift) \
    in0 = (RTYPE) __msa_srlr_h((v8i16) in0, (v8i16) shift); \
    in1 = (RTYPE) __msa_srlr_h((v8i16) in1, (v8i16) shift); \
    in2 = (RTYPE) __msa_srlr_h((v8i16) in2, (v8i16) shift); \
    in3 = (RTYPE) __msa_srlr_h((v8i16) in3, (v8i16) shift); \
#define SRLR_H4_UH(...) SRLR_H4(v8u16, __VA_ARGS__)
#define SRLR_H4_SH(...) SRLR_H4(v8i16, __VA_ARGS__)

#define SRLR_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, shift) \
    SRLR_H4(RTYPE, in0, in1, in2, in3, shift); \
    SRLR_H4(RTYPE, in4, in5, in6, in7, shift); \
#define SRLR_H8_UH(...) SRLR_H8(v8u16, __VA_ARGS__)
#define SRLR_H8_SH(...) SRLR_H8(v8i16, __VA_ARGS__)

#define SRAR_H2(RTYPE, in0, in1, shift) \
    in0 = (RTYPE) __msa_srar_h((v8i16) in0, (v8i16) shift); \
    in1 = (RTYPE) __msa_srar_h((v8i16) in1, (v8i16) shift); \
#define SRAR_H2_UH(...) SRAR_H2(v8u16, __VA_ARGS__)
#define SRAR_H2_SH(...) SRAR_H2(v8i16, __VA_ARGS__)

#define SRAR_H3(RTYPE, in0, in1, in2, shift) \
    SRAR_H2(RTYPE, in0, in1, shift) \
    in2 = (RTYPE) __msa_srar_h((v8i16) in2, (v8i16) shift); \
#define SRAR_H3_SH(...) SRAR_H3(v8i16, __VA_ARGS__)

#define SRAR_H4(RTYPE, in0, in1, in2, in3, shift) \
    SRAR_H2(RTYPE, in0, in1, shift) \
    SRAR_H2(RTYPE, in2, in3, shift) \
#define SRAR_H4_UH(...) SRAR_H4(v8u16, __VA_ARGS__)
#define SRAR_H4_SH(...) SRAR_H4(v8i16, __VA_ARGS__)

#define SRAR_W2(RTYPE, in0, in1, shift) \
    in0 = (RTYPE) __msa_srar_w((v4i32) in0, (v4i32) shift); \
    in1 = (RTYPE) __msa_srar_w((v4i32) in1, (v4i32) shift); \
#define SRAR_W2_SW(...) SRAR_W2(v4i32, __VA_ARGS__)

#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
    SRAR_W2(RTYPE, in0, in1, shift) \
    SRAR_W2(RTYPE, in2, in3, shift) \
#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)

#define SRARI_H2(RTYPE, in0, in1, shift) \
    in0 = (RTYPE) __msa_srari_h((v8i16) in0, shift); \
    in1 = (RTYPE) __msa_srari_h((v8i16) in1, shift); \
#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)

#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
    SRARI_H2(RTYPE, in0, in1, shift); \
    SRARI_H2(RTYPE, in2, in3, shift); \
#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)

#define SRARI_W2(RTYPE, in0, in1, shift) \
    in0 = (RTYPE) __msa_srari_w((v4i32) in0, shift); \
    in1 = (RTYPE) __msa_srari_w((v4i32) in1, shift); \
#define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)

#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
    SRARI_W2(RTYPE, in0, in1, shift); \
    SRARI_W2(RTYPE, in2, in3, shift); \
#define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
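
/* Example : Final descale of two 32-bit accumulators with a rounding right
             shift, then packing the even halfwords. A minimal sketch; names
             are illustrative.

                 static v8i16 descale_pack(v4i32 acc0, v4i32 acc1)
                 {
                     SRARI_W2_SW(acc0, acc1, 6);

                     return __msa_pckev_h((v8i16) acc1, (v8i16) acc0);
                 }
*/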
#define MUL2(in0, in1, in2, in3, out0, out1) \
    out0 = in0 * in1; \
    out1 = in2 * in3; \

#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
    MUL2(in0, in1, in2, in3, out0, out1); \
    MUL2(in4, in5, in6, in7, out2, out3); \

#define ADD2(in0, in1, in2, in3, out0, out1) \
    out0 = in0 + in1; \
    out1 = in2 + in3; \

#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
    ADD2(in0, in1, in2, in3, out0, out1); \
    ADD2(in4, in5, in6, in7, out2, out3); \

#define SUB2(in0, in1, in2, in3, out0, out1) \
    out0 = in0 - in1; \
    out1 = in2 - in3; \

#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
    out0 = in0 - in1; \
    out1 = in2 - in3; \
    out2 = in4 - in5; \
    out3 = in6 - in7; \
#define UNPCK_R_SB_SH(in, out) \
    v16i8 sign_m; \
    sign_m = __msa_clti_s_b((v16i8) in, 0); \
    out = (v8i16) __msa_ilvr_b(sign_m, (v16i8) in); \

#if HAVE_MSA2
#define UNPCK_R_SH_SW(in, out) \
    out = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in); \
#else // !HAVE_MSA2
#define UNPCK_R_SH_SW(in, out) \
    v8i16 sign_m; \
    sign_m = __msa_clti_s_h((v8i16) in, 0); \
    out = (v4i32) __msa_ilvr_h(sign_m, (v8i16) in); \
#endif // #if HAVE_MSA2

#if HAVE_MSA2
#define UNPCK_SB_SH(in, out0, out1) \
    out0 = (v8i16) __builtin_msa2_w2x_lo_s_b((v16i8) in); \
    out1 = (v8i16) __builtin_msa2_w2x_hi_s_b((v16i8) in); \
#else // !HAVE_MSA2
#define UNPCK_SB_SH(in, out0, out1) \
    v16i8 tmp_m; \
    tmp_m = __msa_clti_s_b((v16i8) in, 0); \
    ILVRL_B2_SH(tmp_m, in, out0, out1); \
#endif // #if HAVE_MSA2

#define UNPCK_UB_SH(in, out0, out1) \
    v16i8 zero_m = { 0 }; \
    ILVRL_B2_SH(zero_m, in, out0, out1); \

#if HAVE_MSA2
#define UNPCK_SH_SW(in, out0, out1) \
    out0 = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in); \
    out1 = (v4i32) __builtin_msa2_w2x_hi_s_h((v8i16) in); \
#else // !HAVE_MSA2
#define UNPCK_SH_SW(in, out0, out1) \
    v8i16 tmp_m; \
    tmp_m = __msa_clti_s_h((v8i16) in, 0); \
    ILVRL_H2_SW(tmp_m, in, out0, out1); \
#endif // #if HAVE_MSA2
#define SWAP(in0, in1) \
    in0 = in0 ^ in1; \
    in1 = in0 ^ in1; \
    in0 = in0 ^ in1; \

#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
    out0 = in0 + in3; \
    out1 = in1 + in2; \
    out2 = in1 - in2; \
    out3 = in0 - in3; \

#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
                    out0, out1, out2, out3, out4, out5, out6, out7) \
    out0 = in0 + in7; \
    out1 = in1 + in6; \
    out2 = in2 + in5; \
    out3 = in3 + in4; \
    out4 = in3 - in4; \
    out5 = in2 - in5; \
    out6 = in1 - in6; \
    out7 = in0 - in7; \

#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, \
                     in8, in9, in10, in11, in12, in13, in14, in15, \
                     out0, out1, out2, out3, out4, out5, out6, out7, \
                     out8, out9, out10, out11, out12, out13, out14, out15) \
    out0 = in0 + in15; \
    out1 = in1 + in14; \
    out2 = in2 + in13; \
    out3 = in3 + in12; \
    out4 = in4 + in11; \
    out5 = in5 + in10; \
    out6 = in6 + in9; \
    out7 = in7 + in8; \
    out8 = in7 - in8; \
    out9 = in6 - in9; \
    out10 = in5 - in10; \
    out11 = in4 - in11; \
    out12 = in3 - in12; \
    out13 = in2 - in13; \
    out14 = in1 - in14; \
    out15 = in0 - in15; \
#define TRANSPOSE4x4_UB_UB(in0, in1, in2, in3, out0, out1, out2, out3) \
    v16i8 zero_m = { 0 }; \
    v16i8 s0_m, s1_m, s2_m, s3_m; \
    ILVR_D2_SB(in1, in0, in3, in2, s0_m, s1_m); \
    ILVRL_B2_SB(s1_m, s0_m, s2_m, s3_m); \
    out0 = (v16u8) __msa_ilvr_b(s3_m, s2_m); \
    out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 4); \
    out2 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out1, 4); \
    out3 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out2, 4); \

#define TRANSPOSE8x4_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                        out0, out1, out2, out3) \
    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    ILVEV_W2_SB(in0, in4, in1, in5, tmp0_m, tmp1_m); \
    tmp2_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
    ILVEV_W2_SB(in2, in6, in3, in7, tmp0_m, tmp1_m); \
    tmp3_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
    ILVRL_H2_SB(tmp3_m, tmp2_m, tmp0_m, tmp1_m); \
    ILVRL_W2(RTYPE, tmp1_m, tmp0_m, out0, out2); \
    out1 = (RTYPE) __msa_ilvl_d((v2i64) out2, (v2i64) out0); \
    out3 = (RTYPE) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
#define TRANSPOSE8x4_UB_UB(...) TRANSPOSE8x4_UB(v16u8, __VA_ARGS__)
#define TRANSPOSE8x4_UB_UH(...) TRANSPOSE8x4_UB(v8u16, __VA_ARGS__)

#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                        out0, out1, out2, out3, out4, out5, out6, out7) \
    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
    v16i8 zeros = { 0 }; \
    ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \
               tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \
    ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \
    ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \
    ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
    SLDI_B4(RTYPE, zeros, out0, zeros, out2, zeros, out4, zeros, out6, \
            8, out1, out3, out5, out7); \
#define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
#define TRANSPOSE8x8_UB_UH(...) TRANSPOSE8x8_UB(v8u16, __VA_ARGS__)
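
/* Example : Loading 8 rows of 8 pixels and transposing them, as done before
             filtering along the columns. A minimal sketch; names are
             illustrative.

                 static void load_transpose_8x8(const uint8_t *src,
                                                int32_t stride, v16u8 out[8])
                 {
                     v16u8 r0, r1, r2, r3, r4, r5, r6, r7;

                     LD_UB8(src, stride, r0, r1, r2, r3, r4, r5, r6, r7);
                     TRANSPOSE8x8_UB_UB(r0, r1, r2, r3, r4, r5, r6, r7,
                                        out[0], out[1], out[2], out[3],
                                        out[4], out[5], out[6], out[7]);
                 }
*/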
#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                            in8, in9, in10, in11, in12, in13, in14, in15, \
                            out0, out1, out2, out3) \
    v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \
    out1 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
    ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \
    out3 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
    ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \
    tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
    ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
    tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
    ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
    out0 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
    out2 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
    tmp0_m = (v2i64) __msa_ilvod_b((v16i8) out3, (v16i8) out1); \
    tmp1_m = (v2i64) __msa_ilvod_b((v16i8) tmp3_m, (v16i8) tmp2_m); \
    out1 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
    out3 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \

#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                            in8, in9, in10, in11, in12, in13, in14, in15, \
                            out0, out1, out2, out3, out4, out5, out6, out7) \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
    ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
    ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
    ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \
    ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \
    tmp0_m = (v16u8) __msa_ilvev_b((v16i8) out6, (v16i8) out7); \
    tmp4_m = (v16u8) __msa_ilvod_b((v16i8) out6, (v16i8) out7); \
    tmp1_m = (v16u8) __msa_ilvev_b((v16i8) out4, (v16i8) out5); \
    tmp5_m = (v16u8) __msa_ilvod_b((v16i8) out4, (v16i8) out5); \
    out5 = (v16u8) __msa_ilvev_b((v16i8) out2, (v16i8) out3); \
    tmp6_m = (v16u8) __msa_ilvod_b((v16i8) out2, (v16i8) out3); \
    out7 = (v16u8) __msa_ilvev_b((v16i8) out0, (v16i8) out1); \
    tmp7_m = (v16u8) __msa_ilvod_b((v16i8) out0, (v16i8) out1); \
    ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
    out0 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    out4 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
    tmp3_m = (v16u8) __msa_ilvod_h((v8i16) out7, (v8i16) out5); \
    out2 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    out6 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
    out1 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    out5 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp5_m, (v8i16) tmp4_m); \
    tmp3_m = (v16u8) __msa_ilvod_h((v8i16) tmp7_m, (v8i16) tmp6_m); \
    out3 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
    out7 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
    v8i16 s0_m, s1_m; \
    ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
    ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
    out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0); \
    out3 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \

#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
                       out0, out1, out2, out3, out4, out5, out6, out7) \
    v8i16 s0_m, s1_m; \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
    ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
    ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m); \
    ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
    ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m); \
    ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m); \
    ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m); \
    PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \
             tmp3_m, tmp7_m, out0, out2, out4, out6); \
    out1 = (RTYPE) __msa_pckod_d((v2i64) tmp0_m, (v2i64) tmp4_m); \
    out3 = (RTYPE) __msa_pckod_d((v2i64) tmp1_m, (v2i64) tmp5_m); \
    out5 = (RTYPE) __msa_pckod_d((v2i64) tmp2_m, (v2i64) tmp6_m); \
    out7 = (RTYPE) __msa_pckod_d((v2i64) tmp3_m, (v2i64) tmp7_m); \
#define TRANSPOSE8x8_UH_UH(...) TRANSPOSE8x8_H(v8u16, __VA_ARGS__)
#define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)

#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
    v4i32 s0_m, s1_m, s2_m, s3_m; \
    ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
    ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
    out0 = (v4i32) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m); \
    out1 = (v4i32) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m); \
    out2 = (v4i32) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m); \
    out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m); \
#define AVE_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    uint64_t out0_m, out1_m, out2_m, out3_m; \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
    tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
    tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
    tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
    out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0); \
    out1_m = __msa_copy_u_d((v2i64) tmp1_m, 0); \
    out2_m = __msa_copy_u_d((v2i64) tmp2_m, 0); \
    out3_m = __msa_copy_u_d((v2i64) tmp3_m, 0); \
    SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \

#define AVE_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
    tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
    tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
    tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
    ST_UB4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, pdst, stride); \

#define AVER_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    uint64_t out0_m, out1_m, out2_m, out3_m; \
    v16u8 tp0_m, tp1_m, tp2_m, tp3_m; \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                tp0_m, tp1_m, tp2_m, tp3_m); \
    out0_m = __msa_copy_u_d((v2i64) tp0_m, 0); \
    out1_m = __msa_copy_u_d((v2i64) tp1_m, 0); \
    out2_m = __msa_copy_u_d((v2i64) tp2_m, 0); \
    out3_m = __msa_copy_u_d((v2i64) tp3_m, 0); \
    SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \

#define AVER_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
    v16u8 t0_m, t1_m, t2_m, t3_m; \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                t0_m, t1_m, t2_m, t3_m); \
    ST_UB4(t0_m, t1_m, t2_m, t3_m, pdst, stride); \

#define AVER_DST_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                          pdst, stride) \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
    LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    AVER_ST8x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
                  dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \

#define AVER_DST_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                           pdst, stride) \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
    LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    AVER_ST16x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
                   dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \

#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
    uint32_t src0_m, src1_m, src2_m, src3_m; \
    uint32_t out0_m, out1_m, out2_m, out3_m; \
    v8i16 inp0_m, inp1_m, res0_m, res1_m; \
    v16i8 dst0_m = { 0 }; \
    v16i8 dst1_m = { 0 }; \
    v16i8 zero_m = { 0 }; \
    ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m) \
    LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
    INSERT_W2_SB(src0_m, src1_m, dst0_m); \
    INSERT_W2_SB(src2_m, src3_m, dst1_m); \
    ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \
    ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \
    CLIP_SH2_0_255(res0_m, res1_m); \
    PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
    out0_m = __msa_copy_u_w((v4i32) dst0_m, 0); \
    out1_m = __msa_copy_u_w((v4i32) dst0_m, 1); \
    out2_m = __msa_copy_u_w((v4i32) dst1_m, 0); \
    out3_m = __msa_copy_u_w((v4i32) dst1_m, 1); \
    SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
    v8i16 out0_m; \
    out0_m = __msa_dotp_s_h((v16i8) in0, (v16i8) coeff0); \
    out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in1, (v16i8) coeff1); \
    out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in2, (v16i8) coeff2); \
    out0_m; \

#define PCKEV_XORI128_UB(in0, in1) \
    v16u8 out_m; \
    out_m = (v16u8) __msa_pckev_b((v16i8) in1, (v16i8) in0); \
    out_m = (v16u8) __msa_xori_b((v16u8) out_m, 128); \
    out_m; \

#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, \
                                dst0, dst1, pdst, stride) \
    v16u8 tmp0_m, tmp1_m; \
    uint8_t *pdst_m = (uint8_t *) (pdst); \
    tmp0_m = PCKEV_XORI128_UB(in0, in1); \
    tmp1_m = PCKEV_XORI128_UB(in2, in3); \
    AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \
    ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, pdst_m, stride); \

#define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
    uint32_t out0_m, out1_m, out2_m, out3_m; \
    v16i8 tmp0_m, tmp1_m; \
    PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m); \
    out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0); \
    out1_m = __msa_copy_u_w((v4i32) tmp0_m, 2); \
    out2_m = __msa_copy_u_w((v4i32) tmp1_m, 0); \
    out3_m = __msa_copy_u_w((v4i32) tmp1_m, 2); \
    SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \

#define PCKEV_ST_SB(in0, in1, pdst) \
    v16i8 tmp_m; \
    tmp_m = __msa_pckev_b((v16i8) in1, (v16i8) in0); \
    ST_SB(tmp_m, (pdst)); \

#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \
    v16i8 tmp0_m; \
    v8u16 tmp1_m; \
    tmp0_m = __msa_vshf_b((v16i8) mask, (v16i8) in1, (v16i8) in0); \
    tmp1_m = __msa_dotp_u_h((v16u8) tmp0_m, (v16u8) coeff); \
    tmp1_m = (v8u16) __msa_srari_h((v8i16) tmp1_m, shift); \
    tmp1_m = __msa_sat_u_h(tmp1_m, shift); \
    tmp1_m;

#endif /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */