25 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
26 0, 2, 2, 4, 4, 6, 6, 8, 16, 18, 18, 20, 20, 22, 22, 24,
27 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
28 0, 1, 1, 2, 16, 17, 17, 18, 4, 5, 5, 6, 6, 7, 7, 8,
29 0, 1, 1, 2, 16, 17, 17, 18, 16, 17, 17, 18, 18, 19, 19, 20
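/*
 * Note (illustrative, scalar view): each 16-byte row of chroma_mask_arr is a
 * VSHF.B control vector.  Index values 0-15 pick bytes from one source
 * register and 16-31 from the other, so a row such as
 * { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, ... } gathers horizontally adjacent
 * pixel pairs (p[0], p[1]), (p[1], p[2]), ... from up to two image rows,
 * laid out so that one DOTP_U.H against the interleaved (coeff0, coeff1)
 * vector evaluates the whole two-tap filter.  Scalar model of one tap
 * (helper name is illustrative; assumes <stdint.h>):
 */
static inline uint16_t chroma_two_tap_sketch(const uint8_t *pix, int i,
                                             uint8_t c_left, uint8_t c_right)
{
    /* weighted sum of two horizontally adjacent pixels, before rounding */
    return (uint16_t) (c_left * pix[i] + c_right * pix[i + 1]);
}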
33 uint32_t coeff0, uint32_t coeff1)
40 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
41 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
42 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
46 LD_SB2(src, stride, src0, src1);
48 src0 = __msa_vshf_b(mask, src1, src0);
49 res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
51 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
52 res_r = __msa_sat_u_h(res_r, 7);
53 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
55 out0 = __msa_copy_u_h(res, 0);
56 out1 = __msa_copy_u_h(res, 2);
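/*
 * Scalar reference for the horizontal-only case (vertical fraction zero),
 * i.e. the H.264 chroma interpolation formula the hz_* kernels vectorise:
 * out = ((8 - xFrac) * A + xFrac * B + 4) >> 3, with A and B horizontally
 * adjacent pixels.  Helper name is illustrative; assumes <stdint.h>.
 */
static inline uint8_t h264_chroma_hz_pixel_sketch(uint8_t a, uint8_t b,
                                                  int x_frac)
{
    return (uint8_t) (((8 - x_frac) * a + x_frac * b + 4) >> 3);
}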
64 uint32_t coeff0, uint32_t coeff1)
70 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
71 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
72 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
76 LD_UB4(src, stride, src0, src1, src2, src3);
78 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
80 src0 = (v16u8) __msa_ilvr_d((v2i64) src2, (v2i64) src0);
82 res_r = __msa_dotp_u_h(src0, coeff_vec);
84 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
85 res_r = __msa_sat_u_h(res_r, 7);
86 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
92 uint32_t coeff0, uint32_t coeff1,
97 } else if (4 == height) {
103 uint32_t coeff0, uint32_t coeff1)
109 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
110 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
111 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
115 LD_SB2(src, stride, src0, src1);
117 src0 = __msa_vshf_b(mask, src1, src0);
118 res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
120 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
121 res_r = __msa_sat_u_h(res_r, 7);
122 res = (v4i32) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
128 uint32_t coeff0, uint32_t coeff1)
131 v8u16 res0_r, res1_r;
133 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
134 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
135 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
139 LD_UB4(src, stride, src0, src1, src2, src3);
140 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
141 DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0_r, res1_r);
146 out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
147 ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
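/*
 * The coefficient setup above (__msa_fill_b + __msa_ilvr_b) builds a vector
 * of alternating chroma weights so that DOTP_U.H evaluates both taps at
 * once.  Scalar model of that dot product (illustrative; assumes
 * <stdint.h>): lane i of the halfword result is
 * src[2i]*coef[2i] + src[2i+1]*coef[2i+1].
 */
static inline void dotp_u_h_sketch(const uint8_t src[16],
                                   const uint8_t coef[16], uint16_t res[8])
{
    for (int i = 0; i < 8; i++)
        res[i] = (uint16_t) (src[2 * i] * coef[2 * i] +
                             src[2 * i + 1] * coef[2 * i + 1]);
}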
151 uint32_t coeff0, uint32_t coeff1)
153 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, out0, out1;
155 v8u16 res0, res1, res2, res3;
156 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
157 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
158 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
162 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
163 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
164 VSHF_B2_UB(src4, src5, src6, src7, mask, mask, src4, src6);
165 DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0, res1);
166 DOTP_UB2_UH(src4, src6, coeff_vec, coeff_vec, res2, res3);
167 SLLI_4V(res0, res1, res2, res3, 3);
175 uint32_t coeff0, uint32_t coeff1,
180 } else if (4 == height) {
182 } else if (8 == height) {
188 uint32_t coeff0, uint32_t coeff1)
190 v16u8 src0, src1, src2, src3, out0, out1;
191 v8u16 res0, res1, res2, res3;
193 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
194 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
195 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
198 LD_UB4(src, stride, src0, src1, src2, src3);
199 VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
200 VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
201 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
202 coeff_vec, res0, res1, res2, res3);
203 SLLI_4V(res0, res1, res2, res3, 3);
211 uint32_t coeff0, uint32_t coeff1)
213 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
214 v16u8 out0, out1, out2, out3;
215 v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
217 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
218 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
219 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
223 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
224 VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
225 VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
226 VSHF_B2_UB(src4, src4, src5, src5, mask, mask, src4, src5);
227 VSHF_B2_UB(src6, src6, src7, src7, mask, mask, src6, src7);
228 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
229 coeff_vec, res0, res1, res2, res3);
230 DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
231 coeff_vec, res4, res5, res6, res7);
232 SLLI_4V(res0, res1, res2, res3, 3);
233 SLLI_4V(res4, res5, res6, res7, 3);
240 ST8x8_UB(out0, out1, out2, out3, dst, stride);
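/*
 * Rounding stage of the 4- and 8-wide kernels, in scalar form (illustrative;
 * assumes <stdint.h>): SLLI_4V(..., 3) followed by the rounded shift by 6
 * (SRARI) is ((sum << 3) + 32) >> 6, i.e. (sum + 4) >> 3, the spec rounding
 * for a one-dimensional chroma filter; SAT_U..7 then clamps to 255 and
 * PCKEV_B keeps the low byte of each halfword lane.
 */
static inline uint8_t chroma_round_pack_sketch(uint16_t two_tap_sum)
{
    uint32_t v = (((uint32_t) two_tap_sum << 3) + 32) >> 6;

    return (uint8_t) (v > 255 ? 255 : v);
}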
248 v16u8 src0, src1, src2, src3, out0, out1;
249 v8u16 res0, res1, res2, res3;
251 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
252 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
253 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
257 for (row = height >> 2; row--;) {
258 LD_UB4(src, stride, src0, src1, src2, src3);
261 VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
262 VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
263 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
264 coeff_vec, res0, res1, res2, res3);
265 SLLI_4V(res0, res1, res2, res3, 3);
273 if (0 != (height % 4)) {
274 for (row = (height % 4); row--;) {
278 src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
280 res0 = __msa_dotp_u_h(src0, coeff_vec);
282 res0 = (v8u16) __msa_srari_h((v8i16) res0, 6);
283 res0 = __msa_sat_u_h(res0, 7);
284 res0 = (v8u16) __msa_pckev_b((v16i8) res0, (v16i8) res0);
293 uint32_t coeff0, uint32_t coeff1,
298 } else if (8 == height) {
306 uint32_t coeff0, uint32_t coeff1)
313 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
314 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
315 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
317 LD_SB3(src, stride, src0, src1, src2);
319 ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);
321 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
323 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
325 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
326 res_r = __msa_sat_u_h(res_r, 7);
327 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
329 out0 = __msa_copy_u_h(res, 0);
330 out1 = __msa_copy_u_h(res, 2);
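/*
 * The vt_* kernels differ from the hz_* ones only in how pixel pairs are
 * formed: ILVR_B interleaves two vertically adjacent rows so each byte pair
 * is (above, below).  Scalar per-pixel reference for the vertical-only case
 * (horizontal fraction zero): out = ((8 - yFrac) * above + yFrac * below + 4) >> 3.
 * Helper name is illustrative; assumes <stdint.h>.
 */
static inline uint8_t h264_chroma_vt_pixel_sketch(uint8_t above, uint8_t below,
                                                  int y_frac)
{
    return (uint8_t) (((8 - y_frac) * above + y_frac * below + 4) >> 3);
}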
338 uint32_t coeff0, uint32_t coeff1)
341 v16u8 tmp0, tmp1, tmp2, tmp3;
344 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
345 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
346 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
348 LD_UB5(src, stride, src0, src1, src2, src3, src4);
349 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
350 tmp0, tmp1, tmp2, tmp3);
351 ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
353 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
355 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
357 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
358 res_r = __msa_sat_u_h(res_r, 7);
360 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
366 uint32_t coeff0, uint32_t coeff1,
371 } else if (4 == height) {
377 uint32_t coeff0, uint32_t coeff1)
383 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
384 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
385 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
387 LD_UB3(src, stride, src0, src1, src2);
388 ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);
390 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
391 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
393 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
394 res_r = __msa_sat_u_h(res_r, 7);
395 res = (v4i32) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
401 uint32_t coeff0, uint32_t coeff1)
404 v16u8 tmp0, tmp1, tmp2, tmp3;
406 v8u16 res0_r, res1_r;
407 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
408 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
409 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
411 LD_UB5(src, stride, src0, src1, src2, src3, src4);
412 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
414 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
415 DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
420 out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
421 ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
425 uint32_t coeff0, uint32_t coeff1)
427 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
428 v16u8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, out0, out1;
429 v8u16 res0, res1, res2, res3;
430 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
431 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
432 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
434 LD_UB5(src, stride, src0, src1, src2, src3, src4);
436 LD_UB4(src, stride, src5, src6, src7, src8);
437 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
439 ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, tmp4, tmp5, tmp6,
441 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
442 ILVR_D2_UB(tmp5, tmp4, tmp7, tmp6, tmp4, tmp6);
443 DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0, res1);
444 DOTP_UB2_UH(tmp4, tmp6, coeff_vec, coeff_vec, res2, res3);
445 SLLI_4V(res0, res1, res2, res3, 3);
453 uint32_t coeff0, uint32_t coeff1,
458 } else if (4 == height) {
460 } else if (8 == height) {
466 uint32_t coeff0, uint32_t coeff1)
468 v16u8 src0, src1, src2, src3, src4, out0, out1;
469 v8u16 res0, res1, res2, res3;
470 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
471 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
472 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
474 LD_UB5(src, stride, src0, src1, src2, src3, src4);
475 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
477 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
478 coeff_vec, res0, res1, res2, res3);
479 SLLI_4V(res0, res1, res2, res3, 3);
487 uint32_t coeff0, uint32_t coeff1)
489 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
490 v16u8 out0, out1, out2, out3;
491 v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
492 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
493 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
494 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
496 LD_UB5(src, stride, src0, src1, src2, src3, src4);
498 LD_UB4(src, stride, src5, src6, src7, src8);
499 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
501 ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, src4, src5, src6,
503 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
504 coeff_vec, res0, res1, res2, res3);
505 DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
506 coeff_vec, res4, res5, res6, res7);
507 SLLI_4V(res0, res1, res2, res3, 3);
508 SLLI_4V(res4, res5, res6, res7, 3);
515 ST8x8_UB(out0, out1, out2, out3, dst, stride);
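/*
 * A vertical two-tap over N output rows needs N + 1 source rows, which is
 * why the 8x8 kernel loads nine rows (LD_UB5 + LD_UB4) and interleaves each
 * row with the one below it before the dot product.  Equivalent scalar loop
 * (illustrative sketch, assuming <stdint.h> and a common src/dst stride as
 * in the put kernels):
 */
static void chroma_vt_8xn_sketch(const uint8_t *src, uint8_t *dst,
                                 int stride, int height, int y_frac)
{
    for (int r = 0; r < height; r++)
        for (int c = 0; c < 8; c++)
            dst[r * stride + c] =
                (uint8_t) (((8 - y_frac) * src[r * stride + c] +
                            y_frac * src[(r + 1) * stride + c] + 4) >> 3);
}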
519 uint32_t coeff0, uint32_t coeff1,
524 } else if (8 == height) {
530 uint32_t coef_hor0, uint32_t coef_hor1,
531 uint32_t coef_ver0, uint32_t coef_ver1)
535 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
538 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
539 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
540 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
541 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
542 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
546 LD_UB3(src, stride, src0, src1, src2);
547 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
548 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
549 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
552 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
553 res_vt0 = __msa_sat_u_h(res_vt0, 7);
554 res_vert = (v8i16) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
556 out0 = __msa_copy_u_h(res_vert, 0);
557 out1 = __msa_copy_u_h(res_vert, 1);
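/*
 * When both fractions are non-zero the hv_* kernels implement the full
 * bilinear H.264 chroma formula: the horizontal two-tap is computed first
 * (res_hz per row), two vertically adjacent rows are then weighted and
 * summed, and the total is rounded by 6.  Scalar per-pixel reference
 * (illustrative; assumes <stdint.h>), with A/B the upper and C/D the lower
 * pixel pair:
 */
static inline uint8_t h264_chroma_hv_pixel_sketch(uint8_t a, uint8_t b,
                                                  uint8_t c, uint8_t d,
                                                  int x_frac, int y_frac)
{
    return (uint8_t) (((8 - x_frac) * (8 - y_frac) * a +
                       x_frac       * (8 - y_frac) * b +
                       (8 - x_frac) * y_frac       * c +
                       x_frac       * y_frac       * d + 32) >> 6);
}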
565 uint32_t coef_hor0, uint32_t coef_hor1,
566 uint32_t coef_ver0, uint32_t coef_ver1)
569 v16u8 tmp0, tmp1, tmp2, tmp3;
570 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
573 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
574 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
575 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
576 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
577 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
581 LD_UB5(src, stride, src0, src1, src2, src3, src4);
583 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, tmp0, tmp1);
584 VSHF_B2_UB(src1, src2, src3, src4, mask, mask, tmp2, tmp3);
585 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
586 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
587 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
590 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
591 res_vt0 = __msa_sat_u_h(res_vt0, 7);
593 res = (v8i16) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
599 uint32_t coef_hor0, uint32_t coef_hor1,
600 uint32_t coef_ver0, uint32_t coef_ver1,
606 } else if (4 == height) {
613 uint32_t coef_hor0, uint32_t coef_hor1,
614 uint32_t coef_ver0, uint32_t coef_ver1)
617 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
620 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
621 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
622 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
623 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
624 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
627 LD_UB3(src, stride, src0, src1, src2);
628 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
629 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
630 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
633 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
634 res_vt0 = __msa_sat_u_h(res_vt0, 7);
635 res = (v4i32) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
641 uint32_t coef_hor0, uint32_t coef_hor1,
642 uint32_t coef_ver0, uint32_t coef_ver1)
645 v8u16 res_hz0, res_hz1, res_hz2, res_hz3;
646 v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
648 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
649 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
650 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
651 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
652 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
657 LD_UB5(src, stride, src0, src1, src2, src3, src4);
658 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
659 VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
660 DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
661 coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2,
663 MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
664 res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
665 ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
668 PCKEV_B2_SW(res_vt0, res_vt0, res_vt1, res_vt1, res0, res1);
669 ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, stride);
673 uint32_t coef_hor0, uint32_t coef_hor1,
674 uint32_t coef_ver0, uint32_t coef_ver1)
676 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, res0, res1;
677 v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4, res_hz5, res_hz6, res_hz7;
678 v8u16 res_vt0, res_vt1, res_vt2, res_vt3, res_vt4, res_vt5, res_vt6, res_vt7;
680 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
681 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
682 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
683 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
684 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
688 LD_UB5(src, stride, src0, src1, src2, src3, src4);
690 LD_UB4(src, stride, src5, src6, src7, src8);
692 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
693 VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
694 VSHF_B2_UB(src4, src5, src5, src6, mask, mask, src4, src5);
695 VSHF_B2_UB(src6, src7, src7, src8, mask, mask, src6, src7);
696 DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
697 coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2, res_hz3);
698 DOTP_UB4_UH(src4, src5, src6, src7, coeff_hz_vec, coeff_hz_vec,
699 coeff_hz_vec, coeff_hz_vec, res_hz4, res_hz5, res_hz6, res_hz7);
700 MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
701 res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
702 MUL4(res_hz4, coeff_vt_vec1, res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec1,
703 res_hz7, coeff_vt_vec0, res_vt4, res_vt5, res_vt6, res_vt7);
704 ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
705 ADD2(res_vt4, res_vt5, res_vt6, res_vt7, res_vt2, res_vt3);
706 SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
707 SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
708 PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, res0, res1);
713 uint32_t coef_hor0, uint32_t coef_hor1,
714 uint32_t coef_ver0, uint32_t coef_ver1,
720 } else if (4 == height) {
723 } else if (8 == height) {
730 uint32_t coef_hor0, uint32_t coef_hor1,
731 uint32_t coef_ver0, uint32_t coef_ver1)
733 v16u8 src0, src1, src2, src3, src4, out0, out1;
734 v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4;
735 v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
737 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
738 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
739 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
740 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
741 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
748 src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
749 res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
751 LD_UB4(src, stride, src1, src2, src3, src4);
754 VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
755 VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
756 DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
757 coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3, res_hz4);
758 MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3, coeff_vt_vec0,
759 res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
761 res_vt0 += (res_hz0 * coeff_vt_vec1);
762 res_vt1 += (res_hz1 * coeff_vt_vec1);
763 res_vt2 += (res_hz2 * coeff_vt_vec1);
764 res_vt3 += (res_hz3 * coeff_vt_vec1);
766 SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
767 SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
768 PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
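/*
 * In the 8-wide hv kernels each output row reuses the previous row's
 * horizontal result: res_hz0 (row above) is scaled by one vertical weight
 * and accumulated onto the current row's MUL4 product.  Because the two
 * vertical weights sum to 8 and a horizontal result is at most 8 * 255,
 * the 16-bit accumulator cannot overflow (8 * 8 * 255 = 16320).  Scalar
 * sketch of the combine (illustrative; assumes <stdint.h>):
 */
static inline uint16_t hv_vertical_combine_sketch(uint16_t hz_above,
                                                  uint16_t hz_below,
                                                  uint16_t w_above,
                                                  uint16_t w_below)
{
    return (uint16_t) (hz_below * w_below + hz_above * w_above);
}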
773 uint32_t coef_hor0, uint32_t coef_hor1,
774 uint32_t coef_ver0, uint32_t coef_ver1)
776 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
777 v16u8 out0, out1, out2, out3;
778 v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4;
779 v8u16 res_hz5, res_hz6, res_hz7, res_hz8;
780 v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
781 v8u16 res_vt4, res_vt5, res_vt6, res_vt7;
783 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
784 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
785 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
786 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
787 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
791 LD_UB5(src, stride, src0, src1, src2, src3, src4);
793 LD_UB4(src, stride, src5, src6, src7, src8);
794 src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
795 VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
796 VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
797 VSHF_B2_UB(src5, src5, src6, src6, mask, mask, src5, src6);
798 VSHF_B2_UB(src7, src7, src8, src8, mask, mask, src7, src8);
799 res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
800 DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
801 coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3,
803 DOTP_UB4_UH(src5, src6, src7, src8, coeff_hz_vec, coeff_hz_vec,
804 coeff_hz_vec, coeff_hz_vec, res_hz5, res_hz6, res_hz7, res_hz8);
805 MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3,
806 coeff_vt_vec0, res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2,
808 MUL4(res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec0, res_hz7,
809 coeff_vt_vec0, res_hz8, coeff_vt_vec0, res_vt4, res_vt5, res_vt6,
811 res_vt0 += (res_hz0 * coeff_vt_vec1);
812 res_vt1 += (res_hz1 * coeff_vt_vec1);
813 res_vt2 += (res_hz2 * coeff_vt_vec1);
814 res_vt3 += (res_hz3 * coeff_vt_vec1);
815 res_vt4 += (res_hz4 * coeff_vt_vec1);
816 res_vt5 += (res_hz5 * coeff_vt_vec1);
817 res_vt6 += (res_hz6 * coeff_vt_vec1);
818 res_vt7 += (res_hz7 * coeff_vt_vec1);
819 SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
820 SRARI_H4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 6);
821 SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
822 SAT_UH4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 7);
823 PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
824 PCKEV_B2_UB(res_vt5, res_vt4, res_vt7, res_vt6, out2, out3);
825 ST8x8_UB(out0, out1, out2, out3, dst, stride);
829 uint32_t coef_hor0, uint32_t coef_hor1,
830 uint32_t coef_ver0, uint32_t coef_ver1,
836 } else if (8 == height) {
844 uint32_t coeff0, uint32_t coeff1)
847 uint32_t load0, load1;
849 v16u8 dst_data = { 0 };
853 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
854 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
855 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
859 LD_SB2(src, src_stride, src0, src1);
862 load1 = LW(dst + dst_stride);
866 src0 = __msa_vshf_b(mask, src1, src0);
868 res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
870 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
871 res_r = __msa_sat_u_h(res_r, 7);
873 res = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
874 dst_data = __msa_aver_u_b(res, dst_data);
876 out0 = __msa_copy_u_h((v8i16) dst_data, 0);
877 out1 = __msa_copy_u_h((v8i16) dst_data, 2);
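/*
 * The *_and_aver_dst_* kernels back the "avg" entry points: the freshly
 * interpolated pixels are combined with what is already in dst using a
 * rounded byte average, which is what AVER_U.B computes per lane.  Scalar
 * equivalent (illustrative; assumes <stdint.h>):
 */
static inline uint8_t avg_round_sketch(uint8_t pred, uint8_t dst_pixel)
{
    return (uint8_t) ((pred + dst_pixel + 1) >> 1);
}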
886 uint32_t coeff0, uint32_t coeff1)
889 v16u8 dst0, dst1, dst2, dst3;
892 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
893 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
894 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
898 LD_UB4(src, src_stride, src0, src1, src2, src3);
899 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
901 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 1, (v8i16) dst1);
902 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 2, (v8i16) dst2);
903 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 3, (v8i16) dst3);
905 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
907 src0 = (v16u8) __msa_ilvr_d((v2i64) src2, (v2i64) src0);
909 res_r = __msa_dotp_u_h(src0, coeff_vec);
911 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
912 res_r = __msa_sat_u_h(res_r, 7);
914 res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
915 dst0 = __msa_aver_u_b((v16u8) res, dst0);
922 uint32_t coeff0, uint32_t coeff1)
924 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
925 v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
926 v8u16 res0_r, res1_r;
927 v16u8 res0, res1, mask;
928 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
929 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
930 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
934 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
935 LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
937 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 1, (v8i16) dst1);
938 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 2, (v8i16) dst2);
939 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 3, (v8i16) dst3);
941 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 1, (v8i16) dst5);
942 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 2, (v8i16) dst6);
943 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 3, (v8i16) dst7);
945 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
946 VSHF_B2_UB(src4, src5, src6, src7, mask, mask, src4, src6);
947 ILVR_D2_UB(src2, src0, src6, src4, src0, src4);
948 DOTP_UB2_UH(src0, src4, coeff_vec, coeff_vec, res0_r, res1_r);
955 PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);
959 dst += (4 * dst_stride);
965 uint32_t coeff0, uint32_t coeff1,
971 } else if (4 == height) {
974 } else if (8 == height) {
982 uint32_t coeff0, uint32_t coeff1)
984 uint32_t load0, load1;
986 v16u8 dst_data = { 0 };
989 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
990 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
991 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
995 LD_SB2(src, src_stride, src0, src1);
998 load1 = LW(dst + dst_stride);
1002 src0 = __msa_vshf_b(mask, src1, src0);
1004 res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
1006 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1007 res_r = __msa_sat_u_h(res_r, 7);
1008 res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1009 dst_data = __msa_aver_u_b((v16u8) res, dst_data);
1011 ST4x2_UB(dst_data, dst, dst_stride);
1022 uint32_t load0, load1;
1027 v8u16 res0_r, res1_r;
1028 v16u8 res0, res1, mask;
1029 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1030 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1031 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1035 for (row = (height >> 2); row--;) {
1036 LD_UB4(src, src_stride, src0, src1, src2, src3);
1037 src += (4 * src_stride);
1040 load1 = LW(dst + dst_stride);
1044 load0 = LW(dst + 2 * dst_stride);
1045 load1 = LW(dst + 3 * dst_stride);
1049 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
1050 DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0_r, res1_r);
1057 PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);
1060 ST4x4_UB(dst0, dst1, 0, 1, 0, 1, dst, dst_stride);
1061 dst += (4 * dst_stride);
1067 uint32_t coeff0, uint32_t coeff1,
1076 coeff0, coeff1, height);
1082 uint32_t coeff0, uint32_t coeff1,
1086 v16u8 src0, src1, src2, src3, out0, out1;
1087 v8u16 res0, res1, res2, res3;
1088 v16u8 dst0, dst1, dst2, dst3;
1090 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1091 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1092 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1096 for (row = height >> 2; row--;) {
1097 LD_UB4(src, src_stride, src0, src1, src2, src3);
1098 src += (4 * src_stride);
1099 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1100 VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
1101 VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
1102 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
1103 coeff_vec, res0, res1, res2, res3);
1104 SLLI_4V(res0, res1, res2, res3, 3);
1110 ST8x4_UB(out0, out1, dst, dst_stride);
1111 dst += (4 * dst_stride);
1117 uint32_t coeff0, uint32_t coeff1)
1119 uint16_t out0, out1;
1120 uint32_t load0, load1;
1121 v16i8 src0, src1, src2, tmp0, tmp1, res;
1122 v16u8 dst_data = { 0 };
1124 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1125 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1126 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1128 LD_SB3(src, src_stride, src0, src1, src2);
1130 load1 = LW(dst + dst_stride);
1134 ILVR_B2_SB(src1, src0, src2, src1, tmp0, tmp1);
1136 tmp0 = (v16i8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
1137 res_r = __msa_dotp_u_h((v16u8) tmp0, coeff_vec);
1139 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1140 res_r = __msa_sat_u_h(res_r, 7);
1141 res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1142 dst_data = __msa_aver_u_b((v16u8) res, dst_data);
1143 out0 = __msa_copy_u_h((v8i16) dst_data, 0);
1144 out1 = __msa_copy_u_h((v8i16) dst_data, 2);
1153 uint32_t coeff0, uint32_t coeff1)
1155 uint32_t load0, load1;
1156 v16i8 src0, src1, src2, src3, src4;
1157 v16u8 tmp0, tmp1, tmp2, tmp3;
1160 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1161 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1162 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1163 v16u8 dst_data = { 0 };
1165 LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
1168 load1 = LW(dst + dst_stride);
1170 dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, load0);
1171 dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 1, load1);
1173 load0 = LW(dst + 2 * dst_stride);
1174 load1 = LW(dst + 3 * dst_stride);
1176 dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, load0);
1177 dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 3, load1);
1179 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
1180 tmp0, tmp1, tmp2, tmp3);
1181 ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
1183 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
1185 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
1187 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1188 res_r = __msa_sat_u_h(res_r, 7);
1190 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1191 res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data);
1194 dst += (4 * dst_stride);
1199 uint32_t coeff0, uint32_t coeff1)
1201 uint32_t load0, load1, load2, load3;
1202 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
1203 v16u8 tmp0, tmp1, tmp2, tmp3;
1206 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1207 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1208 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1209 v16u8 dst_data0 = { 0 };
1210 v16u8 dst_data1 = { 0 };
1212 LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
1213 src += (5 * src_stride);
1214 LD_SB4(src, src_stride, src5, src6, src7, src8);
1216 LW4(dst, dst_stride, load0, load1, load2, load3);
1218 dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 0, load0);
1219 dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 1, load1);
1220 dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 2, load2);
1221 dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 3, load3);
1223 LW4(dst + 4 * dst_stride, dst_stride, load0, load1, load2, load3);
1225 dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 0, load0);
1226 dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 1, load1);
1227 dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 2, load2);
1228 dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 3, load3);
1230 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
1231 tmp0, tmp1, tmp2, tmp3);
1233 ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
1235 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
1237 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
1239 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1240 res_r = __msa_sat_u_h(res_r, 7);
1242 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1243 res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data0);
1246 dst += (4 * dst_stride);
1248 ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
1249 tmp0, tmp1, tmp2, tmp3);
1251 ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
1253 tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
1255 res_r = __msa_dotp_u_h(tmp0, coeff_vec);
1257 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1258 res_r = __msa_sat_u_h(res_r, 7);
1260 res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1261 res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data1);
1268 uint32_t coeff0, uint32_t coeff1,
1274 } else if (4 == height) {
1277 } else if (8 == height) {
1285 uint32_t coeff0, uint32_t coeff1)
1287 uint32_t load0, load1;
1288 v16i8 src0, src1, src2, tmp0, tmp1;
1289 v16u8 dst_data = { 0 };
1292 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1293 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1294 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1296 LD_SB3(src, src_stride, src0, src1, src2);
1299 load1 = LW(dst + dst_stride);
1302 ILVR_B2_SB(src1, src0, src2, src1, tmp0, tmp1);
1304 tmp0 = (v16i8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
1306 res_r = __msa_dotp_u_h((v16u8) tmp0, coeff_vec);
1308 res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
1309 res_r = __msa_sat_u_h(res_r, 7);
1310 res = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
1311 res = __msa_aver_u_b(res, dst_data);
1324 uint32_t load0, load1, row;
1325 v16i8 src0, src1, src2, src3, src4;
1326 v16u8 tmp0, tmp1, tmp2, tmp3;
1329 v8u16 res0_r, res1_r;
1331 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1332 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1333 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1338 for (row = (height >> 2); row--;) {
1339 LD_SB4(src, src_stride, src1, src2, src3, src4);
1340 src += (4 * src_stride);
1343 load1 = LW(dst + dst_stride);
1346 load0 = LW(dst + 2 * dst_stride);
1347 load1 = LW(dst + 3 * dst_stride);
1350 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
1351 tmp0, tmp1, tmp2, tmp3);
1352 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
1353 DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
1360 PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);
1363 ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
1364 dst += (4 * dst_stride);
1371 uint32_t coeff0, uint32_t coeff1,
1379 coeff0, coeff1, height);
1385 uint32_t coeff0, uint32_t coeff1,
1389 v16u8 src0, src1, src2, src3, src4;
1391 v8u16 res0, res1, res2, res3;
1392 v16u8 dst0, dst1, dst2, dst3;
1393 v16i8 coeff_vec0 = __msa_fill_b(coeff0);
1394 v16i8 coeff_vec1 = __msa_fill_b(coeff1);
1395 v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
1400 for (row = height >> 2; row--;) {
1401 LD_UB4(src, src_stride, src1, src2, src3, src4);
1402 src += (4 * src_stride);
1403 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1404 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
1405 src0, src1, src2, src3);
1406 DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
1407 coeff_vec, res0, res1, res2, res3);
1408 SLLI_4V(res0, res1, res2, res3, 3);
1414 ST8x4_UB(out0, out1, dst, dst_stride);
1416 dst += (4 * dst_stride);
1428 uint16_t out0, out1;
1431 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
1433 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1434 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1435 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1436 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1437 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1441 LD_UB3(src, src_stride, src0, src1, src2);
1442 LD_UB2(dst, dst_stride, dst0, dst1);
1443 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
1444 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
1445 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
1448 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
1449 res_vt0 = __msa_sat_u_h(res_vt0, 7);
1450 res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
1451 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 1, (v8i16) dst1);
1452 dst0 = __msa_aver_u_b((v16u8) res, dst0);
1453 out0 = __msa_copy_u_h((v8i16) dst0, 0);
1454 out1 = __msa_copy_u_h((v8i16) dst0, 1);
1468 v16u8 src0, src1, src2, src3, src4;
1469 v16u8 tmp0, tmp1, tmp2, tmp3;
1470 v16u8 dst0, dst1, dst2, dst3;
1471 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
1473 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1474 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1475 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1476 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1477 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1481 LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
1482 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1483 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, tmp0, tmp1);
1484 VSHF_B2_UB(src1, src2, src3, src4, mask, mask, tmp2, tmp3);
1485 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
1486 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
1487 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
1490 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
1491 res_vt0 = __msa_sat_u_h(res_vt0, 7);
1492 res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
1494 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 1, (v8i16) dst1);
1495 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 2, (v8i16) dst2);
1496 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 3, (v8i16) dst3);
1497 dst0 = __msa_aver_u_b((v16u8) res, dst0);
1499 ST2x4_UB(dst0, 0, dst, dst_stride);
1509 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
1510 v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
1511 v16u8 tmp0, tmp1, tmp2, tmp3;
1512 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
1514 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1515 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1516 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1517 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1518 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1522 LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
1523 src += (5 * src_stride);
1524 LD_UB4(src, src_stride, src5, src6, src7, src8);
1526 LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
1528 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 1, (v8i16) dst1);
1529 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 2, (v8i16) dst2);
1530 dst0 = (v16u8) __msa_insve_h((v8i16) dst0, 3, (v8i16) dst3);
1532 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 1, (v8i16) dst5);
1533 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 2, (v8i16) dst6);
1534 dst4 = (v16u8) __msa_insve_h((v8i16) dst4, 3, (v8i16) dst7);
1536 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, tmp0, tmp1);
1537 VSHF_B2_UB(src1, src2, src3, src4, mask, mask, tmp2, tmp3);
1538 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
1539 VSHF_B2_UB(src4, src5, src6, src7, mask, mask, tmp0, tmp1);
1540 VSHF_B2_UB(src5, src6, src7, src8, mask, mask, tmp2, tmp3);
1541 ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src4, src5);
1542 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
1543 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
1546 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
1547 res_vt0 = __msa_sat_u_h(res_vt0, 7);
1548 res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
1549 dst0 = __msa_aver_u_b((v16u8) res, dst0);
1551 ST2x4_UB(dst0, 0, dst, dst_stride);
1552 dst += (4 * dst_stride);
1554 DOTP_UB2_UH(src4, src5, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
1555 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
1558 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
1559 res_vt0 = __msa_sat_u_h(res_vt0, 7);
1560 res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
1561 dst4 = __msa_aver_u_b((v16u8) res, dst4);
1563 ST2x4_UB(dst4, 0, dst, dst_stride);
1576 coef_hor0, coef_hor1,
1577 coef_ver0, coef_ver1);
1578 } else if (4 == height) {
1580 coef_hor0, coef_hor1,
1581 coef_ver0, coef_ver1);
1582 } else if (8 == height) {
1584 coef_hor0, coef_hor1,
1585 coef_ver0, coef_ver1);
1598 v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
1600 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1601 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1602 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1603 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1604 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1608 LD_UB3(src, src_stride, src0, src1, src2);
1609 LD_UB2(dst, dst_stride, dst0, dst1);
1610 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
1611 DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
1612 MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);
1615 res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
1616 res_vt0 = __msa_sat_u_h(res_vt0, 7);
1617 res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
1618 dst0 = (v16u8) __msa_insve_w((v4i32) dst0, 1, (v4i32) dst1);
1619 dst0 = __msa_aver_u_b((v16u8) res, dst0);
1635 v16u8 src0, src1, src2, src3, src4;
1636 v16u8 dst0, dst1, dst2, dst3;
1637 v8u16 res_hz0, res_hz1, res_hz2, res_hz3;
1638 v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
1640 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1641 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1642 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1643 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1644 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1652 for (row = (height >> 2); row--;) {
1653 LD_UB4(src, src_stride, src1, src2, src3, src4);
1654 src += (4 * src_stride);
1656 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1658 VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
1659 VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
1660 DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
1661 coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2,
1663 MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2,
1664 coeff_vt_vec1, res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2,
1666 ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
1669 PCKEV_B2_UB(res_vt0, res_vt0, res_vt1, res_vt1, res0, res1);
1671 dst0 = (v16u8) __msa_insve_w((v4i32) dst0, 1, (v4i32) dst1);
1672 dst1 = (v16u8) __msa_insve_w((v4i32) dst2, 1, (v4i32) dst3);
1676 ST4x4_UB(dst0, dst1, 0, 1, 0, 1, dst, dst_stride);
1677 dst += (4 * dst_stride);
1692 coef_hor0, coef_hor1,
1693 coef_ver0, coef_ver1);
1696 coef_hor0, coef_hor1,
1697 coef_ver0, coef_ver1, height);
1710 v16u8 src0, src1, src2, src3, src4, out0, out1;
1711 v8u16 res_hz0, res_hz1, res_hz2;
1712 v8u16 res_hz3, res_hz4;
1713 v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
1714 v16u8 dst0, dst1, dst2, dst3;
1716 v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
1717 v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
1718 v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
1719 v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
1720 v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
1727 src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
1728 res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
1730 for (row = (height >> 2); row--;) {
1731 LD_UB4(src, src_stride, src1, src2, src3, src4);
1732 src += (4 * src_stride);
1734 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1735 VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
1736 VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
1737 DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
1738 coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3,
1740 MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3,
1741 coeff_vt_vec0, res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2,
1744 res_vt0 += (res_hz0 * coeff_vt_vec1);
1745 res_vt1 += (res_hz1 * coeff_vt_vec1);
1746 res_vt2 += (res_hz2 * coeff_vt_vec1);
1747 res_vt3 += (res_hz3 * coeff_vt_vec1);
1749 SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
1750 SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
1752 PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
1755 ST8x4_UB(out0, out1, dst, dst_stride);
1756 dst += (4 * dst_stride);
1767 uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
1768 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
1770 if (0 == height % 12) {
1771 for (cnt = (height / 12); cnt--;) {
1773 src0, src1, src2, src3, src4, src5, src6, src7);
1774 src += (8 * src_stride);
1776 out0 = __msa_copy_u_d((v2i64) src0, 0);
1777 out1 = __msa_copy_u_d((v2i64) src1, 0);
1778 out2 = __msa_copy_u_d((v2i64) src2, 0);
1779 out3 = __msa_copy_u_d((v2i64) src3, 0);
1780 out4 = __msa_copy_u_d((v2i64) src4, 0);
1781 out5 = __msa_copy_u_d((v2i64) src5, 0);
1782 out6 = __msa_copy_u_d((v2i64) src6, 0);
1783 out7 = __msa_copy_u_d((v2i64) src7, 0);
1785 SD4(out0, out1, out2, out3, dst, dst_stride);
1786 dst += (4 * dst_stride);
1787 SD4(out4, out5, out6, out7, dst, dst_stride);
1788 dst += (4 * dst_stride);
1790 LD_UB4(src, src_stride, src0, src1, src2, src3);
1791 src += (4 * src_stride);
1793 out0 = __msa_copy_u_d((v2i64) src0, 0);
1794 out1 = __msa_copy_u_d((v2i64) src1, 0);
1795 out2 = __msa_copy_u_d((v2i64) src2, 0);
1796 out3 = __msa_copy_u_d((v2i64) src3, 0);
1798 SD4(out0, out1, out2, out3, dst, dst_stride);
1799 dst += (4 * dst_stride);
1801 } else if (0 == height % 8) {
1802 for (cnt = height >> 3; cnt--;) {
1804 src0, src1, src2, src3, src4, src5, src6, src7);
1805 src += (8 * src_stride);
1807 out0 = __msa_copy_u_d((v2i64) src0, 0);
1808 out1 = __msa_copy_u_d((v2i64) src1, 0);
1809 out2 = __msa_copy_u_d((v2i64) src2, 0);
1810 out3 = __msa_copy_u_d((v2i64) src3, 0);
1811 out4 = __msa_copy_u_d((v2i64) src4, 0);
1812 out5 = __msa_copy_u_d((v2i64) src5, 0);
1813 out6 = __msa_copy_u_d((v2i64) src6, 0);
1814 out7 = __msa_copy_u_d((v2i64) src7, 0);
1816 SD4(out0, out1, out2, out3, dst, dst_stride);
1817 dst += (4 * dst_stride);
1818 SD4(out4, out5, out6, out7, dst, dst_stride);
1819 dst += (4 * dst_stride);
1821 } else if (0 == height % 4) {
1822 for (cnt = (height / 4); cnt--;) {
1823 LD_UB4(src, src_stride, src0, src1, src2, src3);
1824 src += (4 * src_stride);
1825 out0 = __msa_copy_u_d((v2i64) src0, 0);
1826 out1 = __msa_copy_u_d((v2i64) src1, 0);
1827 out2 = __msa_copy_u_d((v2i64) src2, 0);
1828 out3 = __msa_copy_u_d((v2i64) src3, 0);
1830 SD4(out0, out1, out2, out3, dst, dst_stride);
1831 dst += (4 * dst_stride);
1833 } else if (0 == height % 2) {
1834 for (cnt = (height / 2); cnt--;) {
1835 LD_UB2(src, src_stride, src0, src1);
1836 src += (2 * src_stride);
1837 out0 = __msa_copy_u_d((v2i64) src0, 0);
1838 out1 = __msa_copy_u_d((v2i64) src1, 0);
1853 uint32_t out0, out1, out2, out3;
1855 v16u8 dst0, dst1, dst2, dst3;
1857 if (0 == (height % 4)) {
1858 for (cnt = (height / 4); cnt--;) {
1859 LD_UB4(src, src_stride, src0, src1, src2, src3);
1860 src += (4 * src_stride);
1862 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1864 AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
1865 dst0, dst1, dst2, dst3);
1867 out0 = __msa_copy_u_w((v4i32) dst0, 0);
1868 out1 = __msa_copy_u_w((v4i32) dst1, 0);
1869 out2 = __msa_copy_u_w((v4i32) dst2, 0);
1870 out3 = __msa_copy_u_w((v4i32) dst3, 0);
1871 SW4(out0, out1, out2, out3, dst, dst_stride);
1872 dst += (4 * dst_stride);
1874 } else if (0 == (height % 2)) {
1875 for (cnt = (height / 2); cnt--;) {
1876 LD_UB2(src, src_stride, src0, src1);
1877 src += (2 * src_stride);
1879 LD_UB2(dst, dst_stride, dst0, dst1);
1883 out0 = __msa_copy_u_w((v4i32) dst0, 0);
1884 out1 = __msa_copy_u_w((v4i32) dst1, 0);
1898 uint64_t out0, out1, out2, out3;
1900 v16u8 dst0, dst1, dst2, dst3;
1902 for (cnt = (height / 4); cnt--;) {
1903 LD_UB4(src, src_stride, src0, src1, src2, src3);
1904 src += (4 * src_stride);
1905 LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
1907 AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
1908 dst0, dst1, dst2, dst3);
1910 out0 = __msa_copy_u_d((v2i64) dst0, 0);
1911 out1 = __msa_copy_u_d((v2i64) dst1, 0);
1912 out2 = __msa_copy_u_d((v2i64) dst2, 0);
1913 out3 = __msa_copy_u_d((v2i64) dst3, 0);
1914 SD4(out0, out1, out2, out3, dst, dst_stride);
1915 dst += (4 * dst_stride);
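/*
 * With both fractions zero the "avg" entry points reduce to a rounded
 * average of source and destination rows (avg_width4 / avg_width8 above).
 * Scalar equivalent of the 8-wide case (illustrative; assumes <stdint.h>):
 */
static void avg_width8_sketch(const uint8_t *src, int src_stride,
                              uint8_t *dst, int dst_stride, int height)
{
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < 8; c++)
            dst[c] = (uint8_t) ((src[c] + dst[c] + 1) >> 1);
        src += src_stride;
        dst += dst_stride;
    }
}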
1922 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
1940 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
1949 for (cnt = height; cnt--;) {
1950 *((uint32_t *) dst) = *((uint32_t *) src);
1963 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
1972 for (cnt = height; cnt--;) {
1973 *((uint16_t *) dst) = *((uint16_t *) src);
1984 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
1989 stride, x, (8 - x), y,
1993 stride, x, (8 - x), height);
1996 stride, y, (8 - y), height);
2005 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
2009 stride, x, (8 - x), y,
2013 stride, x, (8 - x), height);
2016 stride, y, (8 - y), height);
2027 av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
2031 stride, x, (8 - x), y,
2035 stride, x, (8 - x), height);
2038 stride, y, (8 - y), height);
2040 for (cnt = height; cnt--;) {
2041 dst[0] = (dst[0] + src[0] + 1) >> 1;
2042 dst[1] = (dst[1] + src[1] + 1) >> 1;
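/*
 * The public entry points all follow the dispatch pattern visible above:
 * both fractions non-zero selects an hv kernel, only x an hz kernel, only y
 * a vt kernel, and otherwise the block is copied (put) or averaged into dst
 * (avg) unchanged, with the weights passed as (x, 8 - x) and (y, 8 - y).
 * Sketch of that selection logic (illustrative names; assumes <stdint.h>):
 */
static void chroma_mc_dispatch_sketch(uint8_t *dst, const uint8_t *src,
                                      int stride, int height, int x, int y)
{
    if (x && y) {
        /* 2-D bilinear filter, weights (x, 8 - x) and (y, 8 - y) */
    } else if (x) {
        /* horizontal-only two-tap, weights (x, 8 - x) */
    } else if (y) {
        /* vertical-only two-tap, weights (y, 8 - y) */
    } else {
        /* no sub-pel offset: pass the block through as-is (2-wide case) */
        for (int r = 0; r < height; r++)
            for (int c = 0; c < 2; c++)
                dst[r * stride + c] = src[r * stride + c];
    }
}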
static void avc_chroma_vt_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hz_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_and_aver_dst_2x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hz_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hz_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hv_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hz_and_aver_dst_2x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hv_and_aver_dst_4x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hv_and_aver_dst_2x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hv_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_vt_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
#define MUL2(in0, in1, in2, in3, out0, out1)
static void avc_chroma_vt_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hv_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
static void avc_chroma_hv_and_aver_dst_2w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)
static void avc_chroma_hz_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_and_aver_dst_8w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
#define av_assert2(cond)
assert() equivalent that may be placed in speed-critical code.
static void avc_chroma_vt_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hv_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
static void avc_chroma_hv_and_aver_dst_8w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
static void avc_chroma_hv_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
static void avc_chroma_hv_and_aver_dst_4x4mul_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
void ff_avg_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)
void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)
#define INSERT_W2_UB(...)
static const uint16_t mask[17]
static void avc_chroma_hv_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hv_and_aver_dst_4w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1, int32_t height)
static void avc_chroma_hz_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_vt_and_aver_dst_2x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
#define SW4(in0, in1, in2, in3, pdst, stride)
void ff_avg_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)
void ff_put_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)
static void avc_chroma_hz_and_aver_dst_4x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_and_aver_dst_4x2_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hz_and_aver_dst_4x4multiple_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
#define ST2x4_UB(in, stidx, pdst, stride)
static void avc_chroma_hz_and_aver_dst_2w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static const uint8_t chroma_mask_arr[16 * 5]
static void avc_chroma_vt_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void copy_width8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height)
static void avc_chroma_hz_nonmult_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hz_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
void ff_avg_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)
static void avc_chroma_hz_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_vt_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avc_chroma_hz_and_aver_dst_2x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1)
#define ADD2(in0, in1, in2, in3, out0, out1)
static void avc_chroma_vt_and_aver_dst_2w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hv_and_aver_dst_2x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
#define SD4(in0, in1, in2, in3, pdst, stride)
static void avc_chroma_hv_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
#define SLLI_4V(in0, in1, in2, in3, shift)
#define ST4x8_UB(in0, in1, pdst, stride)
static void avc_chroma_hv_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
#define LW4(psrc, stride, out0, out1, out2, out3)
#define ST8x4_UB(in0, in1, pdst, stride)
static void avc_chroma_hv_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hz_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
#define ST8x8_UB(in0, in1, in2, in3, pdst, stride)
static void avc_chroma_vt_and_aver_dst_4w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hz_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
static void avg_width8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height)
static void avg_width4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height)
static void avc_chroma_hv_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_hz_and_aver_dst_8w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hz_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
#define ST8x1_UB(in, pdst)
#define ST4x2_UB(in, pdst, stride)
static void avc_chroma_hv_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coef_hor0, uint32_t coef_hor1, uint32_t coef_ver0, uint32_t coef_ver1)
static void avc_chroma_vt_and_aver_dst_4x4mul_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hz_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_hz_and_aver_dst_4w_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, uint32_t coeff0, uint32_t coeff1, int32_t height)
static void avc_chroma_vt_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, uint32_t coeff0, uint32_t coeff1)
void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int x, int y)