    dst += (4 * dst_stride);

    dst += (8 * dst_stride);
                                        uint8_t *dst, int32_t dst_stride)
    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
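    /* Multiplying an 8-bit pixel by 0x0101010101010101 replicates it into
     * every byte of the 64-bit word, i.e. one full 8-pixel output row. */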
    out0 = src[0 * src_stride] * 0x0101010101010101;
    out1 = src[1 * src_stride] * 0x0101010101010101;
    out2 = src[2 * src_stride] * 0x0101010101010101;
    out3 = src[3 * src_stride] * 0x0101010101010101;
    out4 = src[4 * src_stride] * 0x0101010101010101;
    out5 = src[5 * src_stride] * 0x0101010101010101;
    out6 = src[6 * src_stride] * 0x0101010101010101;
    out7 = src[7 * src_stride] * 0x0101010101010101;
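    /* Store the eight rows as 64-bit words, four rows per SD4() call. */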
    SD4(out0, out1, out2, out3, dst, dst_stride);
    dst += (4 * dst_stride);
    SD4(out4, out5, out6, out7, dst, dst_stride);

                                          uint8_t *dst, int32_t dst_stride)
    uint8_t inp0, inp1, inp2, inp3;
    v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
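    /* __msa_fill_b() broadcasts one left-column pixel into all 16 lanes of a
     * vector, so each vector below holds one complete 16-pixel output row. */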
    inp0 = src[0 * src_stride];
    inp1 = src[1 * src_stride];
    inp2 = src[2 * src_stride];
    inp3 = src[3 * src_stride];
    src0 = (v16u8) __msa_fill_b(inp0);
    src1 = (v16u8) __msa_fill_b(inp1);
    src2 = (v16u8) __msa_fill_b(inp2);
    src3 = (v16u8) __msa_fill_b(inp3);
    inp0 = src[4 * src_stride];
    inp1 = src[5 * src_stride];
    inp2 = src[6 * src_stride];
    inp3 = src[7 * src_stride];
    src4 = (v16u8) __msa_fill_b(inp0);
    src5 = (v16u8) __msa_fill_b(inp1);
    src6 = (v16u8) __msa_fill_b(inp2);
    src7 = (v16u8) __msa_fill_b(inp3);
    inp0 = src[ 8 * src_stride];
    inp1 = src[ 9 * src_stride];
    inp2 = src[10 * src_stride];
    inp3 = src[11 * src_stride];
    src8 = (v16u8) __msa_fill_b(inp0);
    src9 = (v16u8) __msa_fill_b(inp1);
    src10 = (v16u8) __msa_fill_b(inp2);
    src11 = (v16u8) __msa_fill_b(inp3);
    inp0 = src[12 * src_stride];
    inp1 = src[13 * src_stride];
    inp2 = src[14 * src_stride];
    inp3 = src[15 * src_stride];
    src12 = (v16u8) __msa_fill_b(inp0);
    src13 = (v16u8) __msa_fill_b(inp1);
    src14 = (v16u8) __msa_fill_b(inp2);
    src15 = (v16u8) __msa_fill_b(inp3);
    dst += (8 * dst_stride);
    ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15,
           dst, dst_stride);
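/* Generate a DC predictor that fills the whole 8x8 block with the constant
 * "val"; used for the 127 and 129 special cases invoked further below. */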
#define INTRA_PREDICT_VALDC_8X8_MSA(val)                                       \
static void intra_predict_##val##dc_8x8_msa(uint8_t *dst, int32_t dst_stride)  \
    v16i8 store = __msa_fill_b(val);                                           \
    uint64_t out = __msa_copy_u_d((v2i64) store, 0);                           \
    SD4(out, out, out, out, dst, dst_stride);                                  \
    dst += (4 * dst_stride);                                                   \
    SD4(out, out, out, out, dst, dst_stride);                                  \
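/* Same scheme for 16x16 blocks: one constant-filled vector is stored to all
 * sixteen rows. */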
#define INTRA_PREDICT_VALDC_16X16_MSA(val)                                     \
static void intra_predict_##val##dc_16x16_msa(uint8_t *dst,                    \
                                              int32_t dst_stride)              \
    v16u8 out = (v16u8) __msa_fill_b(val);                                     \
    ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);           \
    dst += (8 * dst_stride);                                                   \
    ST_UB8(out, out, out, out, out, out, out, out, dst, dst_stride);           \
    int32_t res, res0, res1, res2, res3;
    v16i8 shf_mask = { 3, 5, 2, 6, 1, 7, 0, 8, 3, 5, 2, 6, 1, 7, 0, 8 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 1, 2, 3, 4 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v8i16 vec9, vec10, vec11;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8;
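    /* shf_mask interleaves top-row pixels that lie symmetrically about the
     * block centre; the pairwise subtract and the 1..4 weights below yield
     * the horizontal gradient term of the plane-style predictor. */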
    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    sum = __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w((v4i32) sum, 0);

    res0 = (res0 + 16) >> 5;
    res1 = (res1 + 16) >> 5;

    res3 = 3 * (res0 + res1);

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res);
    vec2 = __msa_fill_w(res1);
    vec5 = vec8 * int_multiplier;

    for (lpcnt = 4; lpcnt--;) {
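        /* Per iteration: shift the 32-bit accumulators down by 5, pack them
         * to bytes and copy out two 8-pixel rows. */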
        SRA_4V(vec0, vec1, vec6, vec7, 5);
        PCKEV_B2_SH(vec10, vec10, vec11, vec11, vec10, vec11);
        out0 = __msa_copy_s_d((v2i64) vec10, 0);
        out1 = __msa_copy_s_d((v2i64) vec11, 0);

    int32_t res0, res1, res2, res3;
    uint64_t load0, load1;
    v16i8 shf_mask = { 7, 8, 6, 9, 5, 10, 4, 11, 3, 12, 2, 13, 1, 14, 0, 15 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 5, 6, 7, 8 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v16u8 src_top = { 0 };
    v16u8 store0, store1;
    v8i16 vec9, vec10, vec11, vec12;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, res_add;
    v4i32 reg0, reg1, reg2, reg3;
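    /* 16x16 variant of the same scheme: pixel pairs symmetric about the
     * centre of the top row are differenced and weighted 1..8. */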
    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    res_add = (v4i32) __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w(res_add, 0) + __msa_copy_s_w(res_add, 2);

    res0 = (res0 + 32) >> 6;
    res1 = (res1 + 32) >> 6;

    res3 = 7 * (res0 + res1);

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res2);
    vec5 = __msa_fill_w(res1);

    vec7 = vec8 * int_multiplier;

    for (lpcnt = 8; lpcnt--;) {
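        /* Each iteration shifts the accumulators down by 5 and packs them
         * into two 16-pixel rows (store0/store1). */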
        SRA_4V(vec0, vec1, vec2, vec3, 5);
        SRA_4V(reg0, reg1, reg2, reg3, 5);

        PCKEV_B2_UB(vec10, vec9, vec12, vec11, store0, store1);

    uint32_t out0, out1, out2, out3;
    uint64_t store0, store1;
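    /* Pairwise horizontal adds leave the sum of four adjacent top pixels in
     * each 32-bit lane. */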
    add = __msa_hadd_u_h((v16u8) src_top, (v16u8) src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    store0 = ((uint64_t) out1 << 32) | out0;
    store1 = ((uint64_t) out3 << 32) | out2;
    out0 = src0 * 0x0101010101010101;
    out1 = src1 * 0x0101010101010101;

    v16i8 mask = { 0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0 };
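    /* srari_w turns each 4-pixel sum into a rounded average; the shuffle then
     * fills bytes 0..3 with the first average and bytes 4..7 with the second,
     * forming one 8-pixel DC row. */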
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    sum = (v4u32) __msa_srari_w((v4i32) sum, 2);
    res0 = (v16u8) __msa_vshf_b(mask, (v16i8) sum, (v16i8) sum);
    out0 = __msa_copy_u_d((v2i64) res0, 0);
    uint32_t out0, out1, out2;
    uint64_t store0, store1;

    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);
    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;

    store1 = ((uint64_t) out1 << 32);
    store0 = store1 | ((uint64_t) out2);
    store1 = store1 | ((uint64_t) out0);
    uint32_t out0, out1, out2, out3;
    uint64_t store0, store1;

    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);
    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    store0 = ((uint64_t) out1 << 32) | out0;
    store1 = ((uint64_t) out3 << 32) | out2;
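    /* 0x8080808080808080 is the byte value 128 replicated across a full
     * 8-pixel row. */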
    out0 = src0 * 0x0101010101010101;
    out1 = 0x8080808080808080;

    out0 = 0x8080808080808080;
    out1 = src0 * 0x0101010101010101;
    uint8_t *src_left = src - 1;
    uint32_t addition = 0;
    v16u8 src_above, out;
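    /* Sum the 16 pixels above the block with a chain of pairwise horizontal
     * adds. */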
    src_above = LD_UB(src_top);

    sum_above = __msa_hadd_u_h(src_above, src_above);
    sum_top = __msa_hadd_u_w(sum_above, sum_above);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    addition = __msa_copy_u_w((v4i32) sum, 0);
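    /* Add the 16 pixels in the column immediately left of the block. */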
    addition += src_left[ 0 * stride];
    addition += src_left[ 1 * stride];
    addition += src_left[ 2 * stride];
    addition += src_left[ 3 * stride];
    addition += src_left[ 4 * stride];
    addition += src_left[ 5 * stride];
    addition += src_left[ 6 * stride];
    addition += src_left[ 7 * stride];
    addition += src_left[ 8 * stride];
    addition += src_left[ 9 * stride];
    addition += src_left[10 * stride];
    addition += src_left[11 * stride];
    addition += src_left[12 * stride];
    addition += src_left[13 * stride];
    addition += src_left[14 * stride];
    addition += src_left[15 * stride];
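    /* DC value: rounded average of the 32 neighbouring pixels (16 above plus
     * 16 to the left), broadcast to every byte of the output vector. */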
    addition = (addition + 16) >> 5;
    out = (v16u8) __msa_fill_b(addition);

    uint8_t *src_left = src - 1;
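    /* Sum the 16 left-column pixels; the DC value is their rounded average
     * ((sum + 8) >> 4). */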
    addition = src_left[ 0 * stride];
    addition += src_left[ 1 * stride];
    addition += src_left[ 2 * stride];
    addition += src_left[ 3 * stride];
    addition += src_left[ 4 * stride];
    addition += src_left[ 5 * stride];
    addition += src_left[ 6 * stride];
    addition += src_left[ 7 * stride];
    addition += src_left[ 8 * stride];
    addition += src_left[ 9 * stride];
    addition += src_left[10 * stride];
    addition += src_left[11 * stride];
    addition += src_left[12 * stride];
    addition += src_left[13 * stride];
    addition += src_left[14 * stride];
    addition += src_left[15 * stride];
    addition = (addition + 8) >> 4;
    out = (v16u8) __msa_fill_b(addition);

    v16u8 src_above, out;
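    /* Top-only DC: reduce the 16 pixels above to a single sum, take the
     * rounded average with __msa_srari_d(.., 4) and broadcast it. */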
    src_above = LD_UB(src_top);

    sum_above = __msa_hadd_u_h(src_above, src_above);
    sum_top = __msa_hadd_u_w(sum_above, sum_above);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
    sum = __msa_hadd_u_d(sum_top, sum_top);
    sum = (v2u64) __msa_srari_d((v2i64) sum, 4);
    out = (v16u8) __msa_splati_b((v16i8) sum, 0);
    store = (v16u8) __msa_fill_b(128);
    out = __msa_copy_u_d((v2i64) store, 0);

    out = (v16u8) __msa_fill_b(128);
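/* Wrappers that dispatch to the macro-generated 127/129 DC predictors. */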
    intra_predict_127dc_8x8_msa(src, stride);

    intra_predict_129dc_8x8_msa(src, stride);

    intra_predict_127dc_16x16_msa(src, stride);

    intra_predict_129dc_16x16_msa(src, stride);