#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
    vz0 = vec_add(vb0,vb2);                         \
    vz1 = vec_sub(vb0,vb2);                         \
    vz2 = vec_sra(vb1,vec_splat_u16(1));            \
    vz2 = vec_sub(vz2,vb3);                         \
    vz3 = vec_sra(vb3,vec_splat_u16(1));            \
    vz3 = vec_add(vb1,vz3);                         \
    va0 = vec_add(vz0,vz3);                         \
    va1 = vec_add(vz1,vz2);                         \
    va2 = vec_sub(vz1,vz2);                         \
    va3 = vec_sub(vz0,vz3)
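/* VEC_1D_DCT above is the two-stage butterfly of the 4-point H.264 inverse
 * transform; the scalar equivalent of one pass is roughly:
 *   z0 = b0 + b2;          z1 = b0 - b2;
 *   z2 = (b1 >> 1) - b3;   z3 = b1 + (b3 >> 1);
 *   a0 = z0 + z3;  a1 = z1 + z2;  a2 = z1 - z2;  a3 = z0 - z3;
 * VEC_TRANSPOSE_4 below transposes the 4x4 matrix of 16-bit coefficients held
 * in the four input vectors using three rounds of merge operations. */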
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
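/* Destination loads for the 4x4 add: the big-endian AltiVec path does an
 * aligned vec_ld plus a vec_perm with the lvsl-derived mask to realign the
 * unaligned dst pointer; the little-endian VSX path loads unaligned data
 * directly with vec_vsx_ld. */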
#if HAVE_BIGENDIAN
#define vdst_load(d)            \
    vdst_orig = vec_ld(0, dst); \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
#else
#define vdst_load(d) vdst = vec_vsx_ld(0, dst)
#endif
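/* Load four destination pixels, widen them to signed 16 bit, add the
 * reconstructed residual, saturate-pack back to unsigned bytes and store the
 * resulting 4 bytes with vec_ste at the element offset computed from dst. */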
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)            \
    vdst_load();                                    \
    vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss);                      \
    va_u8 = vec_packsu(va, zero_s16v);              \
    va_u32 = vec_splat((vec_u32)va_u8, 0);          \
    vec_ste(va_u32, element, (uint32_t*)dst);
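/* 4x4 inverse transform + add (h264_idct_add_altivec): transform the rows,
 * transpose, transform the columns, scale down by 6 bits and add the result
 * to the prediction in dst. The coefficient block is cleared once it has been
 * read (see the memset below). */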
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;

    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);
    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
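/* One-dimensional 8-point inverse transform for the H.264 8x8 luma blocks.
 * The even coefficients (s0, s2, s4, s6) feed the a0/a2/a4/a6 even half, the
 * odd ones (s1, s3, s5, s7) the a1/a3/a5/a7 odd half; the two halves are then
 * combined by a final butterfly into d0..d7. */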
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    vec_s16 a0v = vec_add(s0, s4); \
    vec_s16 a2v = vec_sub(s0, s4); \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    vec_s16 b0v = vec_add(a0v, a6v); \
    vec_s16 b2v = vec_add(a2v, a4v); \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    d0 = vec_add(b0v, b7v); \
    d1 = vec_add(b2v, b5v); \
    d2 = vec_add(b4v, b3v); \
    d3 = vec_add(b6v, b1v); \
    d4 = vec_sub(b6v, b1v); \
    d5 = vec_sub(b4v, b3v); \
    d6 = vec_sub(b2v, b5v); \
    d7 = vec_sub(b0v, b7v); \
}
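/* Unaligned load/store helpers for the 8x8 add. On big-endian AltiVec the
 * destination row is assembled from two aligned loads plus vec_perm, and
 * written back through a select against the 'sel' edge mask so that only the
 * eight target bytes are modified. On little-endian VSX the unaligned
 * vec_vsx_ld / vec_vsx_st instructions are used directly. */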
#if HAVE_BIGENDIAN
#define GET_2PERM(ldv, stv, d) \
    ldv = vec_lvsl(0, d);      \
    stv = vec_lvsr(8, d);
#define dstv_load(d)            \
    vec_u8 hv = vec_ld( 0, d ); \
    vec_u8 lv = vec_ld( 7, d);  \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );
#define dest_unligned_store(d)                               \
    vec_u8 edgehv;                                           \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv = vec_sel( lv, bodyv, edgelv );                       \
    vec_st( lv, 7, d );                                      \
    hv = vec_ld( 0, d );                                     \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );            \
    hv = vec_sel( hv, bodyv, edgehv );                       \
    vec_st( hv, 0, d )
#else
#define GET_2PERM(ldv, stv, d) {}
#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
#define dest_unligned_store(d)                                        \
    vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3)); \
    vec_vsx_st(dst8, 0, d)
#endif /* HAVE_BIGENDIAN */
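/* Scale one transformed row down by 6 bits, add it to the destination pixels
 * (widened to 16 bit), saturate back to unsigned bytes and store 8 pixels. */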
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    dstv_load(dest);                                                   \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                           \
    vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv);               \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);              \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);                  \
    dest_unligned_store(dest);                                         \
}
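/* Full 8x8 inverse transform + add: load the eight coefficient rows, run the
 * 1-D transform, transpose with TRANSPOSE8, run the 1-D transform again on
 * the columns, then add each row to the prediction with
 * ALTIVEC_STORE_SUM_CLIP. */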
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv, perm_stv;
    GET_2PERM(perm_ldv, perm_stv, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
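/* DC-only shortcut used by h264_idct_dc_add_altivec / h264_idct8_dc_add_altivec
 * below: the single DC coefficient is rounded, split into a saturated positive
 * and negative byte splat, and then added to / subtracted from 'size' rows of
 * destination pixels, four rows per loop iteration. */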
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;

    dc = (block[0] + 32) >> 6;

    v_dc32 = vec_lde(0, &dc);
    dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);

    aligner = vec_lvsr(0, dst);
    aligner = vec_perm(aligner, zero_u8v, vcswapc());
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);
static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}
static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
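/* Block-level wrappers: add16/add16intra walk the sixteen 4x4 luma blocks of a
 * macroblock, idct8_add4 the four 8x8 blocks, and add8 the 4x4 blocks of both
 * chroma planes. Each block is dispatched either to the full IDCT+add or to
 * the DC-only shortcut, based on the non-zero-coefficient counts in nnzc[]. */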
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,

        int nnz = nnzc[ scan8[i] ];

        if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
        else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,

        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,

    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];

        if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
        else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,

    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
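/* Transpose back the four lines (p1, p0, q0, q1) that the loop filter may have
 * modified, so they can be written to the picture as columns again. */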
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8 r4;                 \
    register vec_u8 r5;                 \
    register vec_u8 r6;                 \
    register vec_u8 r7;                 \
    r4 = vec_mergeh(r0, r2);            \
    r5 = vec_mergel(r0, r2);            \
    r6 = vec_mergeh(r1, r3);            \
    r7 = vec_mergel(r1, r3);            \
    r0 = vec_mergeh(r4, r6);            \
    r1 = vec_mergel(r4, r6);            \
    r2 = vec_mergeh(r5, r7);            \
    r3 = vec_mergel(r5, r7);            \
}
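/* Write a transposed 16x4 block back to the frame: the four vectors are
 * stored to an aligned scratch buffer and copied out as sixteen 4-byte rows
 * with plain 32-bit stores, since each destination row is only 4 bytes wide
 * and generally unaligned. */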
static inline void write16x4(uint8_t *dst, int dst_stride,

    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    *dst_int                       = *src_int;
    *(dst_int +    int_dst_stride) = *(src_int + 1);
    *(dst_int +  2*int_dst_stride) = *(src_int + 2);
    *(dst_int +  3*int_dst_stride) = *(src_int + 3);
    *(dst_int +  4*int_dst_stride) = *(src_int + 4);
    *(dst_int +  5*int_dst_stride) = *(src_int + 5);
    *(dst_int +  6*int_dst_stride) = *(src_int + 6);
    *(dst_int +  7*int_dst_stride) = *(src_int + 7);
    *(dst_int +  8*int_dst_stride) = *(src_int + 8);
    *(dst_int +  9*int_dst_stride) = *(src_int + 9);
    *(dst_int + 10*int_dst_stride) = *(src_int + 10);
    *(dst_int + 11*int_dst_stride) = *(src_int + 11);
    *(dst_int + 12*int_dst_stride) = *(src_int + 12);
    *(dst_int + 13*int_dst_stride) = *(src_int + 13);
    *(dst_int + 14*int_dst_stride) = *(src_int + 14);
    *(dst_int + 15*int_dst_stride) = *(src_int + 15);
}
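/* Gather 16 rows of pixels around a vertical edge and transpose them so that
 * the six vectors r8..r13 end up holding the p2, p1, p0, q0, q1, q2 columns
 * needed by the horizontal deblocking filter; only the six output vectors the
 * filter actually needs are fully computed. */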
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src); \
    register vec_u8 r1  = unaligned_load(   src_stride, src); \
    register vec_u8 r2  = unaligned_load(2* src_stride, src); \
    register vec_u8 r3  = unaligned_load(3* src_stride, src); \
    register vec_u8 r4  = unaligned_load(4* src_stride, src); \
    register vec_u8 r5  = unaligned_load(5* src_stride, src); \
    register vec_u8 r6  = unaligned_load(6* src_stride, src); \
    register vec_u8 r7  = unaligned_load(7* src_stride, src); \
    register vec_u8 r14 = unaligned_load(14*src_stride, src); \
    register vec_u8 r15 = unaligned_load(15*src_stride, src); \
    r8  = unaligned_load( 8*src_stride, src); \
    r9  = unaligned_load( 9*src_stride, src); \
    r10 = unaligned_load(10*src_stride, src); \
    r11 = unaligned_load(11*src_stride, src); \
    r12 = unaligned_load(12*src_stride, src); \
    r13 = unaligned_load(13*src_stride, src); \
    r0 = vec_mergeh(r0, r8);   \
    r1 = vec_mergeh(r1, r9);   \
    r2 = vec_mergeh(r2, r10);  \
    r3 = vec_mergeh(r3, r11);  \
    r4 = vec_mergeh(r4, r12);  \
    r5 = vec_mergeh(r5, r13);  \
    r6 = vec_mergeh(r6, r14);  \
    r7 = vec_mergeh(r7, r15);  \
    r8  = vec_mergeh(r0, r4);  \
    r9  = vec_mergel(r0, r4);  \
    r10 = vec_mergeh(r1, r5);  \
    r11 = vec_mergel(r1, r5);  \
    r12 = vec_mergeh(r2, r6);  \
    r13 = vec_mergel(r2, r6);  \
    r14 = vec_mergeh(r3, r7);  \
    r15 = vec_mergel(r3, r7);  \
    r0 = vec_mergeh(r8,  r12); \
    r1 = vec_mergel(r8,  r12); \
    r2 = vec_mergeh(r9,  r13); \
    r4 = vec_mergeh(r10, r14); \
    r5 = vec_mergel(r10, r14); \
    r6 = vec_mergeh(r11, r15); \
    r8  = vec_mergeh(r0, r4);  \
    r9  = vec_mergel(r0, r4);  \
    r10 = vec_mergeh(r1, r5);  \
    r11 = vec_mergel(r1, r5);  \
    r12 = vec_mergeh(r2, r6);  \
    r13 = vec_mergel(r2, r6);  \
}
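/* |x - y| < a on unsigned bytes: the saturated differences in both directions
 * are OR'ed together to form |x - y| and compared against the threshold,
 * yielding an all-ones / all-zeros byte mask. */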
static inline vec_u8 diff_lt_altivec(register vec_u8 x,
                                     register vec_u8 y,
                                     register vec_u8 a)
{
    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg);
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
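/* Per-pixel filter decision for one edge:
 * |p0 - q0| < alpha && |p1 - p0| < beta && |q1 - q0| < beta. */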
static inline vec_u8 h264_deblock_mask(register vec_u8 p0, register vec_u8 p1,
                                       register vec_u8 q0, register vec_u8 q1,
                                       register vec_u8 alpha, register vec_u8 beta)
{
    register vec_u8 mask;
    register vec_u8 tempmask;
    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);
    return mask;
}
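/* Compute the filtered p1 (or q1, with the arguments swapped accordingly):
 *   p1' = clip(p1 - tc0, p1 + tc0, (p2 + ((p0 + q0 + 1) >> 1)) >> 1)
 * built from vec_avg with an xor/and-1 correction so the rounding matches the
 * exact integer average. */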
static inline vec_u8 h264_deblock_q1(register vec_u8 p0, register vec_u8 p1,
                                     register vec_u8 p2, register vec_u8 q0,
                                     register vec_u8 tc0)
{
    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);
    uncliped = vec_subs(average, temp);
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
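/* Normal-filtering update of p0/q0. In scalar terms the H.264 formula is
 * roughly:
 *   delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc);
 *   p0' = clip_uint8(p0 + delta);  q0' = clip_uint8(q0 - delta);
 * Here it is evaluated entirely on unsigned bytes with vec_avg/vec_nor tricks;
 * the positive and negative parts of delta are clamped separately against the
 * tc0 mask and applied with saturating adds and subtracts. */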
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {               \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));     \
    register vec_u8 pq0bit = vec_xor(p0,q0);                          \
    register vec_u8 q1minus;                                          \
    register vec_u8 p0minus;                                          \
    register vec_u8 stage1;                                           \
    register vec_u8 stage2;                                           \
    register vec_u8 vec160;                                           \
    register vec_u8 delta;                                            \
    register vec_u8 deltaneg;                                         \
    q1minus = vec_nor(q1, q1);                                        \
    stage1 = vec_avg(p1, q1minus);                                    \
    stage2 = vec_sr(stage1, vec_splat_u8(1));                         \
    p0minus = vec_nor(p0, p0);                                        \
    stage1 = vec_avg(q0, p0minus);                                    \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                        \
    stage2 = vec_avg(stage2, pq0bit);                                 \
    stage2 = vec_adds(stage2, stage1);                                \
    vec160 = vec_ld(0, &A0v);                                         \
    deltaneg = vec_subs(vec160, stage2);                              \
    delta = vec_subs(stage2, vec160);                                 \
    deltaneg = vec_min(tc0masked, deltaneg);                          \
    delta = vec_min(tc0masked, delta);                                \
    p0 = vec_subs(p0, deltaneg);                                      \
    q0 = vec_subs(q0, delta);                                         \
    p0 = vec_adds(p0, delta);                                         \
    q0 = vec_adds(q0, deltaneg);                                      \
}
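/* Filter one luma edge (16 pixels) held in the six line vectors p2..q2: build
 * the alpha/beta masks, expand tc0 to per-pixel bytes, conditionally update
 * p1 and q1 where the |p2 - p0| / |q2 - q0| conditions hold (raising tc by
 * one for each), then update p0/q0. */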
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                 \
    register vec_u8 alphavec;                                                     \
    register vec_u8 betavec;                                                      \
    register vec_u8 mask;                                                         \
    register vec_u8 p1mask;                                                       \
    register vec_u8 q1mask;                                                       \
    register vector signed char tc0vec;                                           \
    register vec_u8 finaltc0;                                                     \
    register vec_u8 tc0masked;                                                    \
    register vec_u8 newp1;                                                        \
    register vec_u8 newq1;                                                        \
    temp[0] = alpha;                                                              \
    temp[1] = beta;                                                               \
    alphavec = vec_ld(0, temp);                                                   \
    betavec = vec_splat(alphavec, 0x1);                                           \
    alphavec = vec_splat(alphavec, 0x0);                                          \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec);                  \
    AV_COPY32(temp, tc0);                                                         \
    tc0vec = vec_ld(0, (signed char*)temp);                                       \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                          \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                          \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));                    \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                                     \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                    \
    p1mask = vec_and(p1mask, mask);                                               \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                  \
    finaltc0 = vec_sub(finaltc0, p1mask);                                         \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                           \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                    \
    q1mask = vec_and(q1mask, mask);                                               \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                  \
    finaltc0 = vec_sub(finaltc0, q1mask);                                         \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                           \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                 \
    p1 = newp1;                                                                   \
    q1 = newq1;                                                                   \
}
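/* Vertical luma edge deblocking: the rows are already laid out as the p2..q2
 * vectors, so they are loaded and stored directly. The early check skips the
 * whole edge only when all four tc0 values are negative. */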
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
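/* Horizontal luma edge deblocking: the pixels of interest sit in columns, so
 * they are gathered and transposed with readAndTranspose16x6, filtered, and
 * the modified lines are transposed back and written out with write16x4. */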
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
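/* Explicit weighted prediction for one block of width w (8 or 16):
 *   pixel = clip_uint8(((pixel * weight + rounding) >> log2_denom) + offset)
 * processed a full 16-byte row at a time; for w == 8 only the aligned or the
 * unaligned half of the vector is actually weighted, depending on where the
 * block starts within the 16-byte line. */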
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;

    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));

    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);
    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        if (w == 16 || aligned) {

            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);

        if (w == 16 || !aligned) {

            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);

        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);
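/* Bidirectional weighted prediction: roughly
 *   dst = clip_uint8(((dst * weightd + src * weights + rounding)
 *                     >> (log2_denom + 1)) + offset)
 * again handling the aligned and unaligned vector halves separately for
 * 8-pixel-wide blocks. */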
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)

    int y, dst_aligned, src_aligned;

    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;

    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));

    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);
    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        if (w == 16 || dst_aligned) {

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);

        if (w == 16 || !dst_aligned) {

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);

        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);
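/* Generate the fixed-width weight/biweight entry points (weight_h264_pixelsW
 * and biweight_h264_pixelsW) as thin wrappers around the width-parameterized
 * helpers above. */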
#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
} \
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}
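/* Function-pointer setup: the AltiVec implementations above are installed only
 * for 8-bit content, and the chroma-dependent entries only when
 * chroma_format_idc <= 1 (4:2:0 / 4:2:2). */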
                                 const int chroma_format_idc)

    if (bit_depth == 8) {

        if (chroma_format_idc <= 1)