/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

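/*
 * H264_MC expands into the 16 quarter-pel motion-compensation functions
 * for one block size: _mcXY_ computes the prediction at quarter-pel
 * offset (X/4, Y/4).  Half-pel taps come from the 6-tap lowpass helpers
 * instantiated above; the remaining positions are the rounded average of
 * two such intermediates, computed by the *_pixels*_l2_* helpers below.
 */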
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

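/*
 * Average two 16-pixel-wide rows into dst, h rows at a time.  src1 may be
 * unaligned (handled with the vec_lvsl/vec_perm load trick); src2 is read
 * with a fixed stride of 16 bytes, matching the scratch buffers filled by
 * the lowpass helpers.  Stores to a possibly unaligned dst are emulated
 * by blending the result into the existing edge bytes before storing.
 */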
static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

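/*
 * Same as put_pixels16_l2_altivec, except that the src1/src2 average is
 * averaged once more with the pixels already in dst, as required by the
 * avg_* motion-compensation variants.
 */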
static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)

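/*
 * 4x4 IDCT + add.  VEC_1D_DCT is one pass of the H.264 4x4 integer
 * inverse transform; it is applied once per dimension with a transpose
 * in between.
 */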
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
    vz0 = vec_add(vb0,vb2);                         \
    vz1 = vec_sub(vb0,vb2);                         \
    vz2 = vec_sra(vb1,vec_splat_u16(1));            \
    vz2 = vec_sub(vz2,vb3);                         \
    vz3 = vec_sra(vb3,vec_splat_u16(1));            \
    vz3 = vec_add(vb1,vz3);                         \
    \
    va0 = vec_add(vz0,vz3);                         \
    va1 = vec_add(vz1,vz2);                         \
    va2 = vec_sub(vz1,vz2);                         \
    va3 = vec_sub(vz0,vz3)

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 );                   \
    b1 = vec_mergeh( a1, a0 );                   \
    b2 = vec_mergeh( a2, a0 );                   \
    b3 = vec_mergeh( a3, a0 );                   \
    a0 = vec_mergeh( b0, b2 );                   \
    a1 = vec_mergel( b0, b2 );                   \
    a2 = vec_mergeh( b1, b3 );                   \
    a3 = vec_mergel( b1, b3 );                   \
    b0 = vec_mergeh( a0, a2 );                   \
    b1 = vec_mergel( a0, a2 );                   \
    b2 = vec_mergeh( a1, a3 );                   \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);  \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_packsu(va, zero_s16v);               \
    va_u32 = vec_splat((vec_u32)va_u8, 0);           \
    vec_ste(va_u32, element, (uint32_t*)dst);

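/*
 * Adds the inverse-transformed 4x4 residual to dst.  block[0] is biased
 * by 32 so that the final >> 6 rounds; each result row is packed with
 * unsigned saturation and stored 4 bytes at a time with vec_ste.
 */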
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

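/*
 * One pass of the H.264 8x8 integer inverse transform: an even/odd
 * butterfly on the eight input rows.  onev and twov must be in scope
 * (see ff_h264_idct8_add_altivec below).
 */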
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    vec_s16 a0v = vec_add(s0, s4);                                            \
    vec_s16 a2v = vec_sub(s0, s4);                                            \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);                             \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);                             \
    vec_s16 b0v = vec_add(a0v, a6v);                                          \
    vec_s16 b2v = vec_add(a2v, a4v);                                          \
    vec_s16 b4v = vec_sub(a2v, a4v);                                          \
    vec_s16 b6v = vec_sub(a0v, a6v);                                          \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v);                          \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov));                           \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v);                          \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov));                          \
    d0 = vec_add(b0v, b7v);                                                   \
    d1 = vec_add(b2v, b5v);                                                   \
    d2 = vec_add(b4v, b3v);                                                   \
    d3 = vec_add(b6v, b1v);                                                   \
    d4 = vec_sub(b6v, b1v);                                                   \
    d5 = vec_sub(b4v, b3v);                                                   \
    d6 = vec_sub(b2v, b5v);                                                   \
    d7 = vec_sub(b0v, b7v);                                                   \
}

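/*
 * Shifts one IDCT row down by 6, adds it to the corresponding dst row
 * with saturation, and stores it back.  dst may be unaligned, so the 8
 * result bytes are blended into the surrounding data with vec_sel before
 * the two covering 16-byte blocks are rewritten.
 */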
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    vec_u8 hv = vec_ld( 0, dest );                                     \
    vec_u8 lv = vec_ld( 7, dest );                                     \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );                \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                           \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);               \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);              \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);                  \
    vec_u8 edgehv;                                                     \
    \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );           \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );               \
    lv = vec_sel( lv, bodyv, edgelv );                                 \
    vec_st( lv, 7, dest );                                             \
    hv = vec_ld( 0, dest );                                            \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );                      \
    hv = vec_sel( hv, bodyv, edgehv );                                 \
    vec_st( hv, 0, dest );                                             \
}

static void ff_h264_idct8_add_altivec(uint8_t *dst, DCTELEM *dct, int stride) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32;

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

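/*
 * DC-only IDCT + add for a size x size block (size is 4 or 8).  The
 * rounded DC is splatted twice: once saturated positive (dcplus) and
 * once as its saturated negative magnitude (dcminus); adding one and
 * subtracting the other applies the signed DC with unsigned saturation.
 * Lanes outside the block are zero in both vectors, so the full 16-byte
 * read-modify-write leaves neighbouring pixels untouched.
 */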
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

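/*
 * IDCT dispatchers: walk the macroblock's sub-blocks and, based on the
 * non-zero-coefficient counts in nnzc[scan8[i]], pick either the DC-only
 * shortcut or the full transform for each block.
 */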
static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

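/*
 * Transpose the 4x16 byte matrix held in r0-r3 so that each group of
 * four filtered pixels becomes contiguous, ready to be scattered back
 * into the image by write16x4().
 */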
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8 r4;                 \
    register vec_u8 r5;                 \
    register vec_u8 r6;                 \
    register vec_u8 r7;                 \
    \
    r4 = vec_mergeh(r0, r2);            \
    r5 = vec_mergel(r0, r2);            \
    r6 = vec_mergeh(r1, r3);            \
    r7 = vec_mergel(r1, r3);            \
    \
    r0 = vec_mergeh(r4, r6);            \
    r1 = vec_mergel(r4, r6);            \
    r2 = vec_mergeh(r5, r7);            \
    r3 = vec_mergel(r5, r7);            \
}

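/*
 * Write a transposed 16x4 block back to dst: the four vectors are stored
 * to an aligned scratch buffer, then copied out as sixteen 4-byte rows.
 * dst must be 4-byte aligned and dst_stride a multiple of 4.
 */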
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);

    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

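/*
 * Load sixteen unaligned rows starting at src and transpose them so that
 * r8-r13 each hold one of the six columns (p2, p1, p0, q0, q1, q2)
 * needed by the horizontal luma deblocking filter.
 */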
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);\
    register vec_u8 r1  = unaligned_load(   src_stride, src);\
    register vec_u8 r2  = unaligned_load(2* src_stride, src);\
    register vec_u8 r3  = unaligned_load(3* src_stride, src);\
    register vec_u8 r4  = unaligned_load(4* src_stride, src);\
    register vec_u8 r5  = unaligned_load(5* src_stride, src);\
    register vec_u8 r6  = unaligned_load(6* src_stride, src);\
    register vec_u8 r7  = unaligned_load(7* src_stride, src);\
    register vec_u8 r14 = unaligned_load(14*src_stride, src);\
    register vec_u8 r15 = unaligned_load(15*src_stride, src);\
    \
    r8  = unaligned_load( 8*src_stride, src);\
    r9  = unaligned_load( 9*src_stride, src);\
    r10 = unaligned_load(10*src_stride, src);\
    r11 = unaligned_load(11*src_stride, src);\
    r12 = unaligned_load(12*src_stride, src);\
    r13 = unaligned_load(13*src_stride, src);\
    \
    r0 = vec_mergeh(r0, r8);\
    r1 = vec_mergeh(r1, r9);\
    r2 = vec_mergeh(r2, r10);\
    r3 = vec_mergeh(r3, r11);\
    r4 = vec_mergeh(r4, r12);\
    r5 = vec_mergeh(r5, r13);\
    r6 = vec_mergeh(r6, r14);\
    r7 = vec_mergeh(r7, r15);\
    \
    r8  = vec_mergeh(r0, r4);\
    r9  = vec_mergel(r0, r4);\
    r10 = vec_mergeh(r1, r5);\
    r11 = vec_mergel(r1, r5);\
    r12 = vec_mergeh(r2, r6);\
    r13 = vec_mergel(r2, r6);\
    r14 = vec_mergeh(r3, r7);\
    r15 = vec_mergel(r3, r7);\
    \
    r0 = vec_mergeh(r8,  r12);\
    r1 = vec_mergel(r8,  r12);\
    r2 = vec_mergeh(r9,  r13);\
    r4 = vec_mergeh(r10, r14);\
    r5 = vec_mergel(r10, r14);\
    r6 = vec_mergeh(r11, r15);\
    \
    r8  = vec_mergeh(r0, r4);\
    r9  = vec_mergel(r0, r4);\
    r10 = vec_mergeh(r1, r5);\
    r11 = vec_mergel(r1, r5);\
    r12 = vec_mergeh(r2, r6);\
    r13 = vec_mergel(r2, r6);\
}

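/* out: o = |x - y| < a, as a 0x00/0xFF byte mask */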
static inline vec_u8 diff_lt_altivec(register vec_u8 x,
                                     register vec_u8 y,
                                     register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg);
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

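/*
 * Filter-enable mask per H.264:
 * |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta.
 */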
static inline vec_u8 h264_deblock_mask(register vec_u8 p0,
                                       register vec_u8 p1,
                                       register vec_u8 q0,
                                       register vec_u8 q1,
                                       register vec_u8 alpha,
                                       register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

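/*
 * Compute the updated p1 (or q1): (p2 + avg(p0, q0)) >> 1, with the
 * xor/and-ones trick correcting vec_avg's round-up so the sum rounds
 * down, then clamp the result to [p1 - tc0, p1 + tc0].
 */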
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);
    unclipped = vec_subs(average, temp);
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

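/*
 * In-place p0/q0 update.  The delta ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3
 * is evaluated in unsigned-byte arithmetic around the bias constant 160
 * (A0v = 10 << 4); delta and -delta are clipped to tc0 and then applied
 * to p0 and q0 with saturating adds and subtracts.
 */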
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {           \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
    \
    register vec_u8 pq0bit = vec_xor(p0,q0);                      \
    register vec_u8 q1minus;                                      \
    register vec_u8 p0minus;                                      \
    register vec_u8 stage1;                                       \
    register vec_u8 stage2;                                       \
    register vec_u8 vec160;                                       \
    register vec_u8 delta;                                        \
    register vec_u8 deltaneg;                                     \
    \
    q1minus = vec_nor(q1, q1);                                    \
    stage1 = vec_avg(p1, q1minus);                                \
    stage2 = vec_sr(stage1, vec_splat_u8(1));                     \
    p0minus = vec_nor(p0, p0);                                    \
    stage1 = vec_avg(q0, p0minus);                                \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                    \
    stage2 = vec_avg(stage2, pq0bit);                             \
    stage2 = vec_adds(stage2, stage1);                            \
    vec160 = vec_ld(0, &A0v);                                     \
    deltaneg = vec_subs(vec160, stage2);                          \
    delta = vec_subs(stage2, vec160);                             \
    deltaneg = vec_min(tc0masked, deltaneg);                      \
    delta = vec_min(tc0masked, delta);                            \
    p0 = vec_subs(p0, deltaneg);                                  \
    q0 = vec_subs(q0, delta);                                     \
    p0 = vec_adds(p0, delta);                                     \
    q0 = vec_adds(q0, deltaneg);                                  \
}

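/*
 * Full luma deblocking of one 16-pixel edge.  alpha/beta are splatted to
 * vectors, the four per-partition tc0 values are expanded to all 16
 * lanes, and p1/q1 get the conditional secondary update (which also
 * bumps tc0 by one where it applies) before p0/q0 are filtered.
 */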
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                    \
    register vec_u8 alphavec;                                        \
    register vec_u8 betavec;                                         \
    register vec_u8 mask;                                            \
    register vec_u8 p1mask;                                          \
    register vec_u8 q1mask;                                          \
    register vector signed char tc0vec;                              \
    register vec_u8 finaltc0;                                        \
    register vec_u8 tc0masked;                                       \
    register vec_u8 newp1;                                           \
    register vec_u8 newq1;                                           \
    \
    temp[0] = alpha;                                                 \
    temp[1] = beta;                                                  \
    alphavec = vec_ld(0, temp);                                      \
    betavec = vec_splat(alphavec, 0x1);                              \
    alphavec = vec_splat(alphavec, 0x0);                             \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec);     \
    \
    *((int *)temp) = *((int *)tc0);                                  \
    tc0vec = vec_ld(0, (signed char*)temp);                          \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                             \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                             \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));       \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                        \
    \
    p1mask = diff_lt_altivec(p2, p0, betavec);                       \
    p1mask = vec_and(p1mask, mask);                                  \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                     \
    finaltc0 = vec_sub(finaltc0, p1mask);                            \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);              \
    \
    q1mask = diff_lt_altivec(q2, q0, betavec);                       \
    q1mask = vec_and(q1mask, mask);                                  \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                     \
    finaltc0 = vec_sub(finaltc0, q1mask);                            \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);              \
    \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                    \
    p1 = newp1;                                                      \
    q1 = newq1;                                                      \
}

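/*
 * Vertical and horizontal luma edge filters.  tc0[i] == -1 marks a
 * skipped partition; the bitwise AND of the four values is negative only
 * when all of them are negative, in which case the whole edge is left
 * untouched.  The horizontal filter transposes 16x6 pixels, reuses the
 * vertical logic, and transposes the modified p1..q1 rows back.
 */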
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

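/*
 * Explicit weighted prediction over a w x h block:
 * pix = clip(((pix * weight) >> log2_denom) + offset), with rounding
 * folded into the pre-shifted offset.  Rows are processed 16 bytes at a
 * time; for w == 8 only the half actually covered by the block (chosen
 * from the buffer's 16-byte alignment) is computed.
 */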
static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

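/*
 * Bidirectional weighted prediction:
 * dst = clip((dst * weightd + src * weights + offset') >> (log2_denom+1))
 * with offset' = ((offset + 1) | 1) << log2_denom.  The same 16-byte row
 * handling as in weight_h264_WxH_altivec applies; for w == 8 the useful
 * src half is copied over so it lines up with the dst half being
 * computed.
 */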
static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

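/* Instantiate the per-block-size weight/biweight wrapper pairs. */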
#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

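/*
 * Runtime initialization: install the AltiVec implementations when the
 * CPU supports them.  All routines in this file are 8-bit only, hence
 * the high_bit_depth / bit_depth == 8 guards.
 */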
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
    const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;

    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
        if (!high_bit_depth) {
            c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
            c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;

#define dspfunc(PFX, IDX, NUM) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
        }
    }
}

void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth)
{
    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
        if (bit_depth == 8) {
            c->h264_idct_add        = ff_h264_idct_add_altivec;
            c->h264_idct_add8       = ff_h264_idct_add8_altivec;
            c->h264_idct_add16      = ff_h264_idct_add16_altivec;
            c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
            c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
            c->h264_idct8_dc_add    = ff_h264_idct8_dc_add_altivec;
            c->h264_idct8_add       = ff_h264_idct8_add_altivec;
            c->h264_idct8_add4      = ff_h264_idct8_add4_altivec;
            c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
            c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

            c->weight_h264_pixels_tab[0]   = ff_weight_h264_pixels16x16_altivec;
            c->weight_h264_pixels_tab[1]   = ff_weight_h264_pixels16x8_altivec;
            c->weight_h264_pixels_tab[2]   = ff_weight_h264_pixels8x16_altivec;
            c->weight_h264_pixels_tab[3]   = ff_weight_h264_pixels8x8_altivec;
            c->weight_h264_pixels_tab[4]   = ff_weight_h264_pixels8x4_altivec;
            c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
            c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
            c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
            c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
            c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
        }
    }
}