#include "libavutil/avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                  \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e);               \
        tempB1 = vec_mergel (src_a, src_e);               \
        tempC1 = vec_mergeh (src_b, src_f);               \
        tempD1 = vec_mergel (src_b, src_f);               \
        tempE1 = vec_mergeh (src_c, src_g);               \
        tempF1 = vec_mergel (src_c, src_g);               \
        tempG1 = vec_mergeh (src_d, src_h);               \
        tempH1 = vec_mergel (src_d, src_h);               \
        tempA2 = vec_mergeh (tempA1, tempE1);             \
        tempB2 = vec_mergel (tempA1, tempE1);             \
        tempC2 = vec_mergeh (tempB1, tempF1);             \
        tempD2 = vec_mergel (tempB1, tempF1);             \
        tempE2 = vec_mergeh (tempC1, tempG1);             \
        tempF2 = vec_mergel (tempC1, tempG1);             \
        tempG2 = vec_mergeh (tempD1, tempH1);             \
        tempH2 = vec_mergel (tempD1, tempH1);             \
        src_a = vec_mergeh (tempA2, tempE2);              \
        src_b = vec_mergel (tempA2, tempE2);              \
        src_c = vec_mergeh (tempB2, tempF2);              \
        src_d = vec_mergel (tempB2, tempF2);              \
        src_e = vec_mergeh (tempC2, tempG2);              \
        src_f = vec_mergel (tempC2, tempG2);              \
        src_g = vec_mergeh (tempD2, tempH2);              \
        src_h = vec_mergel (tempD2, tempH2);              \
    } while (0)
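/* ALTIVEC_TRANSPOSE_8x8_SHORT transposes an 8x8 block of 16-bit elements
 * held in eight vector registers, in place, using three rounds of
 * vec_mergeh/vec_mergel interleaving. A usage sketch (v0..v7 are
 * hypothetical vectors, each holding one row of eight shorts):
 *
 *     ALTIVEC_TRANSPOSE_8x8_SHORT(v0, v1, v2, v3, v4, v5, v6, v7);
 *     // v0 now holds the former column 0, ..., v7 the former column 7
 */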

/* Classify the 8x8 block at src: returns 1 if the block is flat enough to
 * be smoothed by doVertLowPass_altivec(), 0 if it is flat but its local
 * contrast exceeds the QP-based limit (leave it alone), and 2 if the
 * default filter (doVertDefFilter_altivec()) should be used instead. */
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true.
    */
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        data_0,
                        data_0 * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);           /* nonzero if stride is NOT a multiple of 16 */
    const int srcAlign = ((unsigned long)src2 % 16);  /* nonzero if src2 is NOT 16-byte aligned    */
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;

    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;

    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    v_srcA##i =                                                         \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }

#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)

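    /* LOAD_LINE is the classic AltiVec unaligned-load idiom: vec_lvsl
     * builds a permute vector from the low four address bits, and
     * vec_perm shifts the wanted bytes out of the two aligned loads that
     * straddle the misaligned address. When the needed 8 pixels fit in a
     * single 16-byte block (two_vectors == 0) the second load is skipped;
     * the bytes that land in the low half of the result, the only ones
     * consumed by vec_mergeh, all come from the first load. */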
    //special casing the aligned case is worthwhile, as all call from
    //the (transposed) horizontal deblocking filter are correctly aligned
    /* The fast path requires both a 16-byte-aligned src2 and a stride that
     * is a multiple of 16; otherwise vec_ld would fetch from the
     * rounded-down address and load the wrong pixels. */
    if (!properStride && !srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);
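    /* ITER is the vector form of the scalar flatness test, roughly
     * (a sketch, variable names ours):
     *
     *     int diff = src[x + i*stride] - src[x + j*stride];
     *     numEq += ((unsigned)(diff + dcOffset)) < dcThreshold;
     *
     * i.e. a pair of vertically adjacent pixels counts as "equal" when
     * their difference falls within a small window around zero. */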

    {
        ITER(0, 1)
        ITER(1, 2)
        ITER(2, 3)
        ITER(3, 4)
        ITER(4, 5)
        ITER(5, 6)
        ITER(6, 7)

        v_numEq = vec_sum4s(v_part0, v_numEq);
        v_numEq = vec_sum4s(v_part1, v_numEq);
        v_numEq = vec_sum4s(v_part2, v_numEq);
        v_numEq = vec_sum4s(v_part3, v_numEq);
        v_numEq = vec_sum4s(v_part4, v_numEq);
        v_numEq = vec_sum4s(v_part5, v_numEq);
        v_numEq = vec_sum4s(v_part6, v_numEq);
    }

#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);           /* nonzero if stride is NOT a multiple of 16 */
    const int srcAlign = ((unsigned long)src2 % 16);  /* nonzero if src2 is NOT 16-byte aligned    */
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3),
                         av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6),
                         av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3),
                         av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6),
                         av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;

    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

#define LOAD_LINE(i)                                                    \
    perml##i = vec_lvsl(i * stride, src2);                              \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)

    //special casing the aligned case is worthwhile, as all call from
    //the (transposed) horizontal deblocking filter are correctly aligned
    if (!properStride && !srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
    {
        const vector unsigned short v_2 = vec_splat_u16(2);
        const vector unsigned short v_4 = vec_splat_u16(4);

        const vector signed short v_diff01 = vec_sub(vb0, vb1);
        const vector unsigned short v_cmp01 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
        const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
        const vector signed short v_diff89 = vec_sub(vb8, vb9);
        const vector unsigned short v_cmp89 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
        const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

        const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
        const vector signed short temp02 = vec_add(vb2, vb3);
        const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
        const vector signed short v_sumsB0 = vec_add(temp02, temp03);

        const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
        const vector signed short v_sumsB1 = vec_add(temp11, vb4);

        const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
        const vector signed short v_sumsB2 = vec_add(temp21, vb5);

        const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
        const vector signed short v_sumsB3 = vec_add(temp31, vb6);

        const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
        const vector signed short v_sumsB4 = vec_add(temp41, vb7);

        const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
        const vector signed short v_sumsB5 = vec_add(temp51, vb8);

        const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
        const vector signed short v_sumsB6 = vec_add(temp61, v_last);

        const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
        const vector signed short v_sumsB7 = vec_add(temp71, v_last);

        const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
        const vector signed short v_sumsB8 = vec_add(temp81, v_last);

        const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
        const vector signed short v_sumsB9 = vec_add(temp91, v_last);
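        /* The v_sumsB chain keeps a sliding window sum per column. In
         * scalar terms (a sketch; "first"/"last" are the edge-clamped
         * samples selected above):
         *
         *     sums[0] = 4*first + l1 + l2 + l3 + 4;
         *     sums[i] = sums[i-1] - (sample leaving) + (sample entering);
         *
         * one subtract and one add per step instead of a full re-sum. */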

#define COMPUTE_VR(i, j, k)                                             \
    const vector signed short temps1##i =                               \
        vec_add(v_sumsB##i, v_sumsB##k);                                \
    const vector signed short temps2##i =                               \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

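        /* Scalar equivalent of COMPUTE_VR (a sketch): each filtered line is
         *
         *     out[j] = (sums[i] + sums[k] + 2*in[j]) >> 4;
         *
         * a weighted average with total weight 16, centred on line j. */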
        COMPUTE_VR(0, 1, 2);
        COMPUTE_VR(1, 2, 3);
        COMPUTE_VR(2, 3, 4);
        COMPUTE_VR(3, 4, 5);
        COMPUTE_VR(4, 5, 6);
        COMPUTE_VR(5, 6, 7);
        COMPUTE_VR(6, 7, 8);
        COMPUTE_VR(7, 8, 9);

        const vector signed char neg1 = vec_splat_s8(-1);
        const vector unsigned char permHH = (const vector unsigned char)
            {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
             0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define PACK_AND_STORE(i)                                               \
{   const vector unsigned char perms##i =                               \
        vec_lvsr(i * stride, src2);                                     \
    const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    const vector unsigned char mask##i =                                \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                                 \
        vec_perm(vg##i, vg##i, perms##i);                               \
    const vector unsigned char svA##i =                                 \
        vec_sel(vbA##i, vg2##i, mask##i);                               \
    const vector unsigned char svB##i =                                 \
        vec_sel(vg2##i, vbB##i, mask##i);                               \
    vec_st(svA##i, i * stride, src2);                                   \
    vec_st(svB##i, i * stride + 16, src2);}
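/* PACK_AND_STORE mirrors the unaligned-load idiom on the way out: vec_lvsr
 * gives the inverse permute, vec_perm rotates the packed result into place,
 * and vec_sel with a mask built from the same permute splices the new bytes
 * into the two aligned blocks, leaving bytes outside the 8-pixel line
 * untouched (a read-modify-write store). */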

#define PACK_AND_STORE_ALIGNED(i)                                       \
{   const vector unsigned char vf##i =                                  \
        vec_packsu(vr##i, (vector signed short)zero);                   \
    const vector unsigned char vg##i =                                  \
        vec_perm(vf##i, vbT##i, permHH);                                \
    vec_st(vg##i, i * stride, src2);}

        //special casing the aligned case is worthwhile, as all call from
        //the (transposed) horizontal deblocking filter are correctly aligned
        if (!properStride && !srcAlign) {
            PACK_AND_STORE_ALIGNED(1)
            PACK_AND_STORE_ALIGNED(2)
            PACK_AND_STORE_ALIGNED(3)
            PACK_AND_STORE_ALIGNED(4)
            PACK_AND_STORE_ALIGNED(5)
            PACK_AND_STORE_ALIGNED(6)
            PACK_AND_STORE_ALIGNED(7)
            PACK_AND_STORE_ALIGNED(8)
        } else {
            PACK_AND_STORE(1)
            PACK_AND_STORE(2)
            PACK_AND_STORE(3)
            PACK_AND_STORE(4)
            PACK_AND_STORE(5)
            PACK_AND_STORE(6)
            PACK_AND_STORE(7)
            PACK_AND_STORE(8)
        }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
    }
}


static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src + stride*3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
        (vector signed short)vec_ld(0, qp), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE
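    /* What follows is the vector form of the scalar deblocking kernel
     * (a sketch of the logic; l1..l8 are the lines loaded above):
     *
     *     middleEnergy = 5*(l5 - l4) + 2*(l3 - l6);
     *     if (|middleEnergy| < 8*QP) {
     *         q = (l4 - l5) / 2;                     // rounded toward zero
     *         d = |middleEnergy| - min(|leftEnergy|, |rightEnergy|);
     *         d = max(d, 0);
     *         d = (5*d + 32) >> 6;
     *         d *= sign(-middleEnergy);
     *         // d is clipped against q, then l4 -= d and l5 += d
     *     }
     */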

    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);

    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);

    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);

    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));

    const vector signed short qtimes2 = vec_sub(vb4, vb5);

    /* round (l4 - l5) / 2 toward zero: add one before the arithmetic
       shift when the difference is negative */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));

    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));

    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);

    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i)                                                        \
{   const vector unsigned char perms##i =                               \
        vec_lvsr(i * stride, src2);                                     \
    const vector unsigned char vg##i =                                  \
        vec_perm(st##i, vbT##i, permHH);                                \
    const vector unsigned char mask##i =                                \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                                 \
        vec_perm(vg##i, vg##i, perms##i);                               \
    const vector unsigned char svA##i =                                 \
        vec_sel(vbA##i, vg2##i, mask##i);                               \
    const vector unsigned char svB##i =                                 \
        vec_sel(vg2##i, vbB##i, mask##i);                               \
    vec_st(svA##i, i * stride, src2);                                   \
    vec_st(svB##i, i * stride + 16, src2);}

    STORE(4)
    STORE(5)
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16];
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, srcCopy);                                  \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);           \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);      \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
             0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do {                                                \
    const vector unsigned char s##op##_1   = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2   = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6   = vec_##op(s##op##_1, s##op##_2);     \
    const vector unsigned char s##op##_8h  = vec_mergeh(s##op##_6, s##op##_6);   \
    const vector unsigned char s##op##_8l  = vec_mergel(s##op##_6, s##op##_6);   \
    const vector unsigned char s##op##_9   = vec_##op(s##op##_8h, s##op##_8l);   \
    const vector unsigned char s##op##_9h  = vec_mergeh(s##op##_9, s##op##_9);   \
    const vector unsigned char s##op##_9l  = vec_mergel(s##op##_9, s##op##_9);   \
    const vector unsigned char s##op##_10  = vec_##op(s##op##_9h, s##op##_9l);   \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11  = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

        vector unsigned char v_min;
        vector unsigned char v_max;
        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;

        v_avg = vec_avg(v_min, v_max);
    }
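    /* EXTRACT(min)/EXTRACT(max) reduce the 8x8 interior of the block to a
     * single splatted minimum/maximum byte by repeated pairwise
     * vec_min/vec_max merging. If the dynamic range (v_max - v_min) stays
     * below deringThreshold there is no ringing worth removing and the
     * function returns early; otherwise v_avg, the mid-range value, is the
     * reference for the per-pixel classification below. */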

    DECLARE_ALIGNED(16, signed int, S)[8];
    {
        const vector unsigned short mask1 = (vector unsigned short)
            {0x0001, 0x0002, 0x0004, 0x0008,
             0x0010, 0x0020, 0x0040, 0x0080};
        const vector unsigned short mask2 = (vector unsigned short)
            {0x0100, 0x0200, 0x0000, 0x0000,
             0x0000, 0x0000, 0x0000, 0x0000};

        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

#define COMPARE(i)                                                      \
    vector signed int sum##i;                                           \
    do {                                                                \
        const vector unsigned char cmp##i =                             \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi##i =                          \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i);          \
        const vector unsigned short cmpLi##i =                          \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i);          \
        const vector signed short cmpHf##i =                            \
            (vector signed short)vec_and(cmpHi##i, mask1);              \
        const vector signed short cmpLf##i =                            \
            (vector signed short)vec_and(cmpLi##i, mask2);              \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);    \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE

        vector signed int sumA2;
        vector signed int sumB2;
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);

            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);

            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);

            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);
            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));
            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);

            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }
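    /* S[] now holds one bitmask per line. The shift/AND steps above keep a
     * bit set only when the bits to its left and right are also set, and
     * the vec_perm/vec_and across neighbouring lines repeats that test
     * vertically; the high half of each word tracks the complement mask,
     * so after the final OR a set bit marks a pixel whose whole 3x3
     * neighbourhood sits on the same side of v_avg. Only those pixels are
     * smoothed by the second pass below. */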

    /* Second pass: each pixel flagged in S is replaced by the 3x3 binomial
       average of its neighbourhood (weights 1-2-1 / 2-4-2 / 1-2-1, rounded),
       clamped to within QP/2 + 1 of the original value. */

    DECLARE_ALIGNED(16, int, tQP2)[4];
    tQP2[0]= c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};


#define F_INIT(i)                                                       \
    vector unsigned char tenRightM##i = tenRight;                       \
    vector unsigned char permA1M##i = permA1;                           \
    vector unsigned char permA2M##i = permA2;                           \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_##j##_A##l =                       \
            vec_perm(src##i, src##j, permA1M##i);                       \
        const vector unsigned char a_##j##_B##l =                       \
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
        const vector signed int a_##j##_sump##l =                       \
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
                                        (vector unsigned int)zero);     \
        vector signed int F_##j##_##l =                                 \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
        const vector signed int p_##j##_##l =                           \
            (vector signed int)vec_perm(src##j,                         \
                                        (vector unsigned char)zero,     \
                                        extractPermM##i);               \
        const vector signed int sum_##j##_##l  = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l;                              \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
            newpm_##j##_##l = sum_##j##_##l;                            \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
            newpm_##j##_##l = diff_##j##_##l;                           \
        else newpm_##j##_##l = F_##j##_##l;                             \
        const vector unsigned char newpm2_##j##_##l =                   \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
        const vector unsigned char mask##j##l = vec_add(identity,       \
                                                        tenRightM##i);  \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
    }                                                                   \
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k)                                                   \
    F_INIT(i);                                                          \
    F2(i, j, k, 0);                                                     \
    F2(i, j, k, 1);                                                     \
    F2(i, j, k, 2);                                                     \
    F2(i, j, k, 3);                                                     \
    F2(i, j, k, 4);                                                     \
    F2(i, j, k, 5);                                                     \
    F2(i, j, k, 6);                                                     \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i)                                                   \
    const vector unsigned char permST##i =                              \
        vec_lvsr(i * stride, srcCopy);                                  \
    const vector unsigned char maskST##i =                              \
        vec_perm((vector unsigned char)zero,                            \
                 (vector unsigned char)neg1, permST##i);                \
    src##i = vec_perm(src##i, src##i, permST##i);                       \
    sA##i = vec_sel(sA##i, src##i, maskST##i);                          \
    sB##i = vec_sel(src##i, sB##i, maskST##i);                          \
    vec_st(sA##i, i * stride, srcCopy);                                 \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

#define doHorizLowPass_altivec(a...)   doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...)     do_a_deblock_C(a)
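/* There are no dedicated AltiVec horizontal filters: the macros above fall
 * back to the plain C implementations. Horizontal deblocking is instead
 * expected to go through the transpose helpers at the end of this file,
 * which turn it into a vertical-filter problem on an aligned scratch
 * buffer (see the "transposed horizontal deblocking filter" comments
 * above). */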

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBlurredPast[127]= maxNoise[0];
    tempBlurredPast[128]= maxNoise[1];
    tempBlurredPast[129]= maxNoise[2];

#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE

    /* v_dp accumulates the sum of squared differences between the blurred
       and the current block, v_sysdp the plain signed sum. */
#define ACCUMULATE_DIFFS(i)                                             \
    vector signed short v_d##i = vec_sub(v_tempBlurredAss##i,           \
                                         v_srcAss##i);                  \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                             \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    /* Smooth the noise estimate with neighbouring blocks' past values
       before thresholding. */
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;

    *tempBlurredPast=i;
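    /* Decision ladder, mirroring the scalar temporal noise reducer: a large
     * d (above maxNoise[1]) means real change, so take the new pixels,
     * averaged once with the old block unless d also exceeds maxNoise[2];
     * a small d means noise, so blend the new frame in weakly, 7:1 below
     * maxNoise[0] and 3:1 otherwise. */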

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i)                                                           \
    const vector signed short v_temp##i =                               \
        vec_mladd(v_tempBlurredAss##i,                                  \
                  vsint16_7, v_srcAss##i);                              \
    const vector signed short v_temp2##i =                              \
        vec_add(v_temp##i, vsint16_4);                                  \
    v_tempBlurredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i)                                                           \
    const vector signed short v_temp##i =                               \
        vec_mladd(v_tempBlurredAss##i,                                  \
                  vsint16_3, v_srcAss##i);                              \
    const vector signed short v_temp2##i =                              \
        vec_add(v_temp##i, vsint16_2);                                  \
    v_tempBlurredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define PACK_AND_STORE(src, i)                                          \
    const vector unsigned char perms##src##i =                          \
        vec_lvsr(i * stride, src);                                      \
    const vector unsigned char vf##src##i =                             \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg##src##i =                             \
        vec_perm(vf##src##i, v_##src##A##i, permHH);                    \
    const vector unsigned char mask##src##i =                           \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i =                            \
        vec_perm(vg##src##i, vg##src##i, perms##src##i);                \
    const vector unsigned char svA##src##i =                            \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i);             \
    const vector unsigned char svB##src##i =                            \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i);             \
    vec_st(svA##src##i, i * stride, src);                               \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}

static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
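/* transpose_16x8_char_toPackedAlign_altivec() above and
 * transpose_8x16_char_fromPackedAlign_altivec() below bracket the
 * horizontal deblocking path: rows are transposed into a 16-byte-aligned
 * scratch buffer with a fixed stride of 16, the vertical AltiVec filters
 * are run on it (a vertical filter on the transposed block is a
 * horizontal filter on the original), and the result is transposed back.
 * A usage sketch (buffer name ours):
 *
 *     DECLARE_ALIGNED(16, unsigned char, tempBlock)[256];
 *     transpose_16x8_char_toPackedAlign_altivec(tempBlock, src, stride);
 *     doVertDefFilter_altivec(tempBlock, 16, &c);
 *     transpose_8x16_char_fromPackedAlign_altivec(src, tempBlock, stride);
 */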

static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char src##i = vec_ld(i * 16, src);                  \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_DOUBLE_LINE(i, j)                                         \
    vector unsigned char dstA##i = vec_ld(i * stride, dst);             \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst);        \
    vector unsigned char dstA##j = vec_ld(j * stride, dst);             \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst);        \
    vector unsigned char align##i = vec_lvsr(i * stride, dst);          \
    vector unsigned char align##j = vec_lvsr(j * stride, dst);          \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst);                                  \
    vec_st(dstBF##i, i * stride + 16, dst);                             \
    vec_st(dstAF##j, j * stride, dst);                                  \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}