#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"
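
/* AltiVec-accelerated gmc1 (global motion compensation with one warp point).
 * For each of the 8 pixels handled per row, this computes the 8.8 fixed-point
 * bilinear blend
 *     dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1]
 *               + rounder) >> 8
 * where the weights A..D are built from the fractional offsets x16/y16 below. */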
void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16-x16)*(16-y16),
        (   x16)*(16-y16),
        (16-x16)*(   y16),
        (   x16)*(   y16),
        0, 0, 0, 0
    };
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
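
    /* Load the four 16-bit weights and splat each one across all eight lanes
     * of its own vector. rounder_a is 16-byte aligned, so vec_lde places it
     * in lane 0 before the splat. */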
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);
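
    /* Two aligned 16-byte loads cover the 9 unaligned source bytes we need.
     * The first row pair is loaded here, outside the loop, because each
     * iteration can then reuse its 'src + stride' row as the next 'src' row. */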
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
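
    /* If src & 0xF == 0xF, then src + 1 is properly aligned on the second
     * vector, and vec_lvsl(1, src) would wrap around and select src_0
     * instead, so use src_1 directly. */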
    if (src_really_odd != 0x0000000F) {
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
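
    /* Interleave zero bytes ahead of each pixel byte: on big-endian AltiVec
     * this zero-extends the first 8 pixels to 16-bit lanes for vec_mladd. */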
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);
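
        /* Fetch the next row (src + stride) the same way. The resulting
         * srcvC/srcvD become srcvA/srcvB of the next iteration, so each
         * row is only loaded once. */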
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
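
        /* Same alignment special case as above, for the row at src + stride. */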
        if (src_really_odd != 0x0000000F) {
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }
        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);
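
        /* The actual arithmetic: four multiply-adds accumulate
         * A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder in 16-bit lanes,
         * replacing 32 scalar multiplies and 32 scalar adds. */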
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;
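
        /* Shift right by 8 to drop the fixed-point scale, then pack the
         * eight 16-bit results into the first 8 bytes of dstv2, padding
         * the rest with zeros. */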
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
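
        /* Merge the 8 new bytes into the aligned 16-byte destination vector:
         * depending on whether dst sits in the low or the high half of its
         * 16-byte line, keep the other half of the old dstv. */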
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}