uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */

do {
    uint64_t shorts0, shorts1;

    shorts0 = ldq(block);
    shorts0 = maxsw4(shorts0, 0);         /* clamp words below at 0   */
    shorts0 = minsw4(shorts0, clampmask); /* and above at 255         */
    stl(pkwb(shorts0), pixels);           /* pack four words to bytes */

    shorts1 = ldq(block + 4);
    shorts1 = maxsw4(shorts1, 0);
    shorts1 = minsw4(shorts1, clampmask);
    stl(pkwb(shorts1), pixels + 4);

    pixels += line_size;
    block  += 8;
} while (--h);
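/*
 * Worked lane example (illustrative): a DCT word of 0x013f (319) passes
 * maxsw4 against 0 unchanged, then minsw4 against the 0x00ff in each
 * clampmask lane reduces it to 0x00ff (255); 0xfff3 (-13) is raised to 0
 * by maxsw4.  pkwb finally packs the four clamped words into four bytes.
 */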
uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
uint64_t signmask  = zap(-1, 0x33); /* 0xffff0000ffff0000 */
signmask ^= signmask >> 1;          /* 0x8000800080008000 */

do {
    uint64_t shorts0, pix0, signs0;
    uint64_t shorts1, pix1, signs1;

    shorts0 = ldq(block);
    shorts1 = ldq(block + 4);

    pix0   = unpkbw(ldl(pixels));         /* four bytes -> four words */
    signs0 = shorts0 & signmask;          /* signed subword add       */
    shorts0 &= ~signmask;
    shorts0 += pix0;
    shorts0 ^= signs0;
    shorts0 = maxsw4(shorts0, 0);         /* clamp to [0, 255]        */
    shorts0 = minsw4(shorts0, clampmask);

    pix1   = unpkbw(ldl(pixels + 4));
    signs1 = shorts1 & signmask;
    shorts1 &= ~signmask;
    shorts1 += pix1;
    shorts1 ^= signs1;
    shorts1 = maxsw4(shorts1, 0);
    shorts1 = minsw4(shorts1, clampmask);

    stl(pkwb(shorts0), pixels);
    stl(pkwb(shorts1), pixels + 4);

    pixels += line_size;
    block  += 8;
} while (--h);
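/*
 * Why the signmask dance works (sketch of the argument): with bit 15 of
 * every word cleared, adding an unpacked byte (0..255) can never carry
 * across a lane boundary, and since (x + 0x8000) mod 2^16 == x ^ 0x8000,
 * xor-ing the saved sign bits back afterwards restores the exact
 * wrap-around 16-bit sum in each lane; a packed add (like MMX paddw)
 * without a packed-add instruction.
 */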
uint64_t *p = (uint64_t *) blocks;
int n = sizeof(DCTELEM) * 6 * 64;
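/*
 * Sketch of the zeroing loop these two lines set up (the exact unroll
 * factor is an assumption): n is the byte count of six 64-coefficient
 * blocks, cleared eight quadwords (64 bytes) per iteration.
 */
do {
    p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
    p[4] = 0; p[5] = 0; p[6] = 0; p[7] = 0;
    p += 8;
    n -= 64;
} while (n);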
static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
{
    return (a & b) + (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
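/*
 * Both forms follow from a + b == 2*(a & b) + (a ^ b)
 *                          == 2*(a | b) - (a ^ b):
 * per byte lane, avg2_no_rnd is (a + b) >> 1 and avg2 is (a + b + 1) >> 1.
 * Masking (a ^ b) with BYTE_VEC(0xfe) discards each lane's low bit before
 * the shift so nothing leaks into the lane below.  Lane-by-lane model of
 * the rounding variant (a checking aid, not used by the fast path):
 */
static inline uint64_t avg2_ref(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i += 8) {
        uint64_t la = (a >> i) & 0xff;
        uint64_t lb = (b >> i) & 0xff;
        r |= ((la + lb + 1) >> 1) << i;
    }
    return r;
}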
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2) + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2) + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = (((l1 & BYTE_VEC(0x03)) + (l2 & BYTE_VEC(0x03))
                  + (l3 & BYTE_VEC(0x03)) + (l4 & BYTE_VEC(0x03))
                  + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
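/*
 * avg4 is (l1 + l2 + l3 + l4 + 2) >> 2 per byte lane: the high six bits
 * of every lane are pre-shifted before summing, so their carries stay in
 * the lane, and the four low 2-bit parts plus the rounder total at most
 * 14, which also fits.  Scalar model for one lane (0..255 inputs assumed):
 */
static inline unsigned avg4_lane_ref(unsigned a, unsigned b,
                                     unsigned c, unsigned d)
{
    return (a + b + c + d + 2) >> 2;
}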
#define OP(LOAD, STORE)                         \
    do {                                        \
        STORE(LOAD(pixels), block);             \
        pixels += line_size;                    \
        block += line_size;                     \
    } while (--h)
#define OP_X2(LOAD, STORE)                                      \
    do {                                                        \
        uint64_t pix1, pix2;                                    \
                                                                \
        pix1 = LOAD(pixels);                                    \
        pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);        \
        STORE(AVG2(pix1, pix2), block);                         \
        pixels += line_size;                                    \
        block += line_size;                                     \
    } while (--h)
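/*
 * pix2 above is pix1 shifted right by one whole pixel: the bytes sit in
 * the quadword little-endian, so `pix1 >> 8` moves pixel i+1 into lane i
 * and `(uint64_t) pixels[8] << 56` feeds the ninth source pixel into the
 * top lane.  AVG2 of the two vectors is the x-direction half-pel value.
 */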
#define OP_Y2(LOAD, STORE)                      \
    do {                                        \
        uint64_t pix = LOAD(pixels);            \
        do {                                    \
            uint64_t next_pix;                  \
            pixels += line_size;                \
            next_pix = LOAD(pixels);            \
            STORE(AVG2(pix, next_pix), block);  \
            block += line_size;                 \
            pix = next_pix;                     \
        } while (--h);                          \
    } while (0)
#define OP_XY2(LOAD, STORE)                                                 \
    do {                                                                    \
        uint64_t pix1 = LOAD(pixels);                                       \
        uint64_t pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);           \
        uint64_t pix_l = (pix1 & BYTE_VEC(0x03))                            \
                       + (pix2 & BYTE_VEC(0x03));                           \
        uint64_t pix_h = ((pix1 & ~BYTE_VEC(0x03)) >> 2)                    \
                       + ((pix2 & ~BYTE_VEC(0x03)) >> 2);                   \
                                                                            \
        do {                                                                \
            uint64_t npix1, npix2;                                          \
            uint64_t npix_l, npix_h;                                        \
            uint64_t avg;                                                   \
                                                                            \
            pixels += line_size;                                            \
            npix1 = LOAD(pixels);                                           \
            npix2 = npix1 >> 8 | ((uint64_t) pixels[8] << 56);              \
            npix_l = (npix1 & BYTE_VEC(0x03))                               \
                   + (npix2 & BYTE_VEC(0x03));                              \
            npix_h = ((npix1 & ~BYTE_VEC(0x03)) >> 2)                       \
                   + ((npix2 & ~BYTE_VEC(0x03)) >> 2);                      \
            avg = (((pix_l + npix_l + AVG4_ROUNDER) >> 2) & BYTE_VEC(0x03)) \
                + pix_h + npix_h;                                           \
            STORE(avg, block);                                              \
                                                                            \
            block += line_size;                                             \
            pix_l = npix_l;                                                 \
            pix_h = npix_h;                                                 \
        } while (--h);                                                      \
    } while (0)
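/*
 * OP_XY2 carries the current row's split partial sums (pix_l, pix_h) into
 * the next iteration and only computes the new row's (npix_l, npix_h), so
 * each source row is loaded and decomposed once rather than twice; the
 * final sum is the four-pixel 2D half-pel average in every byte lane.
 */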
#define MAKE_OP(OPNAME, SUFF, OPKIND, STORE)                                \
static void OPNAME ## _pixels ## SUFF ## _axp                               \
        (uint8_t *av_restrict block, const uint8_t *av_restrict pixels,     \
         int line_size, int h)                                              \
{                                                                           \
    if ((size_t) pixels & 0x7) {                                            \
        OPKIND(uldq, STORE);                                                \
    } else {                                                                \
        OPKIND(ldq, STORE);                                                 \
    }                                                                       \
}                                                                           \
                                                                            \
static void OPNAME ## _pixels16 ## SUFF ## _axp                             \
        (uint8_t *av_restrict block, const uint8_t *av_restrict pixels,     \
         int line_size, int h)                                              \
{                                                                           \
    OPNAME ## _pixels ## SUFF ## _axp(block,     pixels,     line_size, h); \
    OPNAME ## _pixels ## SUFF ## _axp(block + 8, pixels + 8, line_size, h); \
}
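/*
 * The alignment test runs once per call, outside the row loop: uldq
 * assembles a quadword from two aligned loads when `pixels` may straddle
 * a boundary, while ldq is a single aligned load.  The 16-pixel variant
 * is simply two 8-pixel calls.
 */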
#define PIXOP(OPNAME, STORE)                    \
    MAKE_OP(OPNAME,     , OP,     STORE)        \
    MAKE_OP(OPNAME, _x2,  OP_X2,  STORE)        \
    MAKE_OP(OPNAME, _y2,  OP_Y2,  STORE)        \
    MAKE_OP(OPNAME, _xy2, OP_XY2, STORE)
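/*
 * Each PIXOP(OPNAME, STORE) therefore instantiates eight functions: the
 * 8- and 16-pixel-wide versions of OPNAME_pixels{,_x2,_y2,_xy2}_axp.
 */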
/* Rounding primitives. */
#define AVG2 avg2
#define AVG4 avg4
#define AVG4_ROUNDER BYTE_VEC(0x02)
#define STORE(l, b) stq(l, b)
PIXOP(put, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b)
PIXOP(avg, STORE);
/* Non-rounding primitives. */
#undef AVG2
#undef AVG4
#undef AVG4_ROUNDER
#undef STORE
#define AVG2 avg2_no_rnd
#define AVG4 avg4_no_rnd
#define AVG4_ROUNDER BYTE_VEC(0x01)
#define STORE(l, b) stq(l, b)
PIXOP(put_no_rnd, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b)
PIXOP(avg_no_rnd, STORE);
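/*
 * The no-rounding variants change only the primitives: avg2_no_rnd
 * truncates instead of rounding up, and AVG4_ROUNDER drops to
 * BYTE_VEC(0x01), giving (a + b + c + d + 1) >> 2 for the xy2 case, as
 * the MPEG/H.263 "no rounding" prediction mode requires.
 */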
                           int line_size, int h)

    if (!high_bit_depth) {