static void put_pixels_clamped_mvi(const int16_t *block, uint8_t *pixels,
                                   ptrdiff_t line_size)
{
    int h = 8;
    /* zap(-1, 0xaa) builds 0x00ff00ff00ff00ff without loading a constant. */
    uint64_t clampmask = zap(-1, 0xaa);

    do {
        uint64_t shorts0, shorts1;

        /* Clamp four signed 16-bit coefficients to 0..255, pack to bytes. */
        shorts0 = ldq(block);
        shorts0 = maxsw4(shorts0, 0);
        shorts0 = minsw4(shorts0, clampmask);
        stl(pkwb(shorts0), pixels);

        shorts1 = ldq(block + 4);
        shorts1 = maxsw4(shorts1, 0);
        shorts1 = minsw4(shorts1, clampmask);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block  += 8;
    } while (--h);
}
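
#if 0
/* Illustrative sketch only, not part of the original file: a portable C
   equivalent of put_pixels_clamped_mvi above, assuming the usual 8x8
   block-to-pixels contract.  The MVI version performs the same clamp on
   four 16-bit lanes at a time with maxsw4/minsw4 and packs with pkwb. */
static void put_pixels_clamped_ref(const int16_t *block, uint8_t *pixels,
                                   ptrdiff_t line_size)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;  /* clamp to 0..255 */
        }
        pixels += line_size;
    }
}
#endif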

void add_pixels_clamped_mvi(const int16_t *block, uint8_t *pixels,
                            ptrdiff_t line_size)
{
    int h = 8;
    uint64_t clampmask = zap(-1, 0xaa);  /* 0x00ff00ff00ff00ff */
    uint64_t signmask  = zap(-1, 0x33);
    signmask ^= signmask >> 1;           /* 0x8000800080008000 */

    do {
        uint64_t shorts0, pix0, signs0;
        uint64_t shorts1, pix1, signs1;

        shorts0 = ldq(block);
        shorts1 = ldq(block + 4);

        /* Save each lane's sign bit, clear it so carries cannot cross lanes,
           add the unpacked pixels, restore the sign via XOR, then clamp. */
        pix0     = unpkbw(ldl(pixels));
        signs0   = shorts0 & signmask;
        shorts0 &= ~signmask;
        shorts0 += pix0;
        shorts0 ^= signs0;
        shorts0  = maxsw4(shorts0, 0);
        shorts0  = minsw4(shorts0, clampmask);

        pix1     = unpkbw(ldl(pixels + 4));
        signs1   = shorts1 & signmask;
        shorts1 &= ~signmask;
        shorts1 += pix1;
        shorts1 ^= signs1;
        shorts1  = maxsw4(shorts1, 0);
        shorts1  = minsw4(shorts1, clampmask);

        stl(pkwb(shorts0), pixels);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block  += 8;
    } while (--h);
}
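
#if 0
/* Illustrative sketch only, not in the original file: the carry-isolation
   trick used above, shown on a single quadword.  Four 16-bit lanes are
   added without a packed-add instruction; the unpacked pixel addend is at
   most 0xff per lane, so once bit 15 is cleared no carry reaches the next
   lane. */
static inline uint64_t paddw_sketch(uint64_t coeffs, uint64_t pixels_unpacked)
{
    const uint64_t signmask = 0x8000800080008000ULL;
    uint64_t signs = coeffs & signmask;  /* save bit 15 of each lane    */
    coeffs &= ~signmask;                 /* clear it to contain carries */
    coeffs += pixels_unpacked;           /* per-lane 16-bit addition    */
    return coeffs ^ signs;               /* restore bit 15 (add mod 2)  */
}
#endif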

static void clear_blocks_axp(int16_t *blocks)
{
    uint64_t *p = (uint64_t *) blocks;
    int n = sizeof(int16_t) * 6 * 64;   /* bytes in six 64-coefficient blocks */

static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
{
    return (a & b) + (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}

static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
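
#if 0
/* Illustrative check only, not in the original file: per-byte meaning of the
   two averages above.  They rely on a + b = 2*(a & b) + (a ^ b) and
   a + b = 2*(a | b) - (a ^ b); masking (a ^ b) with 0xfe before the shift
   keeps bit 0 of one byte lane from leaking into the lane below it. */
static int check_avg2_identities(void)
{
    for (unsigned a = 0; a < 256; a++)
        for (unsigned b = 0; b < 256; b++) {
            unsigned down = (a & b) + (((a ^ b) & 0xfe) >> 1);
            unsigned up   = (a | b) - (((a ^ b) & 0xfe) >> 1);
            if (down != (a + b) / 2 || up != (a + b + 1) / 2)
                return 0;
        }
    return 1;  /* both formulas match the exact per-byte averages */
}
#endif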

static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2) + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2) + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = (( (l1 & BYTE_VEC(0x03)) + (l2 & BYTE_VEC(0x03))
                   + (l3 & BYTE_VEC(0x03)) + (l4 & BYTE_VEC(0x03))
                   + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
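
#if 0
/* Illustrative sketch only, not in the original file: avg4 restated on one
   byte, to show why the split works.  a + b + c + d = 4*high + low, and
   (4*high + low + 2) >> 2 == high + ((low + 2) >> 2), so the two partial
   sums combine exactly while each stays inside a byte lane. */
static inline unsigned avg4_one_byte(unsigned a, unsigned b,
                                     unsigned c, unsigned d)
{
    unsigned high = (a >> 2) + (b >> 2) + (c >> 2) + (d >> 2);
    unsigned low  = ((a & 3) + (b & 3) + (c & 3) + (d & 3) + 2) >> 2;
    return high + low;  /* == (a + b + c + d + 2) >> 2 for 0..255 inputs */
}
#endif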

#define OP(LOAD, STORE)                         \
    do {                                        \
        STORE(LOAD(pixels), block);             \
        pixels += line_size;                    \
        block  += line_size;                    \
    } while (--h)

#define OP_X2(LOAD, STORE)                                  \
    do {                                                    \
        uint64_t pix1, pix2;                                \
                                                            \
        pix1 = LOAD(pixels);                                \
        pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);    \
        STORE(AVG2(pix1, pix2), block);                     \
        pixels += line_size;                                \
        block  += line_size;                                \
    } while (--h)

#define OP_Y2(LOAD, STORE)                      \
    do {                                        \
        uint64_t pix = LOAD(pixels);            \
        do {                                    \
            uint64_t next_pix;                  \
            pixels += line_size;                \
            next_pix = LOAD(pixels);            \
            STORE(AVG2(pix, next_pix), block);  \
            block += line_size;                 \
            pix = next_pix;                     \
        } while (--h);                          \
    } while (0)

#define OP_XY2(LOAD, STORE)                                                 \
    do {                                                                    \
        uint64_t pix1 = LOAD(pixels);                                       \
        uint64_t pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);           \
        uint64_t pix_l = (pix1 & BYTE_VEC(0x03))                            \
                       + (pix2 & BYTE_VEC(0x03));                           \
        uint64_t pix_h = ((pix1 & ~BYTE_VEC(0x03)) >> 2)                    \
                       + ((pix2 & ~BYTE_VEC(0x03)) >> 2);                   \
                                                                            \
        do {                                                                \
            uint64_t npix1, npix2;                                          \
            uint64_t npix_l, npix_h;                                        \
            uint64_t avg;                                                   \
                                                                            \
            pixels += line_size;                                            \
            npix1 = LOAD(pixels);                                           \
            npix2 = npix1 >> 8 | ((uint64_t) pixels[8] << 56);              \
            npix_l = (npix1 & BYTE_VEC(0x03))                               \
                   + (npix2 & BYTE_VEC(0x03));                              \
            npix_h = ((npix1 & ~BYTE_VEC(0x03)) >> 2)                       \
                   + ((npix2 & ~BYTE_VEC(0x03)) >> 2);                      \
            avg = (((pix_l + npix_l + AVG4_ROUNDER) >> 2) & BYTE_VEC(0x03)) \
                + pix_h + npix_h;                                           \
            STORE(avg, block);                                              \
                                                                            \
            block += line_size;                                             \
            pix_l = npix_l;                                                 \
            pix_h = npix_h;                                                 \
        } while (--h);                                                      \
    } while (0)
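
/* OP_XY2 handles the half-pel case in both x and y: each output byte is the
   rounded average of four neighbours, using the same low-2-bit / high-6-bit
   split as avg4 above.  The split sums computed for one row (npix_l/npix_h)
   are reused as pix_l/pix_h in the next iteration, so every source row is
   loaded and split only once. */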

#define MAKE_OP(OPNAME, SUFF, OPKIND, STORE)                                \
static void OPNAME ## _pixels ## SUFF ## _axp                               \
        (uint8_t *restrict block, const uint8_t *restrict pixels,           \
         ptrdiff_t line_size, int h)                                        \
{                                                                           \
    /* Use unaligned loads only when the source is not 8-byte aligned. */   \
    if ((size_t) pixels & 0x7) {                                            \
        OPKIND(uldq, STORE);                                                \
    } else {                                                                \
        OPKIND(ldq, STORE);                                                 \
    }                                                                       \
}                                                                           \
                                                                            \
static void OPNAME ## _pixels16 ## SUFF ## _axp                             \
        (uint8_t *restrict block, const uint8_t *restrict pixels,           \
         ptrdiff_t line_size, int h)                                        \
{                                                                           \
    OPNAME ## _pixels ## SUFF ## _axp(block,     pixels,     line_size, h); \
    OPNAME ## _pixels ## SUFF ## _axp(block + 8, pixels + 8, line_size, h); \
}

#define PIXOP(OPNAME, STORE)                    \
    MAKE_OP(OPNAME,     , OP,     STORE)        \
    MAKE_OP(OPNAME, _x2,  OP_X2,  STORE)        \
    MAKE_OP(OPNAME, _y2,  OP_Y2,  STORE)        \
    MAKE_OP(OPNAME, _xy2, OP_XY2, STORE)
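
/* For reference: a single PIXOP(put, STORE) expands MAKE_OP four times and
   therefore defines eight functions (put_pixels_axp, put_pixels_x2_axp,
   put_pixels_y2_axp, put_pixels_xy2_axp plus their *_pixels16_* counterparts),
   each picking uldq or ldq according to the alignment of the source pointer. */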

/* Instantiate the operations, first with the rounding primitives ... */
#define AVG2 avg2
#define AVG4_ROUNDER BYTE_VEC(0x02)
#define STORE(l, b) stq(l, b)
PIXOP(put, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg, STORE);

/* ... then with the non-rounding primitives. */
#undef AVG2
#undef AVG4_ROUNDER
#undef STORE
#define AVG2 avg2_no_rnd
#define AVG4 avg4_no_rnd
#define AVG4_ROUNDER BYTE_VEC(0x01)
#define STORE(l, b) stq(l, b)
PIXOP(put_no_rnd, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg_no_rnd, STORE);

                                 ptrdiff_t line_size, int h)

    /* Init-time check: these routines are installed for 8-bit content only. */
    if (!high_bit_depth) {