stq(unpkbw(p), block);
stq(unpkbw(p >> 32), block + 4);
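/* unpkbw zero-extends the four low bytes of a quadword into four 16-bit
   words, so the two stores above emit one row of eight 8-bit pixels as
   eight int16 block coefficients. */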
uint64_t mask = 0x4040;
/* ... */
uint64_t x, y, c, d, a;
uint64_t signs;
/* ... */
stq(unpkbw(d) | (unpkbw(signs) << 8), block);
stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);
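/* The low byte of each 16-bit output word comes from d (the packed
   byte-wise difference) and the high byte from signs, which holds 0xff
   in lanes whose difference is negative; interleaving them with unpkbw
   yields a sign-extended 16-bit difference per pixel. */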
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
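/* Byte-wise average rounding up: (a | b) - ((a ^ b) >> 1) equals
   (a + b + 1) >> 1 in every lane, and masking with 0xfe before the
   shift keeps bits from leaking across byte boundaries. */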
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = (((l1 & BYTE_VEC(0x03))
                  + (l2 & BYTE_VEC(0x03))
                  + (l3 & BYTE_VEC(0x03))
                  + (l4 & BYTE_VEC(0x03))
                  + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);

    return r1 + r2;
}
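/* Four-way byte-wise average: the top six bits of each lane are summed
   after pre-shifting (so no carry can leave a lane), the bottom two bits
   are summed separately with a rounding bias of BYTE_VEC(0x02), and the
   two partial results are recombined. */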
if ((size_t) pix2 & 0x7) {
    /* ... unaligned rows ... */
    result += perr(p1, p2);
    /* ... */
} else {
    /* ... aligned rows ... */
    result += perr(p1, p2);
    /* ... */
}
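/* perr is the MVI "pixel error" instruction: it sums the absolute
   differences of the eight byte lanes of its operands, so a single
   instruction per row accumulates the SAD. */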
if ((size_t) pix2 & 0x7) {
    /* ... */
    uint64_t p1_l, p1_r, p2_l, p2_r;
    uint64_t t = ldq_u(pix2 + 8);

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);
    p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
    p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
    /* ... */
    result += perr(p1_l, p2_l)
            + perr(p1_r, p2_r);
    /* ... */
} else {
    /* ... */
    uint64_t p1_l, p1_r, p2_l, p2_r;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);
    p2_l = ldq(pix2);
    p2_r = ldq(pix2 + 8);
    /* ... */
    result += perr(p1_l, p2_l)
            + perr(p1_r, p2_r);
    /* ... */
}
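/* For a misaligned pix2, extql/extqh shift two adjacent ldq_u loads by
   the pointer's byte offset so that their OR reassembles the unaligned
   quadword; the middle load t is shared by both halves, so each
   16-pixel row takes three loads instead of four. */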
uint64_t disalign = (size_t) pix2 & 0x7;

switch (disalign) {
case 0:
    {
        uint64_t p1_l, p1_r, p2_l, p2_r;
        uint64_t l, r;
        /* ... */
        p1_l = ldq(pix1);
        p1_r = ldq(pix1 + 8);
        l    = ldq(pix2);
        r    = ldq(pix2 + 8);
        p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
        p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
        /* ... */
        result += perr(p1_l, p2_l)
                + perr(p1_r, p2_r);
        /* ... */
    }
    break;
case 7:
    /* |.......l|lllllllr|rrrrrrr*|: disalign1 would be 8, which the
       extract instructions treat as 0, so the generic path below cannot
       be used; the pixel-to-the-right operand is simply the next
       quadword m. */
    {
        uint64_t p1_l, p1_r, p2_l, p2_r;
        uint64_t l, m, r;
        /* ... */
        p1_l = ldq(pix1);
        p1_r = ldq(pix1 + 8);
        l    = ldq_u(pix2);
        m    = ldq_u(pix2 + 8);
        r    = ldq_u(pix2 + 16);
        p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
        p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
        /* ... */
        result += perr(p1_l, p2_l)
                + perr(p1_r, p2_r);
        /* ... */
    }
    break;
default:
    {
        uint64_t disalign1 = disalign + 1;
        uint64_t p1_l, p1_r, p2_l, p2_r;
        uint64_t l, m, r;
        /* ... */
        p1_l = ldq(pix1);
        p1_r = ldq(pix1 + 8);
        l    = ldq_u(pix2);
        m    = ldq_u(pix2 + 8);
        r    = ldq_u(pix2 + 16);
        p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                    extql(l, disalign1) | extqh(m, disalign1));
        p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                    extql(m, disalign1) | extqh(r, disalign1));
        /* ... */
        result += perr(p1_l, p2_l)
                + perr(p1_r, p2_r);
        /* ... */
    }
    break;
}
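/* Half-pel x interpolation: each row of pix2 is averaged with the row
   one pixel to the right, built by shifting the quadword right one byte
   and feeding the first byte of the following quadword (or pix2[16])
   into the top lane. */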
if ((size_t) pix2 & 0x7) {
    uint64_t t, p2_l, p2_r;

    t    = ldq_u(pix2 + 8);
    p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
    p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
    /* ... */
    {
        uint64_t p1_l, p1_r, np2_l, np2_r;

        p1_l  = ldq(pix1);
        p1_r  = ldq(pix1 + 8);
        t     = ldq_u(pix2 + 8);
        np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
        /* ... */
        result += perr(p1_l, avg2(p2_l, np2_l))
                + perr(p1_r, avg2(p2_r, np2_r));
        /* ... */
    }
} else {
    uint64_t p2_l, p2_r;

    p2_l = ldq(pix2);
    p2_r = ldq(pix2 + 8);
    /* ... */
    {
        uint64_t p1_l, p1_r, np2_l, np2_r;

        p1_l  = ldq(pix1);
        p1_r  = ldq(pix1 + 8);
        np2_l = ldq(pix2);
        np2_r = ldq(pix2 + 8);
        /* ... */
        result += perr(p1_l, avg2(p2_l, np2_l))
                + perr(p1_r, avg2(p2_r, np2_r));
        /* ... */
    }
}
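/* Half-pel y interpolation: each row of pix2 is averaged with the row
   below it; p2_* holds the current row and np2_* the next, and carrying
   np2_* forward into p2_* between rows means every quadword is loaded
   only once. */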
uint64_t p1_l, p1_r;
uint64_t p2_l, p2_r, p2_x;

p1_l = ldq(pix1);
p1_r = ldq(pix1 + 8);

if ((size_t) pix2 & 0x7) {
    p2_l = uldq(pix2);
    p2_r = uldq(pix2 + 8);
    p2_x = (uint64_t) pix2[16] << 56;
} else {
    p2_l = ldq(pix2);
    p2_r = ldq(pix2 + 8);
    p2_x = ldq(pix2 + 16) << 56;
}

/* ... */
{
    uint64_t np1_l, np1_r;
    uint64_t np2_l, np2_r, np2_x;

    /* ... */
    np1_l = ldq(pix1);
    np1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) {
        np2_l = uldq(pix2);
        np2_r = uldq(pix2 + 8);
        np2_x = (uint64_t) pix2[16] << 56;
    } else {
        np2_l = ldq(pix2);
        np2_r = ldq(pix2 + 8);
        np2_x = ldq(pix2 + 16) << 56;
    }

    result += perr(p1_l,
                   avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                        np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
            + perr(p1_r,
                   avg4( p2_r, ( p2_r >> 8) | ((uint64_t) p2_x),
                        np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));
    /* ... */
}
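/* Half-pel x and y: avg4 blends four vectors per half, namely the
   current row, that row shifted one pixel right, and the matching pair
   from the next row.  p2_x/np2_x stage each row's 17th pixel in the top
   byte lane so the shifted right half needs no extra load. */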