#undef PROFILE_THE_BEAST

typedef unsigned char ubyte;
typedef signed char sbyte;
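/*
 * Permutation masks used to interleave three planes of R/G/B bytes into
 * packed 24-bit pixels: vec_merge3() below first interleaves two of the
 * planes with vec_mergeh/vec_mergel and then uses these masks with vec_perm
 * to weave in the third, turning three 16-byte channel vectors into three
 * 16-byte vectors of packed pixels (16 pixels, 48 bytes).
 */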
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
#define vec_merge3(x2, x1, x0, y0, y1, y2)    \
do {                                          \
    __typeof__(x0) o0, o2, o3;                \
    o0 = vec_mergeh(x0, x1);                  \
    y0 = vec_perm(o0, x2, perm_rgb_0);        \
    o2 = vec_perm(o0, x2, perm_rgb_1);        \
    o3 = vec_mergel(x0, x1);                  \
    y1 = vec_perm(o3, o2, perm_rgb_2);        \
    y2 = vec_perm(o3, o2, perm_rgb_3);        \
} while (0)
#define vec_mstbgr24(x0, x1, x2, ptr)         \
do {                                          \
    __typeof__(x0) _0, _1, _2;                \
    vec_merge3(x0, x1, x2, _0, _1, _2);       \
    vec_st(_0, 0, ptr++);                     \
    vec_st(_1, 0, ptr++);                     \
    vec_st(_2, 0, ptr++);                     \
} while (0)
#define vec_mstrgb24(x0, x1, x2, ptr)         \
do {                                          \
    __typeof__(x0) _0, _1, _2;                \
    vec_merge3(x2, x1, x0, _0, _1, _2);       \
    vec_st(_0, 0, ptr++);                     \
    vec_st(_1, 0, ptr++);                     \
    vec_st(_2, 0, ptr++);                     \
} while (0)
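/*
 * Store 16 packed 32-bit pixels built from four byte planes (the out_*
 * macros below pass R, G, B plus a constant 255 vector for alpha, in the
 * channel order the destination format expects).  Two rounds of byte and
 * halfword merges interleave the planes; four 16-byte stores write the
 * resulting 64 bytes.
 */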
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)            \
do {                                                    \
    T _0, _1, _2, _3;                                   \
    _0 = vec_mergeh(x0, x1);                            \
    _1 = vec_mergeh(x2, x3);                            \
    _2 = (T) vec_mergeh((vector unsigned short) _0,     \
                        (vector unsigned short) _1);    \
    _3 = (T) vec_mergel((vector unsigned short) _0,     \
                        (vector unsigned short) _1);    \
    vec_st(_2, 0 * 16, (T *) ptr);                      \
    vec_st(_3, 1 * 16, (T *) ptr);                      \
    _0 = vec_mergel(x0, x1);                            \
    _1 = vec_mergel(x2, x3);                            \
    _2 = (T) vec_mergeh((vector unsigned short) _0,     \
                        (vector unsigned short) _1);    \
    _3 = (T) vec_mergel((vector unsigned short) _0,     \
                        (vector unsigned short) _1);    \
    vec_st(_2, 2 * 16, (T *) ptr);                      \
    vec_st(_3, 3 * 16, (T *) ptr);                      \
    ptr += 4;                                           \
} while (0)
#define vec_unh(x)                                                   \
    (vector signed short)                                            \
        vec_perm(x, (__typeof__(x)) { 0 },                           \
                 ((vector unsigned char) {                           \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03, \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x)                                                   \
    (vector signed short)                                            \
        vec_perm(x, (__typeof__(x)) { 0 },                           \
                 ((vector unsigned char) {                           \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B, \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))
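/*
 * vec_clip_s16() clamps 16-bit samples to the 16..235 video range;
 * vec_packclp() clamps two signed-short vectors at zero and packs them with
 * unsigned saturation into one vector of 16 unsigned bytes.
 */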
#define vec_clip_s16(x)                                                   \
    vec_max(vec_min(x, ((vector signed short) {                           \
                235, 235, 235, 235, 235, 235, 235, 235 })),               \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))
#define vec_packclp(x, y)                                        \
    (vector unsigned char)                                       \
        vec_packs((vector unsigned short)                        \
                      vec_max(x, ((vector signed short) { 0 })), \
                  (vector unsigned short)                        \
                      vec_max(y, ((vector signed short) { 0 })))
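/*
 * Per-lane sketch of the fixed-point math below, assuming the usual
 * vec_mradds() (vmhraddshs) behaviour of ((a * b + 0x4000) >> 15) + c with
 * saturation:
 *
 *     y = ((Y * CY + 0x4000) >> 15) + OY
 *     b = y + ((((U - 128) << CSHIFT) * CBU + 0x4000) >> 15)
 *     r = y + ((((V - 128) << CSHIFT) * CRV + 0x4000) >> 15)
 *     g = y + (((U - 128) * CGU + 0x4000) >> 15)
 *           + (((V - 128) * CGV + 0x4000) >> 15)
 *
 * with CY, OY, CRV, CBU, CGU, CGV being the coefficient vectors set up in
 * ff_yuv2rgb_init_tables_ppc().
 */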
static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
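/*
 * DEFCSP420_CVT() expands to one planar 4:2:0 -> packed RGB slice converter
 * per output format.  Each inner iteration loads 16 luma bytes from two
 * consecutive rows plus 8 U and 8 V bytes, applies the shared chroma
 * contribution to both rows, and hands the R/G/B byte vectors to the
 * out_pixels() store macro for the even (oute) and odd (outo) output row.
 */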
#define DEFCSP420_CVT(name, out_pixels)                                    \
static int altivec_ ## name(SwsContext *c, const unsigned char **in,      \
                            int *instrides, int srcSliceY, int srcSliceH, \
                            unsigned char **oplanes, int *outstrides)     \
{                                                                          \
    int w = c->srcW;                                                       \
    int h = srcSliceH;                                                     \
    int i, j;                                                              \
    int instrides_scl[3];                                                  \
    vector unsigned char y0, y1;                                           \
                                                                           \
    vector signed char u, v;                                               \
                                                                           \
    vector signed short Y0, Y1, Y2, Y3;                                    \
    vector signed short U, V;                                              \
    vector signed short vx, ux, uvx;                                       \
    vector signed short vx0, ux0, uvx0;                                    \
    vector signed short vx1, ux1, uvx1;                                    \
    vector signed short R0, G0, B0;                                        \
    vector signed short R1, G1, B1;                                        \
    vector unsigned char R, G, B;                                          \
                                                                           \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;               \
    vector unsigned char align_perm;                                       \
                                                                           \
    vector signed short lCY       = c->CY;                                 \
    vector signed short lOY       = c->OY;                                 \
    vector signed short lCRV      = c->CRV;                                \
    vector signed short lCBU      = c->CBU;                                \
    vector signed short lCGU      = c->CGU;                                \
    vector signed short lCGV      = c->CGV;                                \
    vector unsigned short lCSHIFT = c->CSHIFT;                             \
                                                                           \
    const ubyte *y1i = in[0];                                              \
    const ubyte *y2i = in[0] + instrides[0];                               \
    const ubyte *ui  = in[1];                                              \
    const ubyte *vi  = in[2];                                              \
                                                                           \
    vector unsigned char *oute, *outo;                                     \
                                                                           \
    instrides_scl[0] = instrides[0] * 2 - w;                               \
    instrides_scl[1] = instrides[1] - w / 2;                               \
    instrides_scl[2] = instrides[2] - w / 2;                               \
    for (i = 0; i < h / 2; i++) {                                          \
        oute = (vector unsigned char *)(oplanes[0] + outstrides[0] *       \
                                        (srcSliceY + i * 2));              \
        outo = oute + (outstrides[0] >> 4);                                \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);    \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);    \
                                                                           \
        for (j = 0; j < w / 16; j++) {                                     \
            y1ivP = (const vector unsigned char *) y1i;                    \
            y2ivP = (const vector unsigned char *) y2i;                    \
            uivP  = (const vector unsigned char *) ui;                     \
            vivP  = (const vector unsigned char *) vi;                     \
                                                                           \
            align_perm = vec_lvsl(0, y1i);                                 \
            y0 = (vector unsigned char)                                    \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm);             \
                                                                           \
            align_perm = vec_lvsl(0, y2i);                                 \
            y1 = (vector unsigned char)                                    \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm);             \
                                                                           \
            align_perm = vec_lvsl(0, ui);                                  \
            u = (vector signed char)                                       \
                    vec_perm(uivP[0], uivP[1], align_perm);                \
                                                                           \
            align_perm = vec_lvsl(0, vi);                                  \
            v = (vector signed char)                                       \
                    vec_perm(vivP[0], vivP[1], align_perm);                \
            u = (vector signed char)                                       \
                    vec_sub(u,                                             \
                            (vector signed char)                           \
                                vec_splat((vector signed char) { 128 }, 0)); \
            v = (vector signed char)                                       \
                    vec_sub(v,                                             \
                            (vector signed char)                           \
                                vec_splat((vector signed char) { 128 }, 0)); \
                                                                           \
            U = vec_unpackh(u);                                            \
            V = vec_unpackh(v);                                            \
                                                                           \
            Y0 = vec_unh(y0);                                              \
            Y1 = vec_unl(y0);                                              \
            Y2 = vec_unh(y1);                                              \
            Y3 = vec_unl(y1);                                              \
                                                                           \
            Y0 = vec_mradds(Y0, lCY, lOY);                                 \
            Y1 = vec_mradds(Y1, lCY, lOY);                                 \
            Y2 = vec_mradds(Y2, lCY, lOY);                                 \
            Y3 = vec_mradds(Y3, lCY, lOY);                                 \
            ux  = vec_sl(U, lCSHIFT);                                      \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });       \
            ux0 = vec_mergeh(ux, ux);                                      \
            ux1 = vec_mergel(ux, ux);                                      \
                                                                           \
            vx  = vec_sl(V, lCSHIFT);                                      \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });       \
            vx0 = vec_mergeh(vx, vx);                                      \
            vx1 = vec_mergel(vx, vx);                                      \
                                                                           \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });       \
            uvx  = vec_mradds(V, lCGV, uvx);                               \
            uvx0 = vec_mergeh(uvx, uvx);                                   \
            uvx1 = vec_mergel(uvx, uvx);                                   \
                                                                           \
            R0 = vec_add(Y0, vx0);                                         \
            G0 = vec_add(Y0, uvx0);                                        \
            B0 = vec_add(Y0, ux0);                                         \
            R1 = vec_add(Y1, vx1);                                         \
            G1 = vec_add(Y1, uvx1);                                        \
            B1 = vec_add(Y1, ux1);                                         \
                                                                           \
            R = vec_packclp(R0, R1);                                       \
            G = vec_packclp(G0, G1);                                       \
            B = vec_packclp(B0, B1);                                       \
                                                                           \
            out_pixels(R, G, B, oute);                                     \
                                                                           \
            R0 = vec_add(Y2, vx0);                                         \
            G0 = vec_add(Y2, uvx0);                                        \
            B0 = vec_add(Y2, ux0);                                         \
            R1 = vec_add(Y3, vx1);                                         \
            G1 = vec_add(Y3, uvx1);                                        \
            B1 = vec_add(Y3, ux1);                                         \
            R  = vec_packclp(R0, R1);                                      \
            G  = vec_packclp(G0, G1);                                      \
            B  = vec_packclp(B0, B1);                                      \
                                                                           \
            out_pixels(R, G, B, outo);                                     \
                                                                           \
            y1i += 16; y2i += 16; ui += 8; vi += 8;                        \
        }                                                                  \
                                                                           \
        ui  += instrides_scl[1];                                           \
        vi  += instrides_scl[2];                                           \
        y1i += instrides_scl[0];                                           \
        y2i += instrides_scl[0];                                           \
    }                                                                      \
    return srcSliceH;                                                      \
}
#define out_abgr(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)
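/*
 * The out_* adapters map the (R, G, B) arguments onto the byte order each
 * packed format expects; the 32-bit variants splice in a constant 255 vector
 * as the alpha channel, while the 24-bit variants go through the
 * merge/permute store macros above.
 */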
DEFCSP420_CVT(yuv2_abgr,  out_abgr)
DEFCSP420_CVT(yuv2_bgra,  out_bgra)
DEFCSP420_CVT(yuv2_rgba,  out_rgba)
DEFCSP420_CVT(yuv2_argb,  out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
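/*
 * Demux masks for UYVY input: index 0x10 selects the zero byte of the second
 * vec_perm operand, so each mask expands the U, V or Y bytes of one 16-byte
 * UYVY vector (8 pixels) into eight 16-bit lanes with a zero high byte,
 * ready for the signed-short conversion in cvtyuvtoRGB().
 */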
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img = in[0];

    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);
    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            out_rgba(R, G, B, out);

            img += 32;
        }

    return srcSliceH;
}
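/*
 * ff_yuv2rgb_init_ppc() selects one of the converters above based on the
 * source and destination pixel formats.  Widths that are not a multiple of
 * 16 are rejected, as are odd heights on the planar 4:2:0 path, so the
 * vector loops never have to deal with partial groups.
 */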
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
{
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24: return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24: return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:  return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:  return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:  return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:  return altivec_yuv2_bgra;
        default:               return NULL;
        }

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB32: return altivec_uyvy_rgb32;
        default:               return NULL;
        }
    }
    return NULL;
}
av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c, const int inv_table[4],
                                        int brightness, int contrast,
                                        int saturation)
{
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;
    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;
    buf.tmp[1] = -256 * brightness;
    buf.tmp[2] = (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);
    buf.tmp[3] = (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16));
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16));
    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY  = vec_splat((vector signed short) buf.vec, 0);
    c->OY  = vec_splat((vector signed short) buf.vec, 1);
    c->CRV = vec_splat((vector signed short) buf.vec, 2);
    c->CBU = vec_splat((vector signed short) buf.vec, 3);
    c->CGU = vec_splat((vector signed short) buf.vec, 4);
    c->CGV = vec_splat((vector signed short) buf.vec, 5);
}
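/*
 * buf.tmp[0..5] hold the coefficients CY, OY, CRV, CBU, CGU and CGV;
 * vec_splat() broadcasts each into its own vector for cvtyuvtoRGB().
 * Sanity check: with neutral settings (contrast == saturation == 1 << 16,
 * brightness == 0) tmp[0] evaluates to 0x7fff, i.e. a luma gain of roughly
 * 1.0 in Q15, and tmp[1] (the luma offset) to 0.
 */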
static void yuv2packedX_altivec(SwsContext *c,
                                const int16_t *lumFilter,
                                const int16_t **lumSrc,
                                int lumFilterSize,
                                const int16_t *chrFilter,
                                const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize,
                                const int16_t **alpSrc,
                                uint8_t *dest,
                                int dstW, int dstY,
                                enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;
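/*
 * Each pass of the loop below produces 16 output pixels: the luma filter
 * accumulates 16 Y samples and the chroma filter 8 U and 8 V samples into
 * accumulators seeded with the rounding bias RND, which are then shifted
 * down by SCL bits, range-clipped, duplicated per pixel pair and converted
 * with cvtyuvtoRGB().
 */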
    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0,  &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }
        U = RND;
        V = RND;
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);
        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);
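/*
 * Dispatch the packed R/G/B byte vectors to the requested output format;
 * the out_* store macros advance the output pointer themselves.
 */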
        switch (target) {
        case AV_PIX_FMT_ABGR:  out_abgr(R, G, B, out);  break;
        case AV_PIX_FMT_BGRA:  out_bgra(R, G, B, out);  break;
        case AV_PIX_FMT_RGBA:  out_rgba(R, G, B, out);  break;
        case AV_PIX_FMT_ARGB:  out_argb(R, G, B, out);  break;
        case AV_PIX_FMT_RGB24: out_rgb24(R, G, B, out); break;
        case AV_PIX_FMT_BGR24: out_bgr24(R, G, B, out); break;
        default:
        {
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       av_get_pix_fmt_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }
    if (i < dstW) {
        Y0 = RND;
        Y1 = RND;
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0,  &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }
        U = RND;
        V = RND;
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);
        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);
        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:  out_abgr(R, G, B, nout);  break;
        case AV_PIX_FMT_BGRA:  out_bgra(R, G, B, nout);  break;
        case AV_PIX_FMT_RGBA:  out_rgba(R, G, B, nout);  break;
        case AV_PIX_FMT_ARGB:  out_argb(R, G, B, nout);  break;
        case AV_PIX_FMT_RGB24: out_rgb24(R, G, B, nout); break;
        case AV_PIX_FMT_BGR24: out_bgr24(R, G, B, nout); break;
        default:
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   av_get_pix_fmt_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}
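/*
 * Each instantiation of this wrapper is a thin public entry point that
 * forwards to yuv2packedX_altivec() with the pixel format baked in.  An
 * instantiation would look like the following (illustrative only; the
 * actual list of formats instantiated in this file is not shown here):
 *
 *     YUV2PACKEDX_WRAPPER(abgr, AV_PIX_FMT_ABGR)
 */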