#define COSTABLE(size) \
    static DECLARE_ALIGNED(32, FFTSample, FFT_NAME(ff_cos_##size))[size/2]
static av_cold void init_ff_cos_tabs(int index)
{
    int m = 1 << index;
    double freq = 2*M_PI/m;
    FFTSample *tab = FFT_NAME(ff_cos_tabs)[index];

    /* Compute the first quadrant directly... */
    for (int i = 0; i <= m/4; i++)
        tab[i] = cos(i*freq);
    /* ...and mirror it into the second by symmetry */
    for (int i = 1; i < m/4; i++)
        tab[m/2 - i] = tab[i];
}
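A quick standalone sketch (my own, not part of tx.c) of the same quarter-wave trick: only the first quadrant is computed with cos(), the second is mirrored, and the split-radix passes later read sines out of the same table backwards (see the wim pointer in the pass functions below).

#include <math.h>
#include <stdio.h>

int main(void)
{
    enum { M = 16 };
    float tab[M/2 + 1] = { 0 };
    double freq = 2*M_PI/M;

    for (int i = 0; i <= M/4; i++)
        tab[i] = cos(i*freq);
    for (int i = 1; i < M/4; i++)
        tab[M/2 - i] = tab[i];

    for (int i = 0; i < M/2; i++) /* mirrored cosine quarter-table */
        printf("%d: %f\n", i, tab[i]);
    return 0;
}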
#define INIT_FF_COS_TABS_FUNC(index, size)            \
static av_cold void init_ff_cos_tabs_ ## size (void)  \
{                                                     \
    init_ff_cos_tabs(index);                          \
}
/* Butterfly: x = a - b, y = a + b */
#define BF(x, y, a, b) do { \
        x = (a) - (b);      \
        y = (a) + (b);      \
    } while (0)
#define CMUL(dre, dim, are, aim, bre, bim) do {  \
        (dre) = (are) * (bre) - (aim) * (bim);   \
        (dim) = (are) * (bim) + (aim) * (bre);   \
    } while (0)

#define CMUL3(c, a, b) CMUL((c).re, (c).im, (a).re, (a).im, (b).re, (b).im)
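Pasted after the macros above, this throwaway check (mine, not library code) confirms the sign convention: CMUL is a full complex multiply, (a.re + i*a.im)(b.re + i*b.im).

#include <stdio.h>

int main(void)
{
    float re, im;
    /* (1 + 2i) * (3 + 4i) = (3 - 8) + (4 + 6)i = -5 + 10i */
    CMUL(re, im, 1.0f, 2.0f, 3.0f, 4.0f);
    printf("%g%+gi\n", re, im); /* prints -5+10i */
    return 0;
}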
static av_always_inline void fft3(FFTComplex *out, FFTComplex *in,
                                  ptrdiff_t stride)
{
    FFTComplex tmp[2];

    tmp[0].re = in[1].im - in[2].im;
    tmp[0].im = in[1].re - in[2].re;
    tmp[1].re = in[1].re + in[2].re;
    tmp[1].im = in[1].im + in[2].im;

    /* ... remaining lines elided: in[0] is combined with tmp[], scaled by
     * the ff_53_tabs constants, to produce out[0], out[stride] and
     * out[2*stride] ... */
}
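For sanity-checking hand-unrolled kernels such as fft3, an O(n^2) reference DFT is handy; this helper is my own addition, not part of tx.c:

#include <complex.h>
#include <math.h>

/* Textbook DFT: out[k] = sum_j in[j] * e^(-2*pi*i*j*k/n) */
static void naive_dft(float complex *out, const float complex *in, int n)
{
    for (int k = 0; k < n; k++) {
        float complex acc = 0.0f;
        for (int j = 0; j < n; j++)
            acc += in[j] * cexpf(-2.0f * (float)M_PI * I * j * k / n);
        out[k] = acc;
    }
}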
#define DECL_FFT5(NAME, D0, D1, D2, D3, D4)                            \
static av_always_inline void NAME(FFTComplex *out, FFTComplex *in,     \
                                  ptrdiff_t stride)                    \
{                                                                      \
    FFTComplex z0[4], t[6];                                            \
                                                                       \
    t[0].re = in[1].re + in[4].re;                                     \
    t[0].im = in[1].im + in[4].im;                                     \
    t[1].im = in[1].re - in[4].re;                                     \
    t[1].re = in[1].im - in[4].im;                                     \
    t[2].re = in[2].re + in[3].re;                                     \
    t[2].im = in[2].im + in[3].im;                                     \
    t[3].im = in[2].re - in[3].re;                                     \
    t[3].re = in[2].im - in[3].im;                                     \
                                                                       \
    out[D0*stride].re = in[0].re + in[1].re + in[2].re +               \
                        in[3].re + in[4].re;                           \
    out[D0*stride].im = in[0].im + in[1].im + in[2].im +               \
                        in[3].im + in[4].im;                           \
                                                                       \
    t[4].re = ff_53_tabs[2].re * t[2].re - ff_53_tabs[3].re * t[0].re; \
    t[4].im = ff_53_tabs[2].re * t[2].im - ff_53_tabs[3].re * t[0].im; \
    t[0].re = ff_53_tabs[2].re * t[0].re - ff_53_tabs[3].re * t[2].re; \
    t[0].im = ff_53_tabs[2].re * t[0].im - ff_53_tabs[3].re * t[2].im; \
    t[5].re = ff_53_tabs[2].im * t[3].re - ff_53_tabs[3].im * t[1].re; \
    t[5].im = ff_53_tabs[2].im * t[3].im - ff_53_tabs[3].im * t[1].im; \
    t[1].re = ff_53_tabs[2].im * t[1].re + ff_53_tabs[3].im * t[3].re; \
    t[1].im = ff_53_tabs[2].im * t[1].im + ff_53_tabs[3].im * t[3].im; \
                                                                       \
    z0[0].re = t[0].re - t[1].re;                                      \
    z0[0].im = t[0].im - t[1].im;                                      \
    z0[1].re = t[4].re + t[5].re;                                      \
    z0[1].im = t[4].im + t[5].im;                                      \
                                                                       \
    z0[2].re = t[4].re - t[5].re;                                      \
    z0[2].im = t[4].im - t[5].im;                                      \
    z0[3].re = t[0].re + t[1].re;                                      \
    z0[3].im = t[0].im + t[1].im;                                      \
                                                                       \
    out[D1*stride].re = in[0].re + z0[3].re;                           \
    out[D1*stride].im = in[0].im + z0[0].im;                           \
    out[D2*stride].re = in[0].re + z0[2].re;                           \
    out[D2*stride].im = in[0].im + z0[1].im;                           \
    out[D3*stride].re = in[0].re + z0[1].re;                           \
    out[D3*stride].im = in[0].im + z0[2].im;                           \
    out[D4*stride].re = in[0].re + z0[0].re;                           \
    out[D4*stride].im = in[0].im + z0[3].im;                           \
}
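The full source instantiates the macro once as the plain 5-point transform and three more times with permuted output positions for use inside the 15-point transform. The index sets below are my reading of the 3x5 CRT layout (each variant writes to (10*block + 6*t) mod 15), not quoted from this page:

DECL_FFT5(fft5,     0,  1,  2,  3,  4)
DECL_FFT5(fft5_m1,  0,  6, 12,  3,  9)
DECL_FFT5(fft5_m2, 10,  1,  7, 13,  4)
DECL_FFT5(fft5_m3,  5, 11,  2,  8, 14)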
static av_always_inline void fft15(FFTComplex *out, FFTComplex *in,
                                   ptrdiff_t stride)
{
    FFTComplex tmp[15];

    /* Five 3-point transforms over a stride-3 decimation of the input... */
    for (int i = 0; i < 5; i++)
        fft3(tmp + i, in + i*3, 5);

    /* ... followed by three 5-point transforms (the fft5 variants above),
     * whose D0..D4 orders scatter the outputs straight into place. */
}
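Because 3 and 5 are coprime, this prime-factor split needs no inter-stage twiddle multiplies: every index k of the 15-point transform corresponds uniquely to the residue pair (k mod 3, k mod 5), e.g. k = 7 -> (1, 2).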
#define BUTTERFLIES(a0,a1,a2,a3) {\
    BF(t3, t5, t5, t1);\
    BF(a2.re, a0.re, a0.re, t5);\
    BF(a3.im, a1.im, a1.im, t3);\
    BF(t4, t6, t2, t6);\
    BF(a3.re, a1.re, a1.re, t4);\
    BF(a2.im, a0.im, a0.im, t6);\
}
/* Same butterflies, but operating on locally saved copies of a0/a1 so the
 * stores to a2/a3 cannot alias them */
#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
    FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
    BF(t3, t5, t5, t1);\
    BF(a2.re, a0.re, r0, t5);\
    BF(a3.im, a1.im, i1, t3);\
    BF(t4, t6, t2, t6);\
    BF(a3.re, a1.re, r1, t4);\
    BF(a2.im, a0.im, i0, t6);\
}
#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
    CMUL(t1, t2, a2.re, a2.im, wre, -wim);\
    CMUL(t5, t6, a3.re, a3.im, wre,  wim);\
    BUTTERFLIES(a0,a1,a2,a3)\
}

/* Twiddle-free variant for the k = 0 butterfly, where w = 1 */
#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
    t1 = a2.re;\
    t2 = a2.im;\
    t5 = a3.re;\
    t6 = a3.im;\
    BUTTERFLIES(a0,a1,a2,a3)\
}
/* One split-radix recombination pass over z[0..8n-1], two butterflies at a
 * time, walking the cosine table forwards and the sine half backwards */
#define PASS(name)\
static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
{\
    FFTSample t1, t2, t3, t4, t5, t6;\
    int o1 = 2*n;\
    int o2 = 4*n;\
    int o3 = 6*n;\
    const FFTSample *wim = wre + o1;\
    n--;\
\
    TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
    TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
    do {\
        z += 2;\
        wre += 2;\
        wim -= 2;\
        TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
        TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
    } while (--n);\
}

PASS(pass)
#undef BUTTERFLIES
#define BUTTERFLIES BUTTERFLIES_BIG
PASS(pass_big)
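The only difference in the second instantiation is BUTTERFLIES_BIG: since for large transforms a2/a3 point far away from a0/a1, working on saved copies keeps those stores from forcing the compiler to reload a0/a1.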
#define DECL_FFT(n,n2,n4)\
static void fft##n(FFTComplex *z)\
{\
    fft##n2(z);\
    fft##n4(z+n4*2);\
    fft##n4(z+n4*3);\
    pass(z,FFT_NAME(ff_cos_##n),n4/2);\
}
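As a concrete expansion (assuming the macro above), DECL_FFT(32,16,8) produces a 32-point transform from one 16-point and two 8-point halves merged by a single pass over the 32-point table:

static void fft32(FFTComplex *z)
{
    fft16(z);
    fft8(z+16);
    fft8(z+24);
    pass(z,FFT_NAME(ff_cos_32),4);
}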
static void fft16(FFTComplex *z)
{
    FFTSample t1, t2, t3, t4, t5, t6;
    FFTSample cos_16_1 = FFT_NAME(ff_cos_16)[1];
    FFTSample cos_16_3 = FFT_NAME(ff_cos_16)[3];

    fft8(z);
    fft4(z+8);
    fft4(z+12);

    TRANSFORM_ZERO(z[0],z[4],z[8],z[12]);
    TRANSFORM(z[2],z[6],z[10],z[14],M_SQRT1_2,M_SQRT1_2);
    TRANSFORM(z[1],z[5],z[9],z[13],cos_16_1,cos_16_3);
    TRANSFORM(z[3],z[7],z[11],z[15],cos_16_3,cos_16_1);
}

/* The larger sizes use the BUTTERFLIES_BIG variant of the pass */
#define pass pass_big
static void (*const fft_dispatch[])(FFTComplex *) = {
    fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
    fft2048, fft4096, fft8192, fft16384, fft32768, fft65536, fft131072,
};
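The table is indexed with log2 of the transform size minus 2, so entry 0 is fft4; e.g.:

fft_dispatch[av_log2(16) - 2](z); /* av_log2(16) == 4, entry 2 runs fft16() */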
#define DECL_COMP_FFT(N)                                                \
static void compound_fft_##N##xM(AVTXContext *s, void *_out,            \
                                 void *_in, ptrdiff_t stride)           \
{                                                                       \
    const int m = s->m, *in_map = s->pfatab, *out_map = in_map + N*m;   \
    FFTComplex *in = _in;                                               \
    FFTComplex *out = _out;                                             \
    FFTComplex fft##N##in[N];                                           \
    void (*fftp)(FFTComplex *z) = fft_dispatch[av_log2(m) - 2];         \
                                                                        \
    for (int i = 0; i < m; i++) {                                       \
        for (int j = 0; j < N; j++)                                     \
            fft##N##in[j] = in[in_map[i*N + j]];                        \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                   \
    }                                                                   \
                                                                        \
    for (int i = 0; i < N; i++)                                         \
        fftp(s->tmp + m*i);                                             \
                                                                        \
    for (int i = 0; i < N*m; i++)                                       \
        out[i] = s->tmp[out_map[i]];                                    \
}
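The compound transform is three stages: gather each length-N slice through the Ruritanian input map and run the small prime-size FFT, scattering its outputs into s->tmp at the bit-reversed row position; run the power-of-two FFT down each of the N rows; then unscramble through the CRT output map. The full source instantiates it for the three supported factors (the names reappear in init_mdct_fft below):

DECL_COMP_FFT(3)
DECL_COMP_FFT(5)
DECL_COMP_FFT(15)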
static void monolithic_fft(AVTXContext *s, void *_out, void *_in,
                           ptrdiff_t stride)
{
    FFTComplex *in = _in;
    FFTComplex *out = _out;
    int m = s->m, mb = av_log2(m) - 2;

    for (int i = 0; i < m; i++)
        out[s->revtab[i]] = in[i];

    fft_dispatch[mb](out);
}
#define DECL_COMP_IMDCT(N)                                                   \
static void compound_imdct_##N##xM(AVTXContext *s, void *_dst, void *_src,   \
                                   ptrdiff_t stride)                         \
{                                                                            \
    FFTComplex fft##N##in[N];                                                \
    FFTComplex *z = _dst, *exp = s->exptab;                                  \
    const int m = s->m, len8 = N*m >> 1;                                     \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                  \
    const float *src = _src, *in1, *in2;                                     \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m) - 2];               \
                                                                             \
    stride /= sizeof(*src);                                                  \
    in1 = src;                                                               \
    in2 = src + ((N*m*2) - 1) * stride;                                      \
                                                                             \
    for (int i = 0; i < m; i++) {                                            \
        for (int j = 0; j < N; j++) {                                        \
            const int k = in_map[i*N + j];                                   \
            FFTComplex tmp = { in2[-k*stride], in1[k*stride] };              \
            CMUL3(fft##N##in[j], tmp, exp[k >> 1]);                          \
        }                                                                    \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                        \
    }                                                                        \
                                                                             \
    for (int i = 0; i < N; i++)                                              \
        fftp(s->tmp + m*i);                                                  \
                                                                             \
    for (int i = 0; i < len8; i++) {                                         \
        const int i0 = len8 + i, i1 = len8 - i - 1;                          \
        const int s0 = out_map[i0], s1 = out_map[i1];                        \
        FFTComplex src1 = { s->tmp[s1].im, s->tmp[s1].re };                  \
        FFTComplex src0 = { s->tmp[s0].im, s->tmp[s0].re };                  \
                                                                             \
        CMUL(z[i1].re, z[i0].im, src1.re, src1.im, exp[i1].im, exp[i1].re);  \
        CMUL(z[i0].re, z[i1].im, src0.re, src0.im, exp[i0].im, exp[i0].re);  \
    }                                                                        \
}
#define DECL_COMP_MDCT(N)                                                    \
static void compound_mdct_##N##xM(AVTXContext *s, void *_dst, void *_src,    \
                                  ptrdiff_t stride)                          \
{                                                                            \
    float *src = _src, *dst = _dst;                                          \
    FFTComplex *exp = s->exptab, tmp, fft##N##in[N];                         \
    const int m = s->m, len4 = N*m, len3 = len4 * 3, len8 = len4 >> 1;       \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                  \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m) - 2];               \
                                                                             \
    stride /= sizeof(*dst);                                                  \
                                                                             \
    for (int i = 0; i < m; i++) {                                            \
        for (int j = 0; j < N; j++) {                                        \
            const int k = in_map[i*N + j];                                   \
            if (k < len4) {                                                  \
                tmp.re = -src[ len4 + k] + src[1*len4 - 1 - k];              \
                tmp.im = -src[ len3 + k] - src[1*len3 - 1 - k];              \
            } else {                                                         \
                tmp.re = -src[ len4 + k] - src[5*len4 - 1 - k];              \
                tmp.im =  src[-len4 + k] - src[1*len3 - 1 - k];              \
            }                                                                \
            CMUL(fft##N##in[j].im, fft##N##in[j].re, tmp.re, tmp.im,         \
                 exp[k >> 1].re, exp[k >> 1].im);                            \
        }                                                                    \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                        \
    }                                                                        \
                                                                             \
    for (int i = 0; i < N; i++)                                              \
        fftp(s->tmp + m*i);                                                  \
                                                                             \
    for (int i = 0; i < len8; i++) {                                         \
        const int i0 = len8 + i, i1 = len8 - i - 1;                          \
        const int s0 = out_map[i0], s1 = out_map[i1];                        \
        FFTComplex src1 = { s->tmp[s1].re, s->tmp[s1].im };                  \
        FFTComplex src0 = { s->tmp[s0].re, s->tmp[s0].im };                  \
                                                                             \
        CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.re, src0.im,  \
             exp[i0].im, exp[i0].re);                                        \
        CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.re, src1.im,  \
             exp[i1].im, exp[i1].re);                                        \
    }                                                                        \
}
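Both MDCT kernels use the standard construction of an MDCT via a quarter-length complex FFT: the two branches fold the overlapped input window into a quarter-length complex sequence with the exptab twiddles applied up front, and the final loop applies the matching rotation while writing the two symmetric output halves.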
static void monolithic_imdct(AVTXContext *s, void *_dst, void *_src,
                             ptrdiff_t stride)
{
    FFTComplex *z = _dst, *exp = s->exptab;
    const int m = s->m, len8 = m >> 1;
    const float *src = _src, *in1, *in2;
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m) - 2];

    stride /= sizeof(*src);
    in1 = src;
    in2 = src + ((m*2) - 1) * stride;

    for (int i = 0; i < m; i++) {
        /* ... same pre-FFT fold and twiddle as the compound version,
         * minus the PFA input map ... */
    }

    fftp(z);

    for (int i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        /* ... same post-FFT rotation as the compound version,
         * minus the output map ... */
    }
}
static void monolithic_mdct(AVTXContext *s, void *_dst, void *_src,
                            ptrdiff_t stride)
{
    float *src = _src, *dst = _dst;
    FFTComplex *exp = s->exptab, tmp, *z = _dst;
    const int m = s->m, len4 = m, len3 = len4 * 3, len8 = len4 >> 1;
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m) - 2];

    stride /= sizeof(*dst);

    for (int i = 0; i < m; i++) {
        const int k = 2*i;
        if (k < len4) {
            tmp.re = -src[ len4 + k] + src[1*len4 - 1 - k];
            tmp.im = -src[ len3 + k] - src[1*len3 - 1 - k];
        } else {
            tmp.re = -src[ len4 + k] - src[5*len4 - 1 - k];
            tmp.im =  src[-len4 + k] - src[1*len3 - 1 - k];
        }
        /* ... twiddle by exp[i] into z[s->revtab[i]] ... */
    }

    fftp(z);

    for (int i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        /* ... same post-FFT rotation as the compound version ... */
    }
}
static int mulinv(int n, int m)
{
    n = n % m;
    for (int x = 1; x < m; x++)
        if (((n * x) % m) == 1)
            return x;
    av_assert0(0); /* Never reached: the inputs must be coprime */
}
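For example, mulinv(3, 5) == 2, since 3*2 = 6 ≡ 1 (mod 5). gen_compound_mapping below needs these inverses to build the CRT output permutation.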
static int gen_compound_mapping(AVTXContext *s, int n, int m, int inv,
                                enum AVTXType type)
{
    int *in_map, *out_map;
    const int len = n*m;
    const int m_inv = mulinv(m, n);
    const int n_inv = mulinv(n, m);
    const int mdct = type == AV_TX_FLOAT_MDCT;

    if (!(s->pfatab = av_malloc(2*len*sizeof(*s->pfatab))))
        return AVERROR(ENOMEM);

    in_map  = s->pfatab;
    out_map = s->pfatab + n*m;

    /* Ruritanian map for the input, CRT map for the output */
    for (int j = 0; j < m; j++) {
        for (int i = 0; i < n; i++) {
            /* Shifted by one bit for MDCTs to simplify the folding */
            in_map[j*n + i] = ((i*m + j*n) % len) << mdct;
            out_map[(i*m*m_inv + j*n*n_inv) % len] = i*m + j;
        }
    }

    /* Change transform direction by reversing all ACs */
    if (inv) {
        for (int i = 0; i < m; i++) {
            int *in = &in_map[i*n + 1]; /* Skip the DC */
            for (int j = 0; j < ((n - 1) >> 1); j++)
                FFSWAP(int, in[j], in[n - j - 2]);
        }
    }

    /* The 15-point transform is itself compound (3 x 5), so its inputs
     * need a second level of permutation */
    if (n == 15) {
        for (int k = 0; k < m; k++) {
            int tmp[15];
            memcpy(tmp, &in_map[k*15], 15*sizeof(*tmp));
            for (int i = 0; i < 5; i++) {
                for (int j = 0; j < 3; j++)
                    in_map[k*15 + i*3 + j] = tmp[(i*3 + j*5) % 15];
            }
        }
    }

    return 0;
}
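A worked example, computed by hand from the formulas above: for n = 3, m = 4 (a 12-point compound FFT, forward, mdct = 0), the input map comes out as {0,4,8, 3,7,11, 6,10,2, 9,1,5}, i.e. each consecutive triple is one stride-4 gather feeding an fft3 call. The output map inverts (i*4*1 + j*3*3) mod 12, since mulinv(4, 3) = 1 and mulinv(3, 4) = 3.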
static int get_ptwo_revtab(AVTXContext *s, int m, int inv)
{
    if (!(s->revtab = av_malloc(m*sizeof(*s->revtab))))
        return AVERROR(ENOMEM);

    /* Split-radix bit-reversal permutation */
    for (int i = 0; i < m; i++) {
        int k = -split_radix_permutation(i, m, inv) & (m - 1);
        s->revtab[k] = i;
    }

    return 0;
}
static int gen_mdct_exptab(AVTXContext *s, int len4, double scale)
{
    const double theta = (scale < 0 ? len4 : 0) + 1.0/8.0;

    if (!(s->exptab = av_malloc_array(len4, sizeof(*s->exptab))))
        return AVERROR(ENOMEM);

    scale = sqrt(fabs(scale));
    for (int i = 0; i < len4; i++) {
        const double alpha = M_PI_2 * (i + theta) / len4;
        s->exptab[i].re = cos(alpha) * scale;
        s->exptab[i].im = sin(alpha) * scale;
    }

    return 0;
}
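In other words, exp[i] = sqrt(|scale|) * (cos(a) + i*sin(a)) with a = (pi/2)*(i + 1/8)/len4 in the forward case; a negative scale requests the inverse transform, and adding len4 to theta rotates every twiddle by a further pi/2.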
static int init_mdct_fft(AVTXContext *s, av_tx_fn *tx, enum AVTXType type,
                         int inv, int len, const void *scale, uint64_t flags)
{
    int err, n = 1, m = 1, max_ptwo = 1 << (FF_ARRAY_ELEMS(fft_dispatch) + 1);

    if (type == AV_TX_FLOAT_MDCT)
        len >>= 1;

    /* Peel off the supported non-power-of-two factor, largest first */
#define CHECK_FACTOR(DST, FACTOR, SRC) \
    if (DST == 1 && !(SRC % FACTOR)) { \
        DST = FACTOR;                  \
        SRC /= FACTOR;                 \
    }
    CHECK_FACTOR(n, 15, len)
    CHECK_FACTOR(n,  5, len)
    CHECK_FACTOR(n,  3, len)
#undef CHECK_FACTOR

    /* The remainder must be a supported power of two */
    if (!(len & (len - 1)) && len >= 4 && len <= max_ptwo) {
        m = len;
        len = 1;
    }

    /* Direct 3, 5 and 15 transforms are filtered out as too niche */
    if (len > 1 || m == 1) {
        av_log(NULL, AV_LOG_ERROR, "Unsupported transform size: n = %i, "
               "m = %i, residual = %i!\n", n, m, len);
        return AVERROR(EINVAL);
    } else if (n > 1 && m > 1) { /* Compound (PFA) case */
        if ((err = gen_compound_mapping(s, n, m, inv, type)))
            return err;

        *tx = n == 3 ? compound_fft_3xM :
              n == 5 ? compound_fft_5xM :
                       compound_fft_15xM;
        if (type == AV_TX_FLOAT_MDCT)
            *tx = n == 3 ? inv ? compound_imdct_3xM : compound_mdct_3xM :
                  n == 5 ? inv ? compound_imdct_5xM : compound_mdct_5xM :
                           inv ? compound_imdct_15xM : compound_mdct_15xM;
    } else { /* Monolithic power-of-two case */
        *tx = monolithic_fft;
        if (type == AV_TX_FLOAT_MDCT)
            *tx = inv ? monolithic_imdct : monolithic_mdct;
    }

    /* ... revtab, temporary buffer, cosine and exptab setup elided ... */

    return 0;
}
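For example, len = 960 first sheds the factor 15 (n = 15, residual 64), the residual is an in-range power of two (m = 64), and the 15xM compound path is selected; len = 1024 keeps n = 1 and goes down the monolithic branch.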
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type,
                       int inv, int len, const void *scale, uint64_t flags)
{
    /* ... allocates the context with av_mallocz(), dispatches on type
     * (AV_TX_FLOAT_FFT, AV_TX_FLOAT_MDCT) to init_mdct_fft(), and tears
     * everything down through av_tx_uninit() on failure ... */
}
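A minimal usage sketch for the public API implemented here (my example, error handling abbreviated): a forward 1024-point FFT run out-of-place.

#include "libavutil/tx.h"

int run_fft(AVComplexFloat *out, AVComplexFloat *in)
{
    AVTXContext *ctx = NULL;
    av_tx_fn fn = NULL;
    float scale = 1.0f; /* ignored for a plain FFT */
    int err = av_tx_init(&ctx, &fn, AV_TX_FLOAT_FFT, 0 /* forward */,
                         1024, &scale, 0);
    if (err < 0)
        return err;

    fn(ctx, out, in, sizeof(AVComplexFloat)); /* stride is in bytes */

    av_tx_uninit(&ctx);
    return 0;
}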