FFmpeg
af_headphone.c
1 /*
2  * Copyright (C) 2017 Paul B Mahol
3  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/channel_layout.h"
25 #include "libavutil/float_dsp.h"
26 #include "libavutil/intmath.h"
27 #include "libavutil/mem.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/tx.h"
30 
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "audio.h"
35 
36 #define TIME_DOMAIN 0
37 #define FREQUENCY_DOMAIN 1
38 
39 #define HRIR_STEREO 0
40 #define HRIR_MULTI 1
41 
42 typedef struct HeadphoneContext {
43  const AVClass *class;
44 
45  char *map;
46  int type;
47 
48  int lfe_channel;
49 
50  int have_hrirs;
51  int eof_hrirs;
52 
53  int ir_len;
54  int air_len;
55 
56  int nb_hrir_inputs;
57 
58  int nb_irs;
59 
60  float gain;
61  float lfe_gain, gain_lfe;
62 
63  float *ringbuffer[2];
64  int write[2];
65 
66  int buffer_length;
67  int n_fft;
68  int size;
69  int hrir_fmt;
70 
71  float *data_ir[2];
72  float *temp_src[2];
73  AVComplexFloat *out_fft[2];
74  AVComplexFloat *in_fft[2];
75  AVComplexFloat *temp_afft[2];
76 
77  AVTXContext *fft[2], *ifft[2];
78  av_tx_fn tx_fn[2], itx_fn[2];
79  AVComplexFloat *data_hrtf[2];
80 
81  float (*scalarproduct_float)(const float *v1, const float *v2, int len);
82  struct hrir_inputs {
83  int ir_len;
84  int eof;
85  } hrir_in[64];
86  AVChannelLayout map_channel_layout;
87  enum AVChannel mapping[64];
88  uint8_t hrir_map[64];
89 } HeadphoneContext;
90 
91 static int parse_channel_name(const char *arg, enum AVChannel *rchannel)
92 {
93  int channel = av_channel_from_string(arg);
94 
95  if (channel < 0 || channel >= 64)
96  return AVERROR(EINVAL);
97  *rchannel = channel;
98  return 0;
99 }
100 
101 static void parse_map(AVFilterContext *ctx)
102 {
103  HeadphoneContext *s = ctx->priv;
104  char *arg, *tokenizer, *p;
105  uint64_t used_channels = 0;
106 
107  p = s->map;
108  while ((arg = av_strtok(p, "|", &tokenizer))) {
109  enum AVChannel out_channel;
110 
111  p = NULL;
112  if (parse_channel_name(arg, &out_channel)) {
113  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", arg);
114  continue;
115  }
116  if (used_channels & (1ULL << out_channel)) {
117  av_log(ctx, AV_LOG_WARNING, "Ignoring duplicate channel '%s'.\n", arg);
118  continue;
119  }
120  used_channels |= (1ULL << out_channel);
121  s->mapping[s->nb_irs] = out_channel;
122  s->nb_irs++;
123  }
124  av_channel_layout_from_mask(&s->map_channel_layout, used_channels);
125 
126  if (s->hrir_fmt == HRIR_MULTI)
127  s->nb_hrir_inputs = 1;
128  else
129  s->nb_hrir_inputs = s->nb_irs;
130 }
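
As a side note on the map option: it is a "|"-separated list of channel names which parse_map() above tokenizes with av_strtok() and resolves with av_channel_from_string(), skipping unknown names and duplicates. A minimal standalone sketch of that tokenization (hypothetical helper, not part of this filter):

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/channel_layout.h"

    /* Hypothetical helper: print the channel id for each token of a map string
     * such as "FL|FR|FC|LFE|BL|BR|SL|SR". The string must be writable because
     * av_strtok() modifies it in place. */
    static void dump_map(char *map)
    {
        char *tok, *saveptr, *p = map;

        while ((tok = av_strtok(p, "|", &saveptr))) {
            enum AVChannel ch = av_channel_from_string(tok);

            p = NULL;  /* subsequent calls must pass NULL, as in parse_map() */
            if ((int)ch < 0)
                printf("'%s': not a channel name\n", tok);
            else
                printf("'%s' -> channel id %d\n", tok, (int)ch);
        }
    }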
131 
132 typedef struct ThreadData {
133  AVFrame *in, *out;
134  int *write;
135  float **ir;
136  int *n_clippings;
137  float **ringbuffer;
138  float **temp_src;
139  AVComplexFloat **out_fft;
140  AVComplexFloat **in_fft;
141  AVComplexFloat **temp_afft;
142 } ThreadData;
143 
144 static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
145 {
146  HeadphoneContext *s = ctx->priv;
147  ThreadData *td = arg;
148  AVFrame *in = td->in, *out = td->out;
149  int offset = jobnr;
150  int *write = &td->write[jobnr];
151  const float *const ir = td->ir[jobnr];
152  int *n_clippings = &td->n_clippings[jobnr];
153  float *ringbuffer = td->ringbuffer[jobnr];
154  float *temp_src = td->temp_src[jobnr];
155  const int ir_len = s->ir_len;
156  const int air_len = s->air_len;
157  const float *src = (const float *)in->data[0];
158  float *dst = (float *)out->data[0];
159  const int in_channels = in->ch_layout.nb_channels;
160  const int buffer_length = s->buffer_length;
161  const uint32_t modulo = (uint32_t)buffer_length - 1;
162  float *buffer[64];
163  int wr = *write;
164  int read;
165  int i, l;
166 
167  dst += offset;
168  for (l = 0; l < in_channels; l++) {
169  buffer[l] = ringbuffer + l * buffer_length;
170  }
171 
172  for (i = 0; i < in->nb_samples; i++) {
173  const float *cur_ir = ir;
174 
175  *dst = 0;
176  for (l = 0; l < in_channels; l++) {
177  *(buffer[l] + wr) = src[l];
178  }
179 
180  for (l = 0; l < in_channels; cur_ir += air_len, l++) {
181  const float *const bptr = buffer[l];
182 
183  if (l == s->lfe_channel) {
184  *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
185  continue;
186  }
187 
188  read = (wr - (ir_len - 1)) & modulo;
189 
190  if (read + ir_len < buffer_length) {
191  memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
192  } else {
193  int len = FFMIN(air_len - (read % ir_len), buffer_length - read);
194 
195  memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
196  memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
197  }
198 
199  dst[0] += s->scalarproduct_float(cur_ir, temp_src, FFALIGN(ir_len, 32));
200  }
201 
202  if (fabsf(dst[0]) > 1)
203  n_clippings[0]++;
204 
205  dst += 2;
206  src += in_channels;
207  wr = (wr + 1) & modulo;
208  }
209 
210  *write = wr;
211 
212  return 0;
213 }
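
The time-domain path above keeps a power-of-two-length ring buffer of recent input samples per channel, so wrap-around is a simple bit mask, and the HRIR taps are stored time-reversed (see convert_coeffs() below), so each output sample reduces to one dot product per channel. A minimal self-contained sketch of that masked ring-buffer dot product, under those assumptions (hypothetical names, plain C loop instead of the SIMD scalarproduct_float):

    /* Hypothetical sketch: convolve one channel's ring buffer with a
     * time-reversed IR. buf_len must be a power of two; wr is the index of
     * the newest sample. */
    static float ringbuffer_dot(const float *rev_ir, const float *ringbuf,
                                unsigned wr, unsigned ir_len, unsigned buf_len)
    {
        const unsigned mask = buf_len - 1;
        unsigned read = (wr - (ir_len - 1)) & mask; /* oldest of the last ir_len samples */
        float sum = 0.f;

        for (unsigned i = 0; i < ir_len; i++)
            sum += rev_ir[i] * ringbuf[(read + i) & mask];
        return sum;
    }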
214 
215 static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
216 {
217  HeadphoneContext *s = ctx->priv;
218  ThreadData *td = arg;
219  AVFrame *in = td->in, *out = td->out;
220  int offset = jobnr;
221  int *write = &td->write[jobnr];
222  AVComplexFloat *hrtf = s->data_hrtf[jobnr];
223  int *n_clippings = &td->n_clippings[jobnr];
224  float *ringbuffer = td->ringbuffer[jobnr];
225  const int ir_len = s->ir_len;
226  const float *src = (const float *)in->data[0];
227  float *dst = (float *)out->data[0];
228  const int in_channels = in->ch_layout.nb_channels;
229  const int buffer_length = s->buffer_length;
230  const uint32_t modulo = (uint32_t)buffer_length - 1;
231  AVComplexFloat *fft_out = s->out_fft[jobnr];
232  AVComplexFloat *fft_in = s->in_fft[jobnr];
233  AVComplexFloat *fft_acc = s->temp_afft[jobnr];
234  AVTXContext *ifft = s->ifft[jobnr];
235  AVTXContext *fft = s->fft[jobnr];
236  av_tx_fn tx_fn = s->tx_fn[jobnr];
237  av_tx_fn itx_fn = s->itx_fn[jobnr];
238  const int n_fft = s->n_fft;
239  const float fft_scale = 1.0f / s->n_fft;
240  AVComplexFloat *hrtf_offset;
241  int wr = *write;
242  int n_read;
243  int i, j;
244 
245  dst += offset;
246 
247  n_read = FFMIN(ir_len, in->nb_samples);
248  for (j = 0; j < n_read; j++) {
249  dst[2 * j] = ringbuffer[wr];
250  ringbuffer[wr] = 0.0;
251  wr = (wr + 1) & modulo;
252  }
253 
254  for (j = n_read; j < in->nb_samples; j++) {
255  dst[2 * j] = 0;
256  }
257 
258  memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);
259 
260  for (i = 0; i < in_channels; i++) {
261  if (i == s->lfe_channel) {
262  for (j = 0; j < in->nb_samples; j++) {
263  dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
264  }
265  continue;
266  }
267 
268  offset = i * n_fft;
269  hrtf_offset = hrtf + s->hrir_map[i] * n_fft;
270 
271  memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);
272 
273  for (j = 0; j < in->nb_samples; j++) {
274  fft_in[j].re = src[j * in_channels + i];
275  }
276 
277  tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));
278 
279  for (j = 0; j < n_fft; j++) {
280  const AVComplexFloat *hcomplex = hrtf_offset + j;
281  const float re = fft_out[j].re;
282  const float im = fft_out[j].im;
283 
284  fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
285  fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
286  }
287  }
288 
289  itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));
290 
291  for (j = 0; j < in->nb_samples; j++) {
292  dst[2 * j] += fft_out[j].re * fft_scale;
293  if (fabsf(dst[2 * j]) > 1)
294  n_clippings[0]++;
295  }
296 
297  for (j = 0; j < ir_len - 1; j++) {
298  int write_pos = (wr + j) & modulo;
299 
300  *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
301  }
302 
303  *write = wr;
304 
305  return 0;
306 }
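
The frequency-domain path is a per-block overlap-add convolution: each input block is zero-padded to n_fft (at least ir_len + size samples), transformed, multiplied bin by bin with the precomputed HRTF spectrum of every input channel, accumulated, inverse-transformed, and the tail of ir_len - 1 samples is carried over in the ring buffer for the next block. In this configuration av_tx applies no normalization, hence the explicit 1/n_fft scaling above. A minimal standalone sketch of the same av_tx round trip (hypothetical function, unrelated buffer contents):

    #include "libavutil/error.h"
    #include "libavutil/mem.h"
    #include "libavutil/tx.h"

    /* Hypothetical sketch: forward/inverse AV_TX_FLOAT_FFT pair on one block,
     * with the same manual 1/n_fft normalization used by the filter. */
    static int fft_roundtrip_demo(int n_fft)
    {
        AVTXContext *fft = NULL, *ifft = NULL;
        av_tx_fn tx, itx;
        float scale = 1.f;
        AVComplexFloat *in  = av_calloc(n_fft, sizeof(*in));
        AVComplexFloat *out = av_calloc(n_fft, sizeof(*out));
        int ret = 0;

        if (!in || !out) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        if ((ret = av_tx_init(&fft,  &tx,  AV_TX_FLOAT_FFT, 0, n_fft, &scale, 0)) < 0 ||
            (ret = av_tx_init(&ifft, &itx, AV_TX_FLOAT_FFT, 1, n_fft, &scale, 0)) < 0)
            goto end;

        in[0].re = 1.f;                    /* unit impulse at sample 0 */
        tx(fft, out, in, sizeof(*in));     /* forward transform: out = FFT(in) */
        itx(ifft, in, out, sizeof(*out));  /* inverse transform, unnormalized */
        for (int i = 0; i < n_fft; i++) {
            in[i].re /= n_fft;             /* undo the n_fft gain, as fft_scale does */
            in[i].im /= n_fft;
        }

    end:
        av_tx_uninit(&fft);
        av_tx_uninit(&ifft);
        av_freep(&in);
        av_freep(&out);
        return ret;
    }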
307 
308 static int check_ir(AVFilterLink *inlink, int input_number)
309 {
310  AVFilterContext *ctx = inlink->dst;
311  HeadphoneContext *s = ctx->priv;
312  int ir_len, max_ir_len;
313 
314  ir_len = ff_inlink_queued_samples(inlink);
315  max_ir_len = 65536;
316  if (ir_len > max_ir_len) {
317  av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
318  return AVERROR(EINVAL);
319  }
320  s->hrir_in[input_number].ir_len = ir_len;
321  s->ir_len = FFMAX(ir_len, s->ir_len);
322 
323  if (ff_inlink_check_available_samples(inlink, ir_len + 1) == 1) {
324  s->hrir_in[input_number].eof = 1;
325  return 1;
326  }
327 
328  if (!s->hrir_in[input_number].eof) {
329  ff_inlink_request_frame(inlink);
330  return 0;
331  }
332 
333  return 0;
334 }
335 
336 static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
337 {
338  AVFilterContext *ctx = outlink->src;
339  int n_clippings[2] = { 0 };
340  ThreadData td;
341  AVFrame *out;
342 
343  out = ff_get_audio_buffer(outlink, in->nb_samples);
344  if (!out) {
345  av_frame_free(&in);
346  return AVERROR(ENOMEM);
347  }
348  out->pts = in->pts;
349 
350  td.in = in; td.out = out; td.write = s->write;
351  td.ir = s->data_ir; td.n_clippings = n_clippings;
352  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
353  td.out_fft = s->out_fft;
354  td.in_fft = s->in_fft;
355  td.temp_afft = s->temp_afft;
356 
357  if (s->type == TIME_DOMAIN) {
358  ff_filter_execute(ctx, headphone_convolute, &td, NULL, 2);
359  } else {
360  ff_filter_execute(ctx, headphone_fast_convolute, &td, NULL, 2);
361  }
362 
363  if (n_clippings[0] + n_clippings[1] > 0) {
364  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
365  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
366  }
367 
368  av_frame_free(&in);
369  return ff_filter_frame(outlink, out);
370 }
371 
372 static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
373 {
374  struct HeadphoneContext *s = ctx->priv;
375  const int ir_len = s->ir_len;
376  int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels;
377  const int nb_hrir_channels = s->nb_hrir_inputs == 1 ? ctx->inputs[1]->ch_layout.nb_channels : s->nb_hrir_inputs * 2;
378  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
379  AVFrame *frame;
380  int ret = 0;
381  int n_fft;
382  int i, j, k;
383 
384  s->air_len = 1 << (32 - ff_clz(ir_len));
385  if (s->type == TIME_DOMAIN) {
386  s->air_len = FFALIGN(s->air_len, 32);
387  }
388  s->buffer_length = 1 << (32 - ff_clz(s->air_len));
389  s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
390 
391  if (s->type == FREQUENCY_DOMAIN) {
392  float scale = 1.f;
393 
394  ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
395  if (ret < 0)
396  goto fail;
397  ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
398  if (ret < 0)
399  goto fail;
400  ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
401  if (ret < 0)
402  goto fail;
403  ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
404  if (ret < 0)
405  goto fail;
406 
407  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
408  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
409  ret = AVERROR(ENOMEM);
410  goto fail;
411  }
412  }
413 
414  if (s->type == TIME_DOMAIN) {
415  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
416  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
417  } else {
418  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
419  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
420  s->out_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
421  s->out_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
422  s->in_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
423  s->in_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
424  s->temp_afft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
425  s->temp_afft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
426  if (!s->in_fft[0] || !s->in_fft[1] ||
427  !s->out_fft[0] || !s->out_fft[1] ||
428  !s->temp_afft[0] || !s->temp_afft[1]) {
429  ret = AVERROR(ENOMEM);
430  goto fail;
431  }
432  }
433 
434  if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
435  ret = AVERROR(ENOMEM);
436  goto fail;
437  }
438 
439  if (s->type == TIME_DOMAIN) {
440  s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
441  s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
442 
443  s->data_ir[0] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[0]));
444  s->data_ir[1] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[1]));
445  if (!s->data_ir[0] || !s->data_ir[1] || !s->temp_src[0] || !s->temp_src[1]) {
446  ret = AVERROR(ENOMEM);
447  goto fail;
448  }
449  } else {
450  s->data_hrtf[0] = av_calloc(n_fft, sizeof(*s->data_hrtf[0]) * nb_hrir_channels);
451  s->data_hrtf[1] = av_calloc(n_fft, sizeof(*s->data_hrtf[1]) * nb_hrir_channels);
452  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
453  ret = AVERROR(ENOMEM);
454  goto fail;
455  }
456  }
457 
458  for (i = 0; i < s->nb_hrir_inputs; av_frame_free(&frame), i++) {
459  int len = s->hrir_in[i].ir_len;
460  float *ptr;
461 
462  ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &frame);
463  if (ret < 0)
464  goto fail;
465  ptr = (float *)frame->extended_data[0];
466 
467  if (s->hrir_fmt == HRIR_STEREO) {
468  int idx = av_channel_layout_index_from_channel(&s->map_channel_layout,
469  s->mapping[i]);
470  if (idx < 0)
471  continue;
472 
473  s->hrir_map[i] = idx;
474  if (s->type == TIME_DOMAIN) {
475  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
476  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
477 
478  for (j = 0; j < len; j++) {
479  data_ir_l[j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
480  data_ir_r[j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
481  }
482  } else {
483  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
484  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
485  AVComplexFloat *fft_in_l = s->in_fft[0];
486  AVComplexFloat *fft_in_r = s->in_fft[1];
487 
488  for (j = 0; j < len; j++) {
489  fft_in_l[j].re = ptr[j * 2 ] * gain_lin;
490  fft_in_r[j].re = ptr[j * 2 + 1] * gain_lin;
491  }
492 
493  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
494  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
495  }
496  } else {
497  int I, N = ctx->inputs[1]->ch_layout.nb_channels;
498 
499  for (k = 0; k < N / 2; k++) {
500  int idx = av_channel_layout_index_from_channel(&inlink->ch_layout,
501  s->mapping[k]);
502  if (idx < 0)
503  continue;
504 
505  s->hrir_map[k] = idx;
506  I = k * 2;
507  if (s->type == TIME_DOMAIN) {
508  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
509  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
510 
511  for (j = 0; j < len; j++) {
512  data_ir_l[j] = ptr[len * N - j * N - N + I ] * gain_lin;
513  data_ir_r[j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
514  }
515  } else {
516  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
517  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
518  AVComplexFloat *fft_in_l = s->in_fft[0];
519  AVComplexFloat *fft_in_r = s->in_fft[1];
520 
521  for (j = 0; j < len; j++) {
522  fft_in_l[j].re = ptr[j * N + I ] * gain_lin;
523  fft_in_r[j].re = ptr[j * N + I + 1] * gain_lin;
524  }
525 
526  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
527  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
528  }
529  }
530  }
531  }
532 
533  s->have_hrirs = 1;
534 
535 fail:
536  return ret;
537 }
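
Regarding the coefficient layout built above: with hrir=stereo, HRIR input i is an interleaved stereo stream, so tap j of the left-ear IR sits at ptr[2*j] and the right-ear tap at ptr[2*j + 1]; with hrir=multich a single input carries N channels and mapped channel k owns the interleaved pair (2*k, 2*k + 1). For the time-domain path the taps are additionally written back to front (index len * 2 - j * 2 - 2 above), i.e. time-reversed, which is what lets headphone_convolute() use a plain dot product. A small sketch of that deinterleave-and-reverse step for the stereo case (hypothetical helper):

    /* Hypothetical sketch: split one interleaved stereo HRIR into time-reversed,
     * gain-scaled left/right tap arrays, as convert_coeffs() does for TIME_DOMAIN. */
    static void load_stereo_hrir(const float *ptr, int len, float gain_lin,
                                 float *taps_l, float *taps_r)
    {
        for (int j = 0; j < len; j++) {
            taps_l[j] = ptr[len * 2 - j * 2 - 2] * gain_lin; /* left ear,  reversed */
            taps_r[j] = ptr[len * 2 - j * 2 - 1] * gain_lin; /* right ear, reversed */
        }
    }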
538 
539 static int activate(AVFilterContext *ctx)
540 {
541  HeadphoneContext *s = ctx->priv;
542  AVFilterLink *inlink = ctx->inputs[0];
543  AVFilterLink *outlink = ctx->outputs[0];
544  AVFrame *in = NULL;
545  int i, ret;
546 
547  FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
548  if (!s->eof_hrirs) {
549  int eof = 1;
550  for (i = 0; i < s->nb_hrir_inputs; i++) {
551  AVFilterLink *input = ctx->inputs[i + 1];
552 
553  if (s->hrir_in[i].eof)
554  continue;
555 
556  if ((ret = check_ir(input, i)) <= 0)
557  return ret;
558 
559  if (s->hrir_in[i].eof) {
560  if (!ff_inlink_queued_samples(input)) {
561  av_log(ctx, AV_LOG_ERROR, "No samples provided for "
562  "HRIR stream %d.\n", i);
563  return AVERROR_INVALIDDATA;
564  }
565  } else {
566  eof = 0;
567  }
568  }
569  if (!eof) {
570  ff_filter_set_ready(ctx, 100);
571  return 0;
572  }
573  s->eof_hrirs = 1;
574 
575  ret = convert_coeffs(ctx, inlink);
576  if (ret < 0)
577  return ret;
578  } else if (!s->have_hrirs)
579  return AVERROR_EOF;
580 
581  if ((ret = ff_inlink_consume_samples(inlink, s->size, s->size, &in)) > 0) {
582  ret = headphone_frame(s, in, outlink);
583  if (ret < 0)
584  return ret;
585  }
586 
587  if (ret < 0)
588  return ret;
589 
590  FF_FILTER_FORWARD_STATUS(inlink, outlink);
591  if (ff_outlink_frame_wanted(outlink))
592  ff_inlink_request_frame(inlink);
593 
594  return 0;
595 }
596 
597 static int query_formats(const AVFilterContext *ctx,
598  AVFilterFormatsConfig **cfg_in,
599  AVFilterFormatsConfig **cfg_out)
600 {
601  static const enum AVSampleFormat formats[] = {
602  AV_SAMPLE_FMT_FLT,
603  AV_SAMPLE_FMT_NONE,
604  };
605 
606  const HeadphoneContext *s = ctx->priv;
607 
608  AVFilterChannelLayouts *layouts = NULL;
609  AVFilterChannelLayouts *stereo_layout = NULL;
610  AVFilterChannelLayouts *hrir_layouts = NULL;
611  int ret, i;
612 
613  ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, formats);
614  if (ret)
615  return ret;
616 
617  layouts = ff_all_channel_layouts();
618  if (!layouts)
619  return AVERROR(ENOMEM);
620 
621  ret = ff_channel_layouts_ref(layouts, &cfg_in[0]->channel_layouts);
622  if (ret)
623  return ret;
624 
625  ret = ff_add_channel_layout(&stereo_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
626  if (ret)
627  return ret;
628  ret = ff_channel_layouts_ref(stereo_layout, &cfg_out[0]->channel_layouts);
629  if (ret)
630  return ret;
631 
632  if (s->hrir_fmt == HRIR_MULTI) {
633  hrir_layouts = ff_all_channel_counts();
634  if (!hrir_layouts)
635  return AVERROR(ENOMEM);
636  ret = ff_channel_layouts_ref(hrir_layouts, &cfg_in[1]->channel_layouts);
637  if (ret)
638  return ret;
639  } else {
640  for (i = 1; i <= s->nb_hrir_inputs; i++) {
641  ret = ff_channel_layouts_ref(stereo_layout, &cfg_in[1]->channel_layouts);
642  if (ret)
643  return ret;
644  }
645  }
646 
647  return 0;
648 }
649 
650 static int config_input(AVFilterLink *inlink)
651 {
652  AVFilterContext *ctx = inlink->dst;
653  HeadphoneContext *s = ctx->priv;
654 
655  if (s->nb_irs < inlink->ch_layout.nb_channels) {
656  av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->ch_layout.nb_channels);
657  return AVERROR(EINVAL);
658  }
659 
660  s->lfe_channel = av_channel_layout_index_from_channel(&inlink->ch_layout,
661  AV_CHAN_LOW_FREQUENCY);
662  return 0;
663 }
664 
665 static av_cold int init(AVFilterContext *ctx)
666 {
667  HeadphoneContext *s = ctx->priv;
668  int i, ret;
669 
670  AVFilterPad pad = {
671  .name = "in0",
672  .type = AVMEDIA_TYPE_AUDIO,
673  .config_props = config_input,
674  };
675  if ((ret = ff_append_inpad(ctx, &pad)) < 0)
676  return ret;
677 
678  if (!s->map) {
679  av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
680  return AVERROR(EINVAL);
681  }
682 
683  parse_map(ctx);
684 
685  for (i = 0; i < s->nb_hrir_inputs; i++) {
686  char *name = av_asprintf("hrir%d", i);
687  AVFilterPad pad = {
688  .name = name,
689  .type = AVMEDIA_TYPE_AUDIO,
690  };
691  if (!name)
692  return AVERROR(ENOMEM);
693  if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
694  return ret;
695  }
696 
697  if (s->type == TIME_DOMAIN) {
698  AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0);
699  if (!fdsp)
700  return AVERROR(ENOMEM);
701  s->scalarproduct_float = fdsp->scalarproduct_float;
702  av_free(fdsp);
703  }
704 
705  return 0;
706 }
707 
708 static int config_output(AVFilterLink *outlink)
709 {
710  AVFilterContext *ctx = outlink->src;
711  HeadphoneContext *s = ctx->priv;
712  AVFilterLink *inlink = ctx->inputs[0];
713 
714  if (s->hrir_fmt == HRIR_MULTI) {
715  AVFilterLink *hrir_link = ctx->inputs[1];
716 
717  if (hrir_link->ch_layout.nb_channels < inlink->ch_layout.nb_channels * 2) {
718  av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->ch_layout.nb_channels * 2);
719  return AVERROR(EINVAL);
720  }
721  }
722 
723  s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
724 
725  return 0;
726 }
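
On the gain computation in config_output(): the user gains are converted from dB to linear as gain_lfe = 10^((gain - 3 * nb_channels + lfe_gain) / 20), where the fixed 3 dB attenuation per input channel leaves headroom when all convolved channels are summed into one ear; convert_coeffs() applies the same formula without the lfe_gain term. As a worked example, with an 8-channel input and gain = lfe_gain = 0 this gives 10^(-24 / 20), roughly 0.063.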
727 
728 static av_cold void uninit(AVFilterContext *ctx)
729 {
730  HeadphoneContext *s = ctx->priv;
731 
732  av_tx_uninit(&s->ifft[0]);
733  av_tx_uninit(&s->ifft[1]);
734  av_tx_uninit(&s->fft[0]);
735  av_tx_uninit(&s->fft[1]);
736  av_freep(&s->data_ir[0]);
737  av_freep(&s->data_ir[1]);
738  av_freep(&s->ringbuffer[0]);
739  av_freep(&s->ringbuffer[1]);
740  av_freep(&s->temp_src[0]);
741  av_freep(&s->temp_src[1]);
742  av_freep(&s->out_fft[0]);
743  av_freep(&s->out_fft[1]);
744  av_freep(&s->in_fft[0]);
745  av_freep(&s->in_fft[1]);
746  av_freep(&s->temp_afft[0]);
747  av_freep(&s->temp_afft[1]);
748  av_freep(&s->data_hrtf[0]);
749  av_freep(&s->data_hrtf[1]);
750 }
751 
752 #define OFFSET(x) offsetof(HeadphoneContext, x)
753 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
754 
755 static const AVOption headphone_options[] = {
756  { "map", "set channels convolution mappings", OFFSET(map), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
757  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
758  { "lfe", "set lfe gain in dB", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
759  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, .unit = "type" },
760  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, .unit = "type" },
761  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, .unit = "type" },
762  { "size", "set frame size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
763  { "hrir", "set hrir format", OFFSET(hrir_fmt), AV_OPT_TYPE_INT, {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, .unit = "hrir" },
764  { "stereo", "hrir files have exactly 2 channels", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, .unit = "hrir" },
765  { "multich", "single multichannel hrir file", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_MULTI}, 0, 0, .flags = FLAGS, .unit = "hrir" },
766  { NULL }
767 };
768 
769 AVFILTER_DEFINE_CLASS(headphone);
770 
771 static const AVFilterPad outputs[] = {
772  {
773  .name = "default",
774  .type = AVMEDIA_TYPE_AUDIO,
775  .config_props = config_output,
776  },
777 };
778 
779 const AVFilter ff_af_headphone = {
780  .name = "headphone",
781  .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
782  .priv_size = sizeof(HeadphoneContext),
783  .priv_class = &headphone_class,
784  .init = init,
785  .uninit = uninit,
786  .activate = activate,
787  .inputs = NULL,
788  FILTER_OUTPUTS(outputs),
789  FILTER_QUERY_FUNC2(query_formats),
790  .flags = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_DYNAMIC_INPUTS,
791 };
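
For context, a typical invocation (file names are placeholders) feeds the stream to spatialize, here a stereo input.wav, as the first input and, with the default hrir=stereo, one stereo HRIR stream per mapped channel:

    ffmpeg -i input.wav -i hrir_fl.wav -i hrir_fr.wav \
           -filter_complex "[0:a][1:a][2:a]headphone=map=FL|FR" out.wav

With hrir=multich the extra inputs collapse into a single multichannel HRIR stream, e.g. headphone=map=FL|FR:hrir=multich fed by one file carrying at least four channels (left/right ear pairs in mapping order).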