FFmpeg
af_loudnorm.c
1 /*
2  * Copyright (c) 2016 Kyle Swanson <k@ylo.ph>.
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /* http://k.ylo.ph/2016/04/04/loudnorm.html */
22 
23 #include "libavutil/mem.h"
24 #include "libavutil/opt.h"
25 #include "avfilter.h"
26 #include "filters.h"
27 #include "formats.h"
28 #include "audio.h"
29 #include "ebur128.h"
30 
31 enum FrameType {
32  FIRST_FRAME,
33  INNER_FRAME,
34  FINAL_FRAME,
35  LINEAR_MODE,
36  FRAME_NB
37 };
38 
39 enum LimiterState {
40  OUT,
41  ATTACK,
42  SUSTAIN,
43  RELEASE,
44  STATE_NB
45 };
46 
47 enum PrintFormat {
48  NONE,
49  JSON,
50  SUMMARY,
51  PF_NB
52 };
53 
54 typedef struct LoudNormContext {
55  const AVClass *class;
56  double target_i;
57  double target_lra;
58  double target_tp;
59  double measured_i;
60  double measured_lra;
61  double measured_tp;
62  double measured_thresh;
63  double offset;
64  int linear;
65  int dual_mono;
66  enum PrintFormat print_format;
67 
68  double *buf;
69  int buf_size;
70  int buf_index;
71  int prev_buf_index;
72 
73  double delta[30];
74  double weights[21];
75  double prev_delta;
76  int index;
77 
78  double gain_reduction[2];
79  double *limiter_buf;
80  double *prev_smp;
81  int limiter_buf_index;
82  int limiter_buf_size;
83  enum LimiterState limiter_state;
84  int peak_index;
85  int env_index;
86  int env_cnt;
87  int attack_length;
88  int release_length;
89 
90  int64_t pts[30];
91  enum FrameType frame_type;
92  int above_threshold;
93  int prev_nb_samples;
94  int channels;
95 
96  FFEBUR128State *r128_in;
97  FFEBUR128State *r128_out;
98 } LoudNormContext;
99 
100 #define OFFSET(x) offsetof(LoudNormContext, x)
101 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
102 
103 static const AVOption loudnorm_options[] = {
104  { "I", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
105  { "i", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
106  { "LRA", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 50., FLAGS },
107  { "lra", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 50., FLAGS },
108  { "TP", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
109  { "tp", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
110  { "measured_I", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
111  { "measured_i", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
112  { "measured_LRA", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
113  { "measured_lra", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
114  { "measured_TP", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
115  { "measured_tp", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
116  { "measured_thresh", "measured threshold of input file", OFFSET(measured_thresh), AV_OPT_TYPE_DOUBLE, {.dbl = -70.}, -99., 0., FLAGS },
117  { "offset", "set offset gain", OFFSET(offset), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 99., FLAGS },
118  { "linear", "normalize linearly if possible", OFFSET(linear), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
119  { "dual_mono", "treat mono input as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
120  { "print_format", "set print format for stats", OFFSET(print_format), AV_OPT_TYPE_INT, {.i64 = NONE}, NONE, PF_NB -1, FLAGS, .unit = "print_format" },
121  { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NONE}, 0, 0, FLAGS, .unit = "print_format" },
122  { "json", 0, 0, AV_OPT_TYPE_CONST, {.i64 = JSON}, 0, 0, FLAGS, .unit = "print_format" },
123  { "summary", 0, 0, AV_OPT_TYPE_CONST, {.i64 = SUMMARY}, 0, 0, FLAGS, .unit = "print_format" },
124  { NULL }
125 };
126 
127 AVFILTER_DEFINE_CLASS(loudnorm);
128 
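/*
 * Illustrative usage (editorial addition, not part of the FFmpeg source; the
 * measured_* values below are hypothetical first-pass results):
 *
 *   Pass 1, measure only and print the stats selected by print_format:
 *     ffmpeg -i in.wav -af loudnorm=I=-23:LRA=7:TP=-2:print_format=json -f null -
 *
 *   Pass 2, feed the measured values back so linear normalization can be used:
 *     ffmpeg -i in.wav -af loudnorm=I=-23:LRA=7:TP=-2:measured_I=-30.1:measured_LRA=3.5:measured_TP=-8.0:measured_thresh=-40.6:linear=true out.wav
 */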
129 static inline int frame_size(int sample_rate, int frame_len_msec)
130 {
131  const int frame_size = round((double)sample_rate * (frame_len_msec / 1000.0));
132  return frame_size + (frame_size % 2);
133 }
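/*
 * Worked example (editorial, assuming a 48 kHz input): frame_size(48000, 3000)
 * = 144000 samples (the 3 s window analysed for the first frame),
 * frame_size(48000, 100) = 4800 (the 100 ms processing frame) and
 * frame_size(48000, 10) = 480 (the limiter attack length). The
 * "+ (frame_size % 2)" term bumps odd counts up by one so the result is even.
 */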
134 
135 static void init_gaussian_filter(LoudNormContext *s)
136 {
137  double total_weight = 0.0;
138  const double sigma = 3.5;
139  double adjust;
140  int i;
141 
142  const int offset = 21 / 2;
143  const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
144  const double c2 = 2.0 * pow(sigma, 2.0);
145 
146  for (i = 0; i < 21; i++) {
147  const int x = i - offset;
148  s->weights[i] = c1 * exp(-(pow(x, 2.0) / c2));
149  total_weight += s->weights[i];
150  }
151 
152  adjust = 1.0 / total_weight;
153  for (i = 0; i < 21; i++)
154  s->weights[i] *= adjust;
155 }
156 
157 static double gaussian_filter(LoudNormContext *s, int index)
158 {
159  double result = 0.;
160  int i;
161 
162  index = index - 10 > 0 ? index - 10 : index + 20;
163  for (i = 0; i < 21; i++)
164  result += s->delta[((index + i) < 30) ? (index + i) : (index + i - 30)] * s->weights[i];
165 
166  return result;
167 }
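/*
 * Annotation (not in the original source): delta[] is a 30-entry ring buffer
 * of per-frame gain corrections. init_gaussian_filter() builds a 21-tap window
 * w[i] = 1/(sigma*sqrt(2*pi)) * exp(-((i-10)^2) / (2*sigma^2)) with sigma = 3.5,
 * renormalized to sum to 1, and gaussian_filter() returns the weighted sum of
 * the 21 delta[] entries centred on the requested index, smoothing the gain
 * curve before it is applied in filter_frame().
 */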
168 
169 static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
170 {
171  int n, c, i, index;
172  double ceiling;
173  double *buf;
174 
175  *peak_delta = -1;
176  buf = s->limiter_buf;
177  ceiling = s->target_tp;
178 
179  index = s->limiter_buf_index + (offset * channels) + (1920 * channels);
180  if (index >= s->limiter_buf_size)
181  index -= s->limiter_buf_size;
182 
183  if (s->frame_type == FIRST_FRAME) {
184  for (c = 0; c < channels; c++)
185  s->prev_smp[c] = fabs(buf[index + c - channels]);
186  }
187 
188  for (n = 0; n < nb_samples; n++) {
189  for (c = 0; c < channels; c++) {
190  double this, next, max_peak;
191 
192  this = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
193  next = fabs(buf[(index + c + channels) < s->limiter_buf_size ? (index + c + channels) : (index + c + channels - s->limiter_buf_size)]);
194 
195  if ((s->prev_smp[c] <= this) && (next <= this) && (this > ceiling) && (n > 0)) {
196  int detected;
197 
198  detected = 1;
199  for (i = 2; i < 12; i++) {
200  next = fabs(buf[(index + c + (i * channels)) < s->limiter_buf_size ? (index + c + (i * channels)) : (index + c + (i * channels) - s->limiter_buf_size)]);
201  if (next > this) {
202  detected = 0;
203  break;
204  }
205  }
206 
207  if (!detected)
208  continue;
209 
210  for (c = 0; c < channels; c++) {
211  if (c == 0 || fabs(buf[index + c]) > max_peak)
212  max_peak = fabs(buf[index + c]);
213 
214  s->prev_smp[c] = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
215  }
216 
217  *peak_delta = n;
218  s->peak_index = index;
219  *peak_value = max_peak;
220  return;
221  }
222 
223  s->prev_smp[c] = this;
224  }
225 
226  index += channels;
227  if (index >= s->limiter_buf_size)
228  index -= s->limiter_buf_size;
229  }
230 }
231 
232 static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
233 {
234  int n, c, index, peak_delta, smp_cnt;
235  double ceiling, peak_value;
236  double *buf;
237 
238  buf = s->limiter_buf;
239  ceiling = s->target_tp;
240  index = s->limiter_buf_index;
241  smp_cnt = 0;
242 
243  if (s->frame_type == FIRST_FRAME) {
244  double max;
245 
246  max = 0.;
247  for (n = 0; n < 1920; n++) {
248  for (c = 0; c < channels; c++) {
249  max = fabs(buf[c]) > max ? fabs(buf[c]) : max;
250  }
251  buf += channels;
252  }
253 
254  if (max > ceiling) {
255  s->gain_reduction[1] = ceiling / max;
256  s->limiter_state = SUSTAIN;
257  buf = s->limiter_buf;
258 
259  for (n = 0; n < 1920; n++) {
260  for (c = 0; c < channels; c++) {
261  double env;
262  env = s->gain_reduction[1];
263  buf[c] *= env;
264  }
265  buf += channels;
266  }
267  }
268 
269  buf = s->limiter_buf;
270  }
271 
272  do {
273 
274  switch(s->limiter_state) {
275  case OUT:
276  detect_peak(s, smp_cnt, nb_samples - smp_cnt, channels, &peak_delta, &peak_value);
277  if (peak_delta != -1) {
278  s->env_cnt = 0;
279  smp_cnt += (peak_delta - s->attack_length);
280  s->gain_reduction[0] = 1.;
281  s->gain_reduction[1] = ceiling / peak_value;
282  s->limiter_state = ATTACK;
283 
284  s->env_index = s->peak_index - (s->attack_length * channels);
285  if (s->env_index < 0)
286  s->env_index += s->limiter_buf_size;
287 
288  s->env_index += (s->env_cnt * channels);
289  if (s->env_index > s->limiter_buf_size)
290  s->env_index -= s->limiter_buf_size;
291 
292  } else {
293  smp_cnt = nb_samples;
294  }
295  break;
296 
297  case ATTACK:
298  for (; s->env_cnt < s->attack_length; s->env_cnt++) {
299  for (c = 0; c < channels; c++) {
300  double env;
301  env = s->gain_reduction[0] - ((double) s->env_cnt / (s->attack_length - 1) * (s->gain_reduction[0] - s->gain_reduction[1]));
302  buf[s->env_index + c] *= env;
303  }
304 
305  s->env_index += channels;
306  if (s->env_index >= s->limiter_buf_size)
307  s->env_index -= s->limiter_buf_size;
308 
309  smp_cnt++;
310  if (smp_cnt >= nb_samples) {
311  s->env_cnt++;
312  break;
313  }
314  }
315 
316  if (smp_cnt < nb_samples) {
317  s->env_cnt = 0;
318  s->attack_length = 1920;
319  s->limiter_state = SUSTAIN;
320  }
321  break;
322 
323  case SUSTAIN:
324  detect_peak(s, smp_cnt, nb_samples, channels, &peak_delta, &peak_value);
325  if (peak_delta == -1) {
326  s->limiter_state = RELEASE;
327  s->gain_reduction[0] = s->gain_reduction[1];
328  s->gain_reduction[1] = 1.;
329  s->env_cnt = 0;
330  break;
331  } else {
332  double gain_reduction;
333  gain_reduction = ceiling / peak_value;
334 
335  if (gain_reduction < s->gain_reduction[1]) {
336  s->limiter_state = ATTACK;
337 
338  s->attack_length = peak_delta;
339  if (s->attack_length <= 1)
340  s->attack_length = 2;
341 
342  s->gain_reduction[0] = s->gain_reduction[1];
343  s->gain_reduction[1] = gain_reduction;
344  s->env_cnt = 0;
345  break;
346  }
347 
348  for (s->env_cnt = 0; s->env_cnt < peak_delta; s->env_cnt++) {
349  for (c = 0; c < channels; c++) {
350  double env;
351  env = s->gain_reduction[1];
352  buf[s->env_index + c] *= env;
353  }
354 
355  s->env_index += channels;
356  if (s->env_index >= s->limiter_buf_size)
357  s->env_index -= s->limiter_buf_size;
358 
359  smp_cnt++;
360  if (smp_cnt >= nb_samples) {
361  s->env_cnt++;
362  break;
363  }
364  }
365  }
366  break;
367 
368  case RELEASE:
369  for (; s->env_cnt < s->release_length; s->env_cnt++) {
370  for (c = 0; c < channels; c++) {
371  double env;
372  env = s->gain_reduction[0] + (((double) s->env_cnt / (s->release_length - 1)) * (s->gain_reduction[1] - s->gain_reduction[0]));
373  buf[s->env_index + c] *= env;
374  }
375 
376  s->env_index += channels;
377  if (s->env_index >= s->limiter_buf_size)
378  s->env_index -= s->limiter_buf_size;
379 
380  smp_cnt++;
381  if (smp_cnt >= nb_samples) {
382  s->env_cnt++;
383  break;
384  }
385  }
386 
387  if (smp_cnt < nb_samples) {
388  s->env_cnt = 0;
389  s->limiter_state = OUT;
390  }
391 
392  break;
393  }
394 
395  } while (smp_cnt < nb_samples);
396 
397  for (n = 0; n < nb_samples; n++) {
398  for (c = 0; c < channels; c++) {
399  out[c] = buf[index + c];
400  if (fabs(out[c]) > ceiling) {
401  out[c] = ceiling * (out[c] < 0 ? -1 : 1);
402  }
403  }
404  out += channels;
405  index += channels;
406  if (index >= s->limiter_buf_size)
407  index -= s->limiter_buf_size;
408  }
409 }
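/*
 * Annotation (not in the original source): the limiter cycles through the four
 * LimiterState values over the look-ahead buffer. OUT scans for a sample above
 * target_tp; ATTACK ramps the gain from gain_reduction[0] down to
 * gain_reduction[1] across attack_length samples leading into the peak;
 * SUSTAIN holds that gain and re-enters ATTACK if a louder peak appears;
 * RELEASE ramps back to unity over release_length samples. Whatever still
 * exceeds target_tp after the envelope is applied is hard-clipped in the final
 * copy loop above.
 */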
410 
411 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
412 {
413  AVFilterContext *ctx = inlink->dst;
414  LoudNormContext *s = ctx->priv;
415  AVFilterLink *outlink = ctx->outputs[0];
416  AVFrame *out;
417  const double *src;
418  double *dst;
419  double *buf;
420  double *limiter_buf;
421  int i, n, c, subframe_length, src_index;
422  double gain, gain_next, env_global, env_shortterm,
423  global, shortterm, lra, relative_threshold;
424 
425  if (av_frame_is_writable(in)) {
426  out = in;
427  } else {
428  out = ff_get_audio_buffer(outlink, in->nb_samples);
429  if (!out) {
430  av_frame_free(&in);
431  return AVERROR(ENOMEM);
432  }
433  av_frame_copy_props(out, in);
434  }
435 
436  out->pts = s->pts[0];
437  memmove(s->pts, &s->pts[1], (FF_ARRAY_ELEMS(s->pts) - 1) * sizeof(s->pts[0]));
438 
439  src = (const double *)in->data[0];
440  dst = (double *)out->data[0];
441  buf = s->buf;
442  limiter_buf = s->limiter_buf;
443 
444  ff_ebur128_add_frames_double(s->r128_in, src, in->nb_samples);
445 
446  if (s->frame_type == FIRST_FRAME && in->nb_samples < frame_size(inlink->sample_rate, 3000)) {
447  double offset, offset_tp, true_peak;
448 
449  ff_ebur128_loudness_global(s->r128_in, &global);
450  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
451  double tmp;
452  ff_ebur128_sample_peak(s->r128_in, c, &tmp);
453  if (c == 0 || tmp > true_peak)
454  true_peak = tmp;
455  }
456 
457  offset = pow(10., (s->target_i - global) / 20.);
458  offset_tp = true_peak * offset;
459  s->offset = offset_tp < s->target_tp ? offset : s->target_tp / true_peak;
460  s->frame_type = LINEAR_MODE;
461  }
462 
463  switch (s->frame_type) {
464  case FIRST_FRAME:
465  for (n = 0; n < in->nb_samples; n++) {
466  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
467  buf[s->buf_index + c] = src[c];
468  }
469  src += inlink->ch_layout.nb_channels;
470  s->buf_index += inlink->ch_layout.nb_channels;
471  }
472 
473  ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
474 
475  if (shortterm < s->measured_thresh) {
476  s->above_threshold = 0;
477  env_shortterm = shortterm <= -70. ? 0. : s->target_i - s->measured_i;
478  } else {
479  s->above_threshold = 1;
480  env_shortterm = shortterm <= -70. ? 0. : s->target_i - shortterm;
481  }
482 
483  for (n = 0; n < 30; n++)
484  s->delta[n] = pow(10., env_shortterm / 20.);
485  s->prev_delta = s->delta[s->index];
486 
487  s->buf_index =
488  s->limiter_buf_index = 0;
489 
490  for (n = 0; n < (s->limiter_buf_size / inlink->ch_layout.nb_channels); n++) {
491  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
492  limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * s->delta[s->index] * s->offset;
493  }
494  s->limiter_buf_index += inlink->ch_layout.nb_channels;
495  if (s->limiter_buf_index >= s->limiter_buf_size)
496  s->limiter_buf_index -= s->limiter_buf_size;
497 
498  s->buf_index += inlink->ch_layout.nb_channels;
499  }
500 
501  subframe_length = frame_size(inlink->sample_rate, 100);
502  true_peak_limiter(s, dst, subframe_length, inlink->ch_layout.nb_channels);
503  ff_ebur128_add_frames_double(s->r128_out, dst, subframe_length);
504 
505  out->nb_samples = subframe_length;
506 
507  s->frame_type = INNER_FRAME;
508  break;
509 
510  case INNER_FRAME:
511  gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
512  gain_next = gaussian_filter(s, s->index + 11 < 30 ? s->index + 11 : s->index + 11 - 30);
513 
514  for (n = 0; n < in->nb_samples; n++) {
515  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
516  buf[s->prev_buf_index + c] = src[c];
517  limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * (gain + (((double) n / in->nb_samples) * (gain_next - gain))) * s->offset;
518  }
519  src += inlink->ch_layout.nb_channels;
520 
521  s->limiter_buf_index += inlink->ch_layout.nb_channels;
522  if (s->limiter_buf_index >= s->limiter_buf_size)
523  s->limiter_buf_index -= s->limiter_buf_size;
524 
525  s->prev_buf_index += inlink->ch_layout.nb_channels;
526  if (s->prev_buf_index >= s->buf_size)
527  s->prev_buf_index -= s->buf_size;
528 
529  s->buf_index += inlink->ch_layout.nb_channels;
530  if (s->buf_index >= s->buf_size)
531  s->buf_index -= s->buf_size;
532  }
533 
534  subframe_length = (frame_size(inlink->sample_rate, 100) - in->nb_samples) * inlink->ch_layout.nb_channels;
535  s->limiter_buf_index = s->limiter_buf_index + subframe_length < s->limiter_buf_size ? s->limiter_buf_index + subframe_length : s->limiter_buf_index + subframe_length - s->limiter_buf_size;
536 
537  true_peak_limiter(s, dst, in->nb_samples, inlink->ch_layout.nb_channels);
538  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
539 
540  ff_ebur128_loudness_range(s->r128_in, &lra);
541  ff_ebur128_loudness_global(s->r128_in, &global);
542  ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
543  ff_ebur128_relative_threshold(s->r128_in, &relative_threshold);
544 
545  if (s->above_threshold == 0) {
546  double shortterm_out;
547 
548  if (shortterm > s->measured_thresh)
549  s->prev_delta *= 1.0058;
550 
551  ff_ebur128_loudness_shortterm(s->r128_out, &shortterm_out);
552  if (shortterm_out >= s->target_i)
553  s->above_threshold = 1;
554  }
555 
556  if (shortterm < relative_threshold || shortterm <= -70. || s->above_threshold == 0) {
557  s->delta[s->index] = s->prev_delta;
558  } else {
559  env_global = fabs(shortterm - global) < (s->target_lra / 2.) ? shortterm - global : (s->target_lra / 2.) * ((shortterm - global) < 0 ? -1 : 1);
560  env_shortterm = s->target_i - shortterm;
561  s->delta[s->index] = pow(10., (env_global + env_shortterm) / 20.);
562  }
563 
564  s->prev_delta = s->delta[s->index];
565  s->index++;
566  if (s->index >= 30)
567  s->index -= 30;
568  s->prev_nb_samples = in->nb_samples;
569  break;
570 
571  case FINAL_FRAME:
572  gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
573  s->limiter_buf_index = 0;
574  src_index = 0;
575 
576  for (n = 0; n < s->limiter_buf_size / inlink->ch_layout.nb_channels; n++) {
577  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
578  s->limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
579  }
580  src_index += inlink->ch_layout.nb_channels;
581 
582  s->limiter_buf_index += inlink->ch_layout.nb_channels;
583  if (s->limiter_buf_index >= s->limiter_buf_size)
584  s->limiter_buf_index -= s->limiter_buf_size;
585  }
586 
587  subframe_length = frame_size(inlink->sample_rate, 100);
588  for (i = 0; i < in->nb_samples / subframe_length; i++) {
589  true_peak_limiter(s, dst, subframe_length, inlink->ch_layout.nb_channels);
590 
591  for (n = 0; n < subframe_length; n++) {
592  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
593  if (src_index < (in->nb_samples * inlink->ch_layout.nb_channels)) {
594  limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
595  } else {
596  limiter_buf[s->limiter_buf_index + c] = 0.;
597  }
598  }
599 
600  if (src_index < (in->nb_samples * inlink->ch_layout.nb_channels))
601  src_index += inlink->ch_layout.nb_channels;
602 
603  s->limiter_buf_index += inlink->ch_layout.nb_channels;
604  if (s->limiter_buf_index >= s->limiter_buf_size)
605  s->limiter_buf_index -= s->limiter_buf_size;
606  }
607 
608  dst += (subframe_length * inlink->ch_layout.nb_channels);
609  }
610 
611  dst = (double *)out->data[0];
612  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
613  break;
614 
615  case LINEAR_MODE:
616  for (n = 0; n < in->nb_samples; n++) {
617  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
618  dst[c] = src[c] * s->offset;
619  }
620  src += inlink->ch_layout.nb_channels;
621  dst += inlink->ch_layout.nb_channels;
622  }
623 
624  dst = (double *)out->data[0];
625  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
626  break;
627  }
628 
629  if (in != out)
630  av_frame_free(&in);
631  return ff_filter_frame(outlink, out);
632 }
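/*
 * Annotation (not in the original source): the four FrameType cases above map
 * to the filter's life cycle. FIRST_FRAME buffers the initial 3 s window,
 * seeds delta[] from the short-term loudness and emits only the first 100 ms;
 * INNER_FRAME applies the Gaussian-smoothed dynamic gain plus the true-peak
 * limiter to each 100 ms frame and keeps updating delta[]; FINAL_FRAME drains
 * the remaining buffered audio at end of stream; LINEAR_MODE scales every
 * sample by the fixed offset chosen at init() or on the first short input.
 * r128_in measures the input and r128_out the produced output for the stats
 * printed in uninit().
 */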
633 
634 static int flush_frame(AVFilterLink *outlink)
635 {
636  AVFilterContext *ctx = outlink->src;
637  AVFilterLink *inlink = ctx->inputs[0];
638  LoudNormContext *s = ctx->priv;
639  int ret = 0;
640 
641  if (s->frame_type == INNER_FRAME) {
642  double *src;
643  double *buf;
644  int nb_samples, n, c, offset;
645  AVFrame *frame;
646 
647  nb_samples = (s->buf_size / inlink->ch_layout.nb_channels) - s->prev_nb_samples;
648  nb_samples -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples);
649 
650  frame = ff_get_audio_buffer(outlink, nb_samples);
651  if (!frame)
652  return AVERROR(ENOMEM);
653  frame->nb_samples = nb_samples;
654 
655  buf = s->buf;
656  src = (double *)frame->data[0];
657 
658  offset = ((s->limiter_buf_size / inlink->ch_layout.nb_channels) - s->prev_nb_samples) * inlink->ch_layout.nb_channels;
659  offset -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples) * inlink->ch_layout.nb_channels;
660  s->buf_index = s->buf_index - offset < 0 ? s->buf_index - offset + s->buf_size : s->buf_index - offset;
661 
662  for (n = 0; n < nb_samples; n++) {
663  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
664  src[c] = buf[s->buf_index + c];
665  }
666  src += inlink->ch_layout.nb_channels;
667  s->buf_index += inlink->ch_layout.nb_channels;
668  if (s->buf_index >= s->buf_size)
669  s->buf_index -= s->buf_size;
670  }
671 
672  s->frame_type = FINAL_FRAME;
673  ret = filter_frame(inlink, frame);
674  }
675  return ret;
676 }
677 
678 static int activate(AVFilterContext *ctx)
679 {
680  AVFilterLink *inlink = ctx->inputs[0];
681  AVFilterLink *outlink = ctx->outputs[0];
682  LoudNormContext *s = ctx->priv;
683  AVFrame *in = NULL;
684  int ret = 0, status;
685  int64_t pts;
686 
687  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
688 
689  if (s->frame_type != LINEAR_MODE) {
690  int nb_samples;
691 
692  if (s->frame_type == FIRST_FRAME) {
693  nb_samples = frame_size(inlink->sample_rate, 3000);
694  } else {
695  nb_samples = frame_size(inlink->sample_rate, 100);
696  }
697 
698  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
699  } else {
700  ret = ff_inlink_consume_frame(inlink, &in);
701  }
702 
703  if (ret < 0)
704  return ret;
705  if (ret > 0) {
706  if (s->frame_type == FIRST_FRAME) {
707  const int nb_samples = frame_size(inlink->sample_rate, 100);
708 
709  for (int i = 0; i < FF_ARRAY_ELEMS(s->pts); i++)
710  s->pts[i] = in->pts + i * nb_samples;
711  } else if (s->frame_type == LINEAR_MODE) {
712  s->pts[0] = in->pts;
713  } else {
714  s->pts[FF_ARRAY_ELEMS(s->pts) - 1] = in->pts;
715  }
716  ret = filter_frame(inlink, in);
717  }
718  if (ret < 0)
719  return ret;
720 
721  if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
722  ff_outlink_set_status(outlink, status, pts);
723  return flush_frame(outlink);
724  }
725 
726  FF_FILTER_FORWARD_WANTED(outlink, inlink);
727 
728  return FFERROR_NOT_READY;
729 }
730 
731 static int query_formats(const AVFilterContext *ctx,
732  AVFilterFormatsConfig **cfg_in,
733  AVFilterFormatsConfig **cfg_out)
734 {
735  LoudNormContext *s = ctx->priv;
736  static const int input_srate[] = {192000, -1};
737  static const enum AVSampleFormat sample_fmts[] = {
738  AV_SAMPLE_FMT_DBL,
739  AV_SAMPLE_FMT_NONE
740  };
741  int ret;
742 
743  ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
744  if (ret < 0)
745  return ret;
746 
747  if (s->frame_type != LINEAR_MODE) {
748  return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, input_srate);
749  }
750  return 0;
751 }
752 
753 static int config_input(AVFilterLink *inlink)
754 {
755  AVFilterContext *ctx = inlink->dst;
756  LoudNormContext *s = ctx->priv;
757 
758  s->r128_in = ff_ebur128_init(inlink->ch_layout.nb_channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
759  if (!s->r128_in)
760  return AVERROR(ENOMEM);
761 
762  s->r128_out = ff_ebur128_init(inlink->ch_layout.nb_channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
763  if (!s->r128_out)
764  return AVERROR(ENOMEM);
765 
766  if (inlink->ch_layout.nb_channels == 1 && s->dual_mono) {
767  ff_ebur128_set_channel(s->r128_in, 0, FF_EBUR128_DUAL_MONO);
768  ff_ebur128_set_channel(s->r128_out, 0, FF_EBUR128_DUAL_MONO);
769  }
770 
771  s->buf_size = frame_size(inlink->sample_rate, 3000) * inlink->ch_layout.nb_channels;
772  s->buf = av_malloc_array(s->buf_size, sizeof(*s->buf));
773  if (!s->buf)
774  return AVERROR(ENOMEM);
775 
776  s->limiter_buf_size = frame_size(inlink->sample_rate, 210) * inlink->ch_layout.nb_channels;
777  s->limiter_buf = av_malloc_array(s->buf_size, sizeof(*s->limiter_buf));
778  if (!s->limiter_buf)
779  return AVERROR(ENOMEM);
780 
781  s->prev_smp = av_malloc_array(inlink->ch_layout.nb_channels, sizeof(*s->prev_smp));
782  if (!s->prev_smp)
783  return AVERROR(ENOMEM);
784 
785  init_gaussian_filter(s);
786 
787  s->buf_index =
788  s->prev_buf_index =
789  s->limiter_buf_index = 0;
790  s->channels = inlink->ch_layout.nb_channels;
791  s->index = 1;
792  s->limiter_state = OUT;
793  s->offset = pow(10., s->offset / 20.);
794  s->target_tp = pow(10., s->target_tp / 20.);
795  s->attack_length = frame_size(inlink->sample_rate, 10);
796  s->release_length = frame_size(inlink->sample_rate, 100);
797 
798  return 0;
799 }
800 
801 static av_cold int init(AVFilterContext *ctx)
802 {
803  LoudNormContext *s = ctx->priv;
804  s->frame_type = FIRST_FRAME;
805 
806  if (s->linear) {
807  double offset, offset_tp;
808  offset = s->target_i - s->measured_i;
809  offset_tp = s->measured_tp + offset;
810 
811  if (s->measured_tp != 99 && s->measured_thresh != -70 && s->measured_lra != 0 && s->measured_i != 0) {
812  if ((offset_tp <= s->target_tp) && (s->measured_lra <= s->target_lra)) {
813  s->frame_type = LINEAR_MODE;
814  s->offset = offset;
815  }
816  }
817  }
818 
819  return 0;
820 }
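/*
 * Worked example (editorial, hypothetical numbers): with I=-16 and a first
 * pass reporting measured_I=-23, offset = -16 - (-23) = +7 dB. If
 * measured_TP=-10, the projected peak is -10 + 7 = -3 dBTP, which stays below
 * a TP=-2 ceiling, so (provided measured_LRA <= LRA and all measured_* values
 * were supplied) the filter starts in LINEAR_MODE and applies one constant
 * gain instead of the dynamic path.
 */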
821 
822 static av_cold void uninit(AVFilterContext *ctx)
823 {
824  LoudNormContext *s = ctx->priv;
825  double i_in, i_out, lra_in, lra_out, thresh_in, thresh_out, tp_in, tp_out;
826  int c;
827 
828  if (!s->r128_in || !s->r128_out)
829  goto end;
830 
831  ff_ebur128_loudness_range(s->r128_in, &lra_in);
832  ff_ebur128_loudness_global(s->r128_in, &i_in);
833  ff_ebur128_relative_threshold(s->r128_in, &thresh_in);
834  for (c = 0; c < s->channels; c++) {
835  double tmp;
836  ff_ebur128_sample_peak(s->r128_in, c, &tmp);
837  if ((c == 0) || (tmp > tp_in))
838  tp_in = tmp;
839  }
840 
841  ff_ebur128_loudness_range(s->r128_out, &lra_out);
842  ff_ebur128_loudness_global(s->r128_out, &i_out);
843  ff_ebur128_relative_threshold(s->r128_out, &thresh_out);
844  for (c = 0; c < s->channels; c++) {
845  double tmp;
846  ff_ebur128_sample_peak(s->r128_out, c, &tmp);
847  if ((c == 0) || (tmp > tp_out))
848  tp_out = tmp;
849  }
850 
851  switch(s->print_format) {
852  case NONE:
853  break;
854 
855  case JSON:
856  av_log(ctx, AV_LOG_INFO,
857  "\n{\n"
858  "\t\"input_i\" : \"%.2f\",\n"
859  "\t\"input_tp\" : \"%.2f\",\n"
860  "\t\"input_lra\" : \"%.2f\",\n"
861  "\t\"input_thresh\" : \"%.2f\",\n"
862  "\t\"output_i\" : \"%.2f\",\n"
863  "\t\"output_tp\" : \"%+.2f\",\n"
864  "\t\"output_lra\" : \"%.2f\",\n"
865  "\t\"output_thresh\" : \"%.2f\",\n"
866  "\t\"normalization_type\" : \"%s\",\n"
867  "\t\"target_offset\" : \"%.2f\"\n"
868  "}\n",
869  i_in,
870  20. * log10(tp_in),
871  lra_in,
872  thresh_in,
873  i_out,
874  20. * log10(tp_out),
875  lra_out,
876  thresh_out,
877  s->frame_type == LINEAR_MODE ? "linear" : "dynamic",
878  s->target_i - i_out
879  );
880  break;
881 
882  case SUMMARY:
883  av_log(ctx, AV_LOG_INFO,
884  "\n"
885  "Input Integrated: %+6.1f LUFS\n"
886  "Input True Peak: %+6.1f dBTP\n"
887  "Input LRA: %6.1f LU\n"
888  "Input Threshold: %+6.1f LUFS\n"
889  "\n"
890  "Output Integrated: %+6.1f LUFS\n"
891  "Output True Peak: %+6.1f dBTP\n"
892  "Output LRA: %6.1f LU\n"
893  "Output Threshold: %+6.1f LUFS\n"
894  "\n"
895  "Normalization Type: %s\n"
896  "Target Offset: %+6.1f LU\n",
897  i_in,
898  20. * log10(tp_in),
899  lra_in,
900  thresh_in,
901  i_out,
902  20. * log10(tp_out),
903  lra_out,
904  thresh_out,
905  s->frame_type == LINEAR_MODE ? "Linear" : "Dynamic",
906  s->target_i - i_out
907  );
908  break;
909  }
910 
911 end:
912  if (s->r128_in)
913  ff_ebur128_destroy(&s->r128_in);
914  if (s->r128_out)
915  ff_ebur128_destroy(&s->r128_out);
916  av_freep(&s->limiter_buf);
917  av_freep(&s->prev_smp);
918  av_freep(&s->buf);
919 }
920 
921 static const AVFilterPad avfilter_af_loudnorm_inputs[] = {
922  {
923  .name = "default",
924  .type = AVMEDIA_TYPE_AUDIO,
925  .config_props = config_input,
926  },
927 };
928 
929 const AVFilter ff_af_loudnorm = {
930  .name = "loudnorm",
931  .description = NULL_IF_CONFIG_SMALL("EBU R128 loudness normalization"),
932  .priv_size = sizeof(LoudNormContext),
933  .priv_class = &loudnorm_class,
934  .init = init,
935  .activate = activate,
936  .uninit = uninit,
937  FILTER_INPUTS(avfilter_af_loudnorm_inputs),
938  FILTER_OUTPUTS(ff_audio_default_filterpad),
939  FILTER_QUERY_FUNC2(query_formats),
940 };