30 #define HISTOGRAM_SIZE 8192
31 #define HISTOGRAM_MAX (HISTOGRAM_SIZE-1)
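A hedged sketch of how a normalized sample could be mapped onto one of the HISTOGRAM_SIZE bins used for the noise-floor statistics; the exact clamping and rounding in the filter may differ, and histogram_bin is a hypothetical helper, not part of this file:

#include <math.h>                 /* fabs(), lrint() */
#include "libavutil/common.h"     /* av_clipd() */

/* Clamp |sample| to [0, 1] and scale it to the last bin index. */
static inline int histogram_bin(double sample)
{
    return (int)lrint(av_clipd(fabs(sample), 0.0, 1.0) * HISTOGRAM_MAX);
}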
33 #define MEASURE_ALL UINT_MAX
34 #define MEASURE_NONE 0
36 #define MEASURE_DC_OFFSET (1 << 0)
37 #define MEASURE_MIN_LEVEL (1 << 1)
38 #define MEASURE_MAX_LEVEL (1 << 2)
39 #define MEASURE_MIN_DIFFERENCE (1 << 3)
40 #define MEASURE_MAX_DIFFERENCE (1 << 4)
41 #define MEASURE_MEAN_DIFFERENCE (1 << 5)
42 #define MEASURE_RMS_DIFFERENCE (1 << 6)
43 #define MEASURE_PEAK_LEVEL (1 << 7)
44 #define MEASURE_RMS_LEVEL (1 << 8)
45 #define MEASURE_RMS_PEAK (1 << 9)
46 #define MEASURE_RMS_TROUGH (1 << 10)
47 #define MEASURE_CREST_FACTOR (1 << 11)
48 #define MEASURE_FLAT_FACTOR (1 << 12)
49 #define MEASURE_PEAK_COUNT (1 << 13)
50 #define MEASURE_BIT_DEPTH (1 << 14)
51 #define MEASURE_DYNAMIC_RANGE (1 << 15)
52 #define MEASURE_ZERO_CROSSINGS (1 << 16)
53 #define MEASURE_ZERO_CROSSINGS_RATE (1 << 17)
54 #define MEASURE_NUMBER_OF_SAMPLES (1 << 18)
55 #define MEASURE_NUMBER_OF_NANS (1 << 19)
56 #define MEASURE_NUMBER_OF_INFS (1 << 20)
57 #define MEASURE_NUMBER_OF_DENORMALS (1 << 21)
58 #define MEASURE_NOISE_FLOOR (1 << 22)
59 #define MEASURE_NOISE_FLOOR_COUNT (1 << 23)
61 #define MEASURE_MINMAXPEAK (MEASURE_MIN_LEVEL | MEASURE_MAX_LEVEL | MEASURE_PEAK_LEVEL)
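The measure flags above are combined into plain bitmasks, so checking whether a statistic was requested is a bitwise test. A small illustrative sketch, not code from the filter:

#include <limits.h>   /* UINT_MAX, used by MEASURE_ALL */

unsigned measure = MEASURE_ALL & ~MEASURE_NOISE_FLOOR;   /* everything but the noise floor */

if (measure & MEASURE_RMS_LEVEL) {
    /* RMS level was requested */
}
if ((measure & MEASURE_MINMAXPEAK) == MEASURE_MINMAXPEAK) {
    /* min, max and peak level were all requested */
}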
108 #define OFFSET(x) offsetof(AudioStatsContext, x)
109 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
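OFFSET and FLAGS are the usual helpers for the filter's AVOption table; a hypothetical entry (the option name, default and range here are illustrative, not the filter's actual table) would look like:

static const AVOption example_options[] = {
    { "length", "window length in seconds", OFFSET(time_constant),
      AV_OPT_TYPE_DOUBLE, { .dbl = 0.05 }, 0.01, 10, FLAGS },
    { NULL }
};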
186 for (c = 0; c < s->nb_channels; c++) {
204 p->imask = 0xFFFFFFFFFFFFFFFF;
229 s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
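The + .5 rounds the window length (in seconds) times the sample rate to the nearest integer before it is stored; for example, with time_constant = 0.05 and a 44100 Hz output:

/* 5 * 0.05 * 44100 + .5 = 11025.5  ->  tc_samples = 11025 after truncation */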
232 for (int i = 0; i < s->nb_channels; i++) {
256 unsigned result = s->maxbitdepth;
289 } else if (d == p->min) {
292 } else if (p->last == p->min) {
305 } else if (d == p->max) {
308 } else if (p->last == p->max) {
342 double noise_floor = 1.;
355 if (noise_floor < p->noise_floor) {
377 int type = fpclassify(d);
386 int type = fpclassify(d);
394 const char *fmt, double val)
401 snprintf(key2, sizeof(key2), "lavfi.astats.%d.%s", chan, key);
403 snprintf(key2, sizeof(key2), "lavfi.astats.%s", key);
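Depending on chan, the metadata keys are either per-channel ("lavfi.astats.1.RMS_level" for the first channel) or overall ("lavfi.astats.Overall.RMS_level"). A sketch of reading them back on the application side, assuming the frame passed through something like astats=metadata=1; av_dict_get is the standard libavutil accessor:

#include <stdio.h>
#include "libavutil/dict.h"
#include "libavutil/frame.h"

static void read_astats(const AVFrame *frame)
{
    const AVDictionaryEntry *e;

    if ((e = av_dict_get(frame->metadata, "lavfi.astats.1.RMS_level", NULL, 0)))
        printf("channel 1 RMS level: %s dB\n", e->value);
    if ((e = av_dict_get(frame->metadata, "lavfi.astats.Overall.RMS_level", NULL, 0)))
        printf("overall RMS level: %s dB\n", e->value);
}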
407 #define LINEAR_TO_DB(x) (log10(x) * 20)
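LINEAR_TO_DB converts a linear amplitude ratio to decibels via 20 * log10(x): 1.0 maps to 0 dB and 0.5 to roughly -6.02 dB. A trivial check:

double full_db = LINEAR_TO_DB(1.0);   /*  0.00 dB */
double half_db = LINEAR_TO_DB(0.5);   /* ~-6.02 dB */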
411 uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0, noise_floor_count = 0;
412 uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
413 double min_runs = 0, max_runs = 0,
414 min = DBL_MAX, max =-DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
415 nmin = DBL_MAX, nmax =-DBL_MAX,
422 min_sigma_x2 = DBL_MAX,
423 max_sigma_x2 =-DBL_MAX;
427 for (c = 0; c < s->nb_channels; c++) {
494 set_meta(metadata, c + 1, "Bit_depth", "%f", depth.num);
495 set_meta(metadata, c + 1, "Bit_depth2", "%f", depth.den);
512 set_meta(metadata, 0, "Overall.DC_offset", "%f", max_sigma_x / (nb_samples / s->nb_channels));
514 set_meta(metadata, 0, "Overall.Min_level", "%f", min);
516 set_meta(metadata, 0, "Overall.Max_level", "%f", max);
518 set_meta(metadata, 0, "Overall.Min_difference", "%f", min_diff);
520 set_meta(metadata, 0, "Overall.Max_difference", "%f", max_diff);
522 set_meta(metadata, 0, "Overall.Mean_difference", "%f", diff1_sum / (nb_samples - s->nb_channels));
524 set_meta(metadata, 0, "Overall.RMS_difference", "%f", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
534 set_meta(metadata, 0, "Overall.Flat_factor", "%f", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
536 set_meta(metadata, 0, "Overall.Peak_count", "%f", (float)(min_count + max_count) / (double)s->nb_channels);
540 set_meta(metadata, 0, "Overall.Noise_floor_count", "%f", noise_floor_count / (double)s->nb_channels);
543 set_meta(metadata, 0, "Overall.Bit_depth", "%f", depth.num);
544 set_meta(metadata, 0, "Overall.Bit_depth2", "%f", depth.den);
547 set_meta(metadata, 0, "Overall.Number_of_samples", "%f", nb_samples / s->nb_channels);
549 set_meta(metadata, 0, "Number of NaNs", "%f", nb_nans / (float)s->nb_channels);
551 set_meta(metadata, 0, "Number of Infs", "%f", nb_infs / (float)s->nb_channels);
553 set_meta(metadata, 0, "Number of denormals", "%f", nb_denormals / (float)s->nb_channels);
556 #define UPDATE_STATS_P(type, update_func, update_float, channel_func) \
557 for (int c = start; c < end; c++) { \
558 ChannelStats *p = &s->chstats[c]; \
559 const type *src = (const type *)data[c]; \
560 const type * const srcend = src + samples; \
561 for (; src < srcend; src++) { \
568 #define UPDATE_STATS_I(type, update_func, update_float, channel_func) \
569 for (int c = start; c < end; c++) { \
570 ChannelStats *p = &s->chstats[c]; \
571 const type *src = (const type *)data[0]; \
572 const type * const srcend = src + samples * channels; \
573 for (src += c; src < srcend; src += channels) { \
580 #define UPDATE_STATS(planar, type, sample, normalizer_suffix, int_sample) \
581 if ((s->measure_overall | s->measure_perchannel) & ~MEASURE_MINMAXPEAK) { \
582 UPDATE_STATS_##planar(type, update_stat(s, p, sample, sample normalizer_suffix, int_sample), s->is_float ? update_float_stat(s, p, sample) : s->is_double ? update_double_stat(s, p, sample) : (void)NULL, ); \
584 UPDATE_STATS_##planar(type, update_minmax(s, p, sample), , p->nmin = p->min normalizer_suffix; p->nmax = p->max normalizer_suffix;); \
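Stripped of the macro machinery, UPDATE_STATS_P walks one plane per channel while UPDATE_STATS_I strides through a single interleaved buffer; roughly, for a float input (process_sample is a hypothetical stand-in for the per-sample update calls):

/* Planar: data[c] holds all samples of channel c contiguously. */
for (int c = start; c < end; c++) {
    const float *src = (const float *)data[c];
    for (int n = 0; n < samples; n++)
        process_sample(&s->chstats[c], src[n]);
}

/* Interleaved: data[0] holds frames of `channels` samples; channel c is
 * every channels-th sample starting at offset c. */
for (int c = start; c < end; c++) {
    const float *src = (const float *)data[0];
    for (int n = c; n < samples * channels; n += channels)
        process_sample(&s->chstats[c], src[n]);
}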
595 const int start = (buf->channels * jobnr) / nb_jobs;
596 const int end = (buf->channels * (jobnr+1)) / nb_jobs;
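Each slice-threaded job processes a contiguous range of channels; the integer division spreads the channels as evenly as possible. For example, 6 channels across 4 jobs:

/* channels = 6, nb_jobs = 4:
 *   job 0: start = 6*0/4 = 0, end = 6*1/4 = 1  -> channel  0
 *   job 1: start = 6*1/4 = 1, end = 6*2/4 = 3  -> channels 1..2
 *   job 2: start = 6*2/4 = 3, end = 6*3/4 = 4  -> channel  3
 *   job 3: start = 6*3/4 = 4, end = 6*4/4 = 6  -> channels 4..5 */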
640 if (s->reset_count > 0) {
641 if (s->nb_frames >= s->reset_count) {
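With a positive reset_count the accumulated statistics are discarded every reset_count frames; a minimal sketch of that pattern (the exact ordering of the counter update in the filter may differ):

if (s->reset_count > 0) {
    if (s->nb_frames >= s->reset_count) {
        reset_stats(s);      /* clear every ChannelStats accumulator */
        s->nb_frames = 0;
    }
    s->nb_frames++;
}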
659 uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0, noise_floor_count = 0;
660 uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
661 double min_runs = 0, max_runs = 0,
662 min = DBL_MAX, max =-DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
663 nmin = DBL_MAX, nmax =-DBL_MAX,
670 min_sigma_x2 = DBL_MAX,
671 max_sigma_x2 =-DBL_MAX;
675 for (c = 0; c < s->nb_channels; c++) {
774 av_log(ctx, AV_LOG_INFO, "RMS difference: %f\n", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
782 if (min_sigma_x2 != 1)
813 for (int i = 0; i < s->nb_channels; i++) {
845 .priv_class = &astats_class,
@ AV_SAMPLE_FMT_FLTP
float, planar
A list of supported channel layouts.
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
static void set_meta(AVDictionary **metadata, int chan, const char *key, const char *fmt, double val)
Filter documentation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats (for video that means pixel format, for audio that means channel layout and sample format) is a reference to a shared object. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; this can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.
#define MEASURE_PEAK_COUNT
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static enum AVSampleFormat sample_fmts[]
static const AVOption astats_options[]
enum MovChannelLayoutTag * layouts
The exact code depends on how similar the blocks are and how related they are to the structure of the filter, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that out when no extra processing is needed.
#define MEASURE_RMS_TROUGH
#define MEASURE_MIN_LEVEL
This structure describes decoded (raw) audio or video data.
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
static int config_output(AVFilterLink *outlink)
const char * name
Filter name.
AVFormatInternal * internal
An opaque field for libavformat internal usage.
A link between two filters.
unsigned histogram[HISTOGRAM_SIZE]
static void reset_stats(AudioStatsContext *s)
int channels
Number of channels.
static void update_stat(AudioStatsContext *s, ChannelStats *p, double d, double nd, int64_t i)
void * priv
private data for use by the filter
static void update_minmax(AudioStatsContext *s, ChannelStats *p, double d)
static double val(void *priv, double ch)
It is the only field you need to keep, assuming you have a context; there is some magic around it that you do not need to care about, just let it be.
@ AV_SAMPLE_FMT_S64P
signed 64 bits, planar
A filter pad used for either input or output.
static const uint16_t mask[17]
int channels
number of audio channels, only used for audio.
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
static const AVFilterPad outputs[]
#define UPDATE_STATS(planar, type, sample, normalizer_suffix, int_sample)
static void update_double_stat(AudioStatsContext *s, ChannelStats *p, double d)
static void update_float_stat(AudioStatsContext *s, ChannelStats *p, float d)
static av_cold void uninit(AVFilterContext *ctx)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as their negation.
#define MEASURE_RMS_DIFFERENCE
uint64_t noise_floor_count
Describe the class of an AVClass context structure.
Forward the result (frame or status change) to the corresponding input; if nothing is possible, …
static __device__ float fabs(float a)
#define MEASURE_NOISE_FLOOR_COUNT
#define MEASURE_ZERO_CROSSINGS_RATE
#define MEASURE_DC_OFFSET
Rational number (pair of numerator and denominator).
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, …
Undefined behavior: in C, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing memory outside the allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing based on wrong assumptions can, and in some cases has, led to effects beyond the output of the computations. The signed-integer-overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not …
#define MEASURE_FLAT_FACTOR
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define MEASURE_DYNAMIC_RANGE
int format
agreed upon media format
AVFilterContext * src
source filter
#define MEASURE_NUMBER_OF_SAMPLES
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
#define AV_LOG_INFO
Standard information.
static const AVFilterPad astats_inputs[]
int sample_rate
samples per second
#define MEASURE_MEAN_DIFFERENCE
int nb_samples
number of audio samples (per channel) described by this frame
#define MEASURE_NUMBER_OF_DENORMALS
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
static int filter_channel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
uint8_t ** extended_data
pointers to the data planes/channels.
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
AVSampleFormat
Audio sample formats.
It is the only field you need to keep, assuming you have a context; there is some magic around it that you do not need to care about, just let it be.
static int query_formats(AVFilterContext *ctx)
#define MEASURE_PEAK_LEVEL
@ AV_SAMPLE_FMT_S16
signed 16 bits
static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
const char * name
Pad name.
#define MEASURE_RMS_LEVEL
#define MEASURE_NUMBER_OF_NANS
#define MEASURE_BIT_DEPTH
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
AVDictionary * metadata
metadata.
@ AV_SAMPLE_FMT_DBLP
double, planar
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure.
#define MEASURE_NUMBER_OF_INFS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
#define MEASURE_NOISE_FLOOR
AVFILTER_DEFINE_CLASS(astats)
static void print_stats(AVFilterContext *ctx)
#define MEASURE_MAX_LEVEL
#define MEASURE_MIN_DIFFERENCE
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
#define flags(name, subs,...)
#define MEASURE_CREST_FACTOR
@ AV_SAMPLE_FMT_DBL
double
@ AV_SAMPLE_FMT_S32
signed 32 bits
#define MEASURE_MAX_DIFFERENCE
static const AVFilterPad astats_outputs[]
@ AV_SAMPLE_FMT_S64
signed 64 bits
#define MEASURE_ZERO_CROSSINGS