Functions

    AVFILTER_DEFINE_CLASS(speechnorm)
    static int       get_pi_samples(PeriodItem *pi, int start, int end, int remain)
    static int       available_samples(AVFilterContext *ctx)
    static void      consume_pi(ChannelContext *cc, int nb_samples)
    static double    next_gain(AVFilterContext *ctx, double pi_max_peak, int bypass, double state, double pi_rms_sum, int pi_size)
    static void      next_pi(AVFilterContext *ctx, ChannelContext *cc, int bypass)
    static double    min_gain(AVFilterContext *ctx, ChannelContext *cc, int max_size)
    static double    dlerp(double min, double max, double mix)
    static float     flerp(float min, float max, float mix)
    static int       filter_frame(AVFilterContext *ctx)
    static int       activate(AVFilterContext *ctx)
    static int       config_input(AVFilterLink *inlink)
    static int       process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
    static av_cold void  uninit(AVFilterContext *ctx)
Speech Normalizer
Definition in file af_speechnorm.c.
◆ FF_BUFQUEUE_SIZE
#define FF_BUFQUEUE_SIZE (1024)
◆ MAX_ITEMS
◆ MIN_PEAK
#define MIN_PEAK (1. / 32768.)
◆ OFFSET
◆ FLAGS
◆ ANALYZE_CHANNEL
#define ANALYZE_CHANNEL(name, ptype, zero, min_peak)
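The four parameters follow the usual type-generic pattern: name is pasted into the generated function's identifier, ptype is the sample type, and zero/min_peak are literals of that type. A minimal standalone sketch of that pattern (the helper name, body, and constants below are illustrative assumptions, not the filter's actual analysis code):

    /* Illustrative only: demonstrates the name/ptype/zero/min_peak pattern,
     * not the real ANALYZE_CHANNEL body. */
    #define DEFINE_PEAK_SCAN(name, ptype, zero, min_peak)               \
    static ptype peak_scan_##name(const ptype *src, int nb_samples)     \
    {                                                                   \
        ptype peak = zero;                                              \
        for (int i = 0; i < nb_samples; i++) {                          \
            ptype a = src[i] < zero ? -src[i] : src[i];                 \
            if (a > peak)                                               \
                peak = a;                                               \
        }                                                               \
        return peak < min_peak ? min_peak : peak;                       \
    }

    DEFINE_PEAK_SCAN(flt, float,  0.f, 1.f / 32768.f)
    DEFINE_PEAK_SCAN(dbl, double, 0.0, 1.0 / 32768.0)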
◆ FILTER_CHANNELS
#define FILTER_CHANNELS(name, ptype)
Value:
static void filter_channels_## name (AVFilterContext *ctx,                     \
                                     AVFrame *in, AVFrame *out, int nb_samples)\
{                                                                               \
    SpeechNormalizerContext *s = ctx->priv;                                     \
    AVFilterLink *inlink = ctx->inputs[0];                                      \
                                                                                \
    for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {                \
        ChannelContext *cc = &s->cc[ch];                                        \
        const ptype *src = (const ptype *)in->extended_data[ch];                \
        ptype *dst = (ptype *)out->extended_data[ch];                           \
        enum AVChannel channel = av_channel_layout_channel_from_index(&inlink->ch_layout, ch); \
        const int bypass = av_channel_layout_index_from_channel(&s->ch_layout, channel) < 0;   \
        int n = 0;                                                              \
                                                                                \
        while (n < nb_samples) {                                                \
            ptype gain;                                                         \
            int size;                                                           \
                                                                                \
            next_pi(ctx, cc, bypass);                                           \
            size = FFMIN(nb_samples - n, cc->pi_size);                          \
            gain = cc->gain_state;                                              \
            consume_pi(cc, size);                                               \
            for (int i = n; !ctx->is_disabled && i < n + size; i++)             \
                dst[i] = src[i] * gain;                                         \
            n += size;                                                          \
        }                                                                       \
    }                                                                           \
}
Definition at line 314 of file af_speechnorm.c.
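The name/ptype pairing suggests the macro is expanded once per supported planar sample format (AV_SAMPLE_FMT_FLTP and AV_SAMPLE_FMT_DBLP are the two formats this filter handles), presumably producing filter_channels_flt() and filter_channels_dbl(), with filter_frame() dispatching to whichever variant matches the negotiated input format; the exact suffixes and dispatch mechanism are assumptions, not confirmed by this page.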
◆ FILTER_LINK_CHANNELS
#define FILTER_LINK_CHANNELS(name, ptype, tlerp)
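The extra tlerp parameter presumably receives the matching linear-interpolation helper for each sample type (flerp for float, dlerp for double, both listed above). A sketch of those helpers, assuming the standard lerp formula, which is not shown on this page:

    /* Assumed implementations, reconstructed only from the prototypes above. */
    static double dlerp(double min, double max, double mix)
    {
        return min + (max - min) * mix;
    }

    static float flerp(float min, float max, float mix)
    {
        return min + (max - min) * mix;
    }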
◆ AVFILTER_DEFINE_CLASS()
AVFILTER_DEFINE_CLASS(speechnorm)
◆ get_pi_samples()
static int get_pi_samples(PeriodItem *pi, int start, int end, int remain)
◆ available_samples()
◆ consume_pi()
◆ next_gain()
◆ next_pi()
◆ min_gain()
◆ dlerp()
◆ flerp()
◆ filter_frame()
◆ activate()
◆ config_input()
◆ process_command()
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
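The body is not reproduced on this page. A minimal sketch of the common FFmpeg pattern for this callback, assuming (not confirmed here) that speechnorm forwards the command to the generic AVOption handler and then refreshes option-derived state via config_input(); it relies on the file's usual libavfilter includes:

    static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                               char *res, int res_len, int flags)
    {
        int ret;

        /* Apply the runtime command through the generic AVOption machinery. */
        ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
        if (ret < 0)
            return ret;

        /* Assumption: re-run input configuration so per-channel state
         * picks up the new option values. */
        return config_input(ctx->inputs[0]);
    }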
◆ uninit()
◆ speechnorm_options
◆ inputs
Initial value:
= {
    {
        .name = "default",
    },
}
Definition at line 585 of file af_speechnorm.c.
◆ ff_af_speechnorm
Initial value:
= {
    .name       = "speechnorm",
    .priv_class = &speechnorm_class,
}
Definition at line 593 of file af_speechnorm.c.
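Doxygen omits initializer fields whose values are cross-references, so the value shown above is incomplete. A sketch of what the full definition presumably contains, assembled from the callbacks and helpers this file's documentation references; every field other than .name and .priv_class is an assumption, as are the field order and the exact description string:

    /* Sketch; fields beyond .name and .priv_class are inferred, not confirmed. */
    const AVFilter ff_af_speechnorm = {
        .name            = "speechnorm",
        .description     = NULL_IF_CONFIG_SMALL("Speech Normalizer."),
        .priv_size       = sizeof(SpeechNormalizerContext),
        .priv_class      = &speechnorm_class,
        .activate        = activate,
        .uninit          = uninit,
        .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
        .process_command = process_command,
        FILTER_INPUTS(inputs),
        FILTER_OUTPUTS(ff_audio_default_filterpad),
        FILTER_SAMPLEFMTS(AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP),
    };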