101 #define OFFSET(x) offsetof(ShowWavesContext, x)
102 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
116     { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
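The "colors" option above takes a '|'-separated list of color names or 0xRRGGBB[AA] values, one entry per audio channel. As an illustrative sketch only (not code from this file), this is one way an application might instantiate the filter through libavfilter and override that option; the size and color values are arbitrary:

    #include <libavfilter/avfilter.h>

    /* Hypothetical helper: create a showwaves instance in an existing graph,
     * overriding the default channel colors declared above. */
    AVFilterContext *create_showwaves(AVFilterGraph *graph)
    {
        const AVFilter *f = avfilter_get_by_name("showwaves");
        AVFilterContext *ctx = NULL;
        if (!f || avfilter_graph_create_filter(&ctx, f, "waves",
                                               "s=640x240:colors=white|0x40c040",
                                               NULL, graph) < 0)
            return NULL;
        return ctx;
    }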
228     buf[h * linesize + 0] += color[0];
229     buf[h * linesize + 1] += color[1];
230     buf[h * linesize + 2] += color[2];
231     buf[h * linesize + 3] += color[3];
240     buf[h * linesize + 0] = color[0];
241     buf[h * linesize + 1] = color[1];
242     buf[h * linesize + 2] = color[2];
243     buf[h * linesize + 3] = color[3];
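The two blocks above are the point-mode pixel writes: the "scale" variant (lines 228-231) accumulates a pre-attenuated per-channel color into the packed RGBA pixel at row h, so overlapping channels add up, while the "full" variant (lines 240-243) simply overwrites the pixel. A minimal self-contained sketch of the same two write styles, assuming buf already points at the first byte of the target column:

    #include <stdint.h>
    #include <string.h>

    /* additive write, as in the "scale" draw mode */
    void put_point_scale(uint8_t *buf, int linesize, const uint8_t color[4], int h)
    {
        for (int c = 0; c < 4; c++)
            buf[h * linesize + c] += color[c];
    }

    /* overwriting write, as in the "full" draw mode */
    void put_point_full(uint8_t *buf, int linesize, const uint8_t color[4], int h)
    {
        memcpy(buf + h * linesize, color, 4);
    }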
255 FFSWAP(int16_t, start, end);
256 for (k = start; k < end; k++) {
257         buf[k * linesize + 0] += color[0];
258         buf[k * linesize + 1] += color[1];
259         buf[k * linesize + 2] += color[2];
260         buf[k * linesize + 3] += color[3];
272 FFSWAP(int16_t, start, end);
273 for (k = start; k < end; k++) {
274         buf[k * linesize + 0] = color[0];
275         buf[k * linesize + 1] = color[1];
276         buf[k * linesize + 2] = color[2];
277         buf[k * linesize + 3] = color[3];
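The loops above implement line mode: a vertical run of pixels between the channel's centre line and the sample position, with the endpoints swapped first when the sample lies above the centre. A self-contained sketch of that pattern; FFSWAP is reproduced here as defined in libavutil/macros.h:

    #include <stdint.h>

    #define FFSWAP(type, a, b) do { type SWAP_tmp = b; b = a; a = SWAP_tmp; } while (0)

    /* draw a vertical RGBA segment from start (inclusive) to end (exclusive);
     * the "full" variant overwrites, the "scale" variant would use += instead */
    void draw_vline_rgba(uint8_t *buf, int linesize, const uint8_t color[4],
                         int16_t start, int16_t end)
    {
        if (start > end)
            FFSWAP(int16_t, start, end);
        for (int k = start; k < end; k++)
            for (int c = 0; c < 4; c++)
                buf[k * linesize + c] = color[c];
    }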
287     buf[h * linesize + 0] += color[0];
288     buf[h * linesize + 1] += color[1];
289     buf[h * linesize + 2] += color[2];
290     buf[h * linesize + 3] += color[3];
291     if (*prev_y && h != *prev_y) {
295 FFSWAP(int16_t, start, end);
296 for (k = start + 1; k < end; k++) {
297             buf[k * linesize + 0] += color[0];
298             buf[k * linesize + 1] += color[1];
299             buf[k * linesize + 2] += color[2];
300             buf[k * linesize + 3] += color[3];
313     buf[h * linesize + 0] = color[0];
314     buf[h * linesize + 1] = color[1];
315     buf[h * linesize + 2] = color[2];
316     buf[h * linesize + 3] = color[3];
317     if (*prev_y && h != *prev_y) {
321 FFSWAP(int16_t, start, end);
322 for (k = start + 1; k < end; k++) {
323             buf[k * linesize + 0] = color[0];
324             buf[k * linesize + 1] = color[1];
325             buf[k * linesize + 2] = color[2];
326             buf[k * linesize + 3] = color[3];
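These fragments are the point-to-point ("p2p") mode: draw the current sample, and if a previous y position was recorded for this channel and differs from the current one, fill the rows in between so consecutive samples appear connected. A reduced, self-contained sketch of the same idea for the single-byte grayscale case (the swap is written out instead of using FFSWAP):

    #include <stdint.h>

    void draw_p2p_gray_sketch(uint8_t *buf, int linesize, uint8_t color0,
                              int16_t *prev_y, int h)
    {
        buf[h * linesize] += color0;
        if (*prev_y && h != *prev_y) {
            int16_t start = *prev_y, end = h;
            if (start > end) {               /* what FFSWAP(int16_t, start, end) does */
                int16_t tmp = start; start = end; end = tmp;
            }
            for (int k = start + 1; k < end; k++)
                buf[k * linesize] += color0;
        }
        *prev_y = h;   /* remember the current position for the next sample */
    }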
338     const int start = (height - h) / 2;
339     const int end   = start + h;
340 for (k = start; k < end; k++) {
341         buf[k * linesize + 0] += color[0];
342         buf[k * linesize + 1] += color[1];
343         buf[k * linesize + 2] += color[2];
344         buf[k * linesize + 3] += color[3];
352     const int start = (height - h) / 2;
353     const int end   = start + h;
354 for (k = start; k < end; k++) {
355         buf[k * linesize + 0] = color[0];
356         buf[k * linesize + 1] = color[1];
357         buf[k * linesize + 2] = color[2];
358         buf[k * linesize + 3] = color[3];
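The "cline" (centered line) variants above draw a run of h pixels centred on the middle of the channel strip instead of anchored at the centre line: with an illustrative height of 240 and h of 100, start = (240 - 100) / 2 = 70 and end = 170, so rows 70..169 are filled, symmetric around row 120.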
367     buf[h * linesize] += color[0];
378 FFSWAP(int16_t, start, end);
379 for (k = start; k < end; k++)
380         buf[k * linesize] += color[0];
389     buf[h * linesize] += color[0];
390     if (*prev_y && h != *prev_y) {
394 FFSWAP(int16_t, start, end);
395 for (k = start + 1; k < end; k++)
396             buf[k * linesize] += color[0];
407     const int start = (height - h) / 2;
408     const int end   = start + h;
409 for (k = start; k < end; k++)
410         buf[k * linesize] += color[0];
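The grayscale variants above advance by one byte per pixel, whereas the RGBA variants touch four; in general the byte offset of pixel (x, y) in a packed plane is y * linesize + x * pixstep, which is why the context carries a pixstep field. A trivial sketch of that addressing:

    #include <stdint.h>

    /* pixstep is 1 for AV_PIX_FMT_GRAY8 and 4 for AV_PIX_FMT_RGBA */
    uint8_t *pixel_ptr(uint8_t *data, int linesize, int pixstep, int x, int y)
    {
        return data + y * linesize + x * pixstep;
    }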
419     char *colors, *saveptr = NULL;
434     outlink->w = showwaves->w;
435     outlink->h = showwaves->h;
444     switch (outlink->format) {
446         switch (showwaves->mode) {
457         switch (showwaves->mode) {
469     switch (showwaves->scale) {
471         switch (showwaves->mode) {
481         switch (showwaves->mode) {
491         switch (showwaves->mode) {
501         switch (showwaves->mode) {
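This cascade of switches in what appears to be config_output() is a configuration-time dispatch: based on the negotiated pixel format, the mode option and the scale option, it stores one get_h() mapping callback and one draw_sample() renderer in the context so the per-sample loop only makes an indirect call. A self-contained sketch of the pattern, with hypothetical stand-in callbacks rather than the static functions defined in this file:

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*get_h_fn)(int16_t sample, int height);

    /* hypothetical linear mapping: sample offset from the centre line */
    int toy_lin_h(int16_t sample, int height)
    {
        return height / 2 - (int)((int64_t)sample * (height / 2) / INT16_MAX);
    }

    /* hypothetical absolute-value mapping: column height from the baseline */
    int toy_abs_h(int16_t sample, int height)
    {
        return (int)((int64_t)(sample < 0 ? -sample : sample) * height / INT16_MAX);
    }

    int main(void)
    {
        get_h_fn get_h;
        int centered = 1;               /* think: a value chosen by an AVOption */

        switch (centered) {             /* decided once at configuration time ... */
        case 1:  get_h = toy_lin_h; break;
        default: get_h = toy_abs_h; break;
        }
        printf("%d\n", get_h(16384, 240));  /* ... so the hot loop is one indirect call */
        return 0;
    }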
527 uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
535             showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
536             showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
537             showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
538             showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
542             showwaves->fg[4 * ch + 0] = x;
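For RGBA output, the loop containing these lines splits the "colors" option with av_strtok(), parses each entry with av_parse_color() into an RGBA quadruple, and scales it by the precomputed intensity x before storing it per channel in showwaves->fg. A standalone sketch of just the split-and-parse step, using the same libavutil helpers (the color list here is arbitrary):

    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/avstring.h>
    #include <libavutil/mem.h>
    #include <libavutil/parseutils.h>

    int main(void)
    {
        char *list = av_strdup("red|green|0x0000ffff");  /* av_strtok() modifies its input */
        char *saveptr = NULL, *tok;
        int ch = 0;

        for (tok = av_strtok(list, " |", &saveptr); tok;
             tok = av_strtok(NULL, " |", &saveptr), ch++) {
            uint8_t rgba[4] = { 0xff, 0xff, 0xff, 0xff };
            if (av_parse_color(rgba, tok, -1, NULL) < 0)
                fprintf(stderr, "unknown color '%s'\n", tok);
            printf("channel %d: %3u %3u %3u %3u\n", ch, rgba[0], rgba[1], rgba[2], rgba[3]);
        }
        av_free(list);
        return 0;
    }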
570     int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
571     int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
572 int64_t last_column_samples = column_max_samples + remaining_samples;
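These three lines budget the input across the picture width: every column averages column_max_samples input samples, and the remainder of the integer division is folded into the last column. With illustrative numbers, total_samples = 441000 and outlink->w = 640 give column_max_samples = 689, remaining_samples = 441000 - 689 * 640 = 40, and a last column that averages 689 + 40 = 729 samples.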
577     const int linesize = out->linesize[0];
578     const int pixstep = showwaves->pixstep;
580     int64_t *sum = showwaves->sum;
582 if (column_max_samples == 0) {
587     av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
594     const int16_t *p = (const int16_t *)frame->data[0];
597         int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
612 if (n == max_samples) {
619 buf += ch*ch_height*linesize;
622                 showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
662     out->width  = outlink->w;
663     out->height = outlink->h;
667     for (j = 0; j < outlink->h; j++)
668         memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
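Clearing the picture one row at a time rather than with a single memset over the whole buffer is presumably because out->linesize[0] can exceed outlink->w * pixstep when the allocated frame carries alignment padding at the end of each row.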
677     if (!strcmp(ctx->filter->name, "showwavespic")) {
685 #if CONFIG_SHOWWAVES_FILTER
694     int16_t *p = (int16_t *)insamples->data[0];
697     const int pixstep = showwaves->pixstep;
698     const int n = showwaves->n;
702     for (i = 0; i < nb_samples; i++) {
711             const int linesize = outpicref->linesize[0];
715 buf += j*ch_height*linesize;
716                 h = showwaves->get_h(*p++, ch_height);
718                                        &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
726         if (showwaves->buf_idx == showwaves->w ||
744     const int nb_samples = showwaves->n * outlink->w;
753         return showwaves_filter_frame(inlink, in);
785     .inputs        = showwaves_inputs,
788 .priv_class = &showwaves_class,
791 #endif // CONFIG_SHOWWAVES_FILTER
793 #if CONFIG_SHOWWAVESPIC_FILTER
795 #define OFFSET(x) offsetof(ShowWavesContext, x)
796 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
798 static const AVOption showwavespic_options[] = {
802 {
"colors",
"set channels colors",
OFFSET(colors),
AV_OPT_TYPE_STRING, {.str =
"red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0,
FLAGS },
838     int16_t *p = (int16_t *)insamples->data[0];
854 f->frame = insamples;
877 .config_props = showwavespic_config_input,
878 .filter_frame = showwavespic_filter_frame,
883 static const AVFilterPad showwavespic_outputs[] = {
894     .name          = "showwavespic",
900     .inputs        = showwavespic_inputs,
901     .outputs       = showwavespic_outputs,
902 .priv_class = &showwavespic_class,
905 #endif // CONFIG_SHOWWAVESPIC_FILTER
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
A list of supported channel layouts.
AVPixelFormat
Pixel format.
Filter format negotiation. The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the list of supported formats is negotiated: for video that means the pixel format, for audio the sample format and channel layout. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. Thus, if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; filters with complex requirements can use this to set the formats supported on one link from the format negotiated on another.
static int get_cbrt_h(int16_t sample, int height)
#define FFSWAP(type, a, b)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static enum AVSampleFormat sample_fmts[]
enum MovChannelLayoutTag * layouts
#define AVERROR_EOF
End of file.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
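av_make_q(), av_div_q() and av_q2d(), all referenced on this page, are the usual helpers for exact rational arithmetic, for instance when deriving a frame rate from a sample rate and a samples-per-frame count. A minimal standalone example with arbitrary numbers:

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational sample_rate = av_make_q(44100, 1);
        AVRational per_frame   = av_make_q(882, 1);                 /* hypothetical n */
        AVRational frame_rate  = av_div_q(sample_rate, per_frame);  /* reduces to 50/1 */
        printf("%d/%d = %f fps\n", frame_rate.num, frame_rate.den, av_q2d(frame_rate));
        return 0;
    }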
int(* get_h)(int16_t sample, int height)
The exact code depends on how similar the blocks are and how related they are to the rest of the code, and it needs to apply these operations to the correct inlink or outlink if there are several; macros are available to factor that out when no extra processing is needed.
static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
void * av_mallocz_array(size_t nmemb, size_t size)
#define AV_LOG_VERBOSE
Detailed information.
static int get_cbrt_h2(int16_t sample, int height)
static const AVOption showwaves_options[]
AVFILTER_DEFINE_CLASS(showwaves)
const char * name
Filter name.
static int get_sqrt_h2(int16_t sample, int height)
A link between two filters.
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
static int get_log_h2(int16_t sample, int height)
void * priv
private data for use by the filter
static int get_sqrt_h(int16_t sample, int height)
static int get_lin_h2(int16_t sample, int height)
static int get_lin_h(int16_t sample, int height)
A filter pad used for either input or output.
static void draw_sample_point_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
struct frame_node * last_frame
static double av_q2d(AVRational a)
Convert an AVRational to a double.
struct frame_node * audio_frames
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const AVFilterPad outputs[]
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
static enum AVPixelFormat pix_fmts[]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static av_cold int init(AVFilterContext *ctx)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
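av_rescale_q() converts a count or timestamp from one time base to another without losing precision to floating point; a minimal standalone example with arbitrary numbers:

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational audio_tb = { 1, 44100 };   /* per-sample time base */
        AVRational video_tb = { 1, 25 };      /* hypothetical output frame time base */
        int64_t pts = 88200;                  /* two seconds worth of samples */
        printf("%" PRId64 "\n", av_rescale_q(pts, audio_tb, video_tb));  /* prints 50 */
        return 0;
    }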
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static int config_output(AVFilterLink *outlink)
static int push_frame(AVFilterLink *outlink)
Describe the class of an AVClass context structure.
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Rational number (pair of numerator and denominator).
static int get_log_h(int16_t sample, int height)
filter_frame: for filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input.
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
AVFilterLink ** inputs
array of pointers to input links
static av_cold void uninit(AVFilterContext *ctx)
static int request_frame(AVFilterLink *outlink)
static int query_formats(AVFilterContext *ctx)
Buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more input, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any of them.
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
int draw_mode
ShowWavesDrawMode.
static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
AVFilter ff_avf_showwavespic
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
int format
agreed upon media format
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
static AVRational av_make_q(int num, int den)
Create an AVRational.
AVFilterContext * src
source filter
AVFilterFormatsConfig incfg
Lists of supported formats / etc.
FF_FILTER_FORWARD_WANTED(outlink, inlink)
static int push_single_pic(AVFilterLink *outlink)
int nb_samples
number of audio samples (per channel) described by this frame
int w
agreed upon image width
#define av_malloc_array(a, b)
AVSampleFormat
Audio sample formats.
static void draw_sample_line_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
@ AV_SAMPLE_FMT_S16
signed 16 bits
static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
void(* draw_sample)(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
A filter with several inputs must be ready for frames arriving randomly on any of them and will most likely require some kind of queuing mechanism; it is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output; for a filter, if there are already queued frames, one of them should be pushed, otherwise the filter should request a frame on one of its inputs, repeatedly if necessary, until at least one frame has been pushed or progress has been made towards producing a frame.
int h
agreed upon image height
static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p, const AVFilterLink *inlink, AVFilterLink *outlink, const AVFrame *in)
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
char * av_strdup(const char *s)
Duplicate a string.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVFilter ff_avf_showwaves
static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)