65 #define OFFSET(x) offsetof(GraphMonitorContext, x)
66 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
113     for (int i = 0; i < out->height; i++)
114         for (int j = 0; j < out->width; j++)
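The two loops above (lines 113-114) visit every pixel of the output frame; together with the clear_image() declaration indexed further down, they suggest the canvas is filled with the background colour before anything is drawn. A minimal sketch of such a fill, assuming the GraphMonitorContext bg[] member seen at line 350 and the packed AV_PIX_FMT_RGBA output format (AV_WN32/AV_RN32 come from libavutil/intreadwrite.h); this is an illustration, not the file's verified code:

static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    /* Sketch only: write the 4-byte RGBA background colour to every pixel.
     * outlink is unused in this simplified version. */
    for (int i = 0; i < out->height; i++)
        for (int j = 0; j < out->width; j++)
            AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, AV_RN32(s->bg));
}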
126     if (y + 8 >= pic->height ||
127         x + strlen(txt) * 8 >= pic->width)
130     for (i = 0; txt[i]; i++) {
134         for (char_y = 0; char_y < font_height; char_y++) {
136             if (font[txt[i] * font_height + char_y] & mask) {
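The fragment at lines 130-136 is the core of drawtext(): each character of txt selects font_height consecutive bytes in the CGA font table (avpriv_cga_font, 256 glyphs of 8 rows each), and every row byte is tested bit by bit against a mask to decide which pixels receive the text colour. The standalone program below illustrates the same bit-testing technique with a hypothetical 8x8 glyph; it prints to stdout and does not use the FFmpeg font table:

#include <stdio.h>
#include <stdint.h>

/* A hypothetical 8x8 glyph (the letter 'F'), one byte per row,
 * most-significant bit = leftmost pixel, as in CGA-style fonts. */
static const uint8_t glyph_F[8] = {
    0xFE, 0x80, 0x80, 0xFC, 0x80, 0x80, 0x80, 0x00,
};

int main(void)
{
    for (int char_y = 0; char_y < 8; char_y++) {          /* each row of the glyph  */
        for (uint8_t mask = 0x80; mask; mask >>= 1)       /* each pixel in the row  */
            putchar(glyph_F[char_y] & mask ? '#' : ' ');  /* "set" bits are drawn   */
        putchar('\n');
    }
    return 0;
}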
150     for (int j = 0; j < filter->nb_inputs; j++) {
158     for (int j = 0; j < filter->nb_outputs; j++) {
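Lines 150 and 158 iterate over a filter's input and output links; given the filter_have_queued() declaration indexed below, they most likely test whether any link still holds frames in its FIFO. A sketch under that assumption, using the internal ff_inlink_queued_frames() helper from libavfilter/filters.h (the body is inferred, not copied from the file):

static int filter_have_queued(AVFilterContext *filter)
{
    /* Return non-zero if any input or output link of `filter` has queued frames. */
    for (int j = 0; j < filter->nb_inputs; j++) {
        if (ff_inlink_queued_frames(filter->inputs[j]))
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        if (ff_inlink_queued_frames(filter->outputs[j]))
            return 1;
    }

    return 0;
}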
175 char buffer[1024] = { 0 };
186     xpos += strlen(buffer) * 8;
195     xpos += strlen(buffer) * 8;
204     xpos += strlen(buffer) * 8;
209     xpos += strlen(buffer) * 8;
214     xpos += strlen(buffer) * 8;
217     xpos += strlen(buffer) * 8;
222     xpos += strlen(buffer) * 8;
227     xpos += strlen(buffer) * 8;
232     xpos += strlen(buffer) * 8;
237     xpos += strlen(buffer) * 8;
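Every one of the repeated statements above follows the same layout pattern inside draw_items(): a statistic is formatted into buffer with snprintf, drawn with the 8-pixel-wide font, and the cursor is advanced by 8 pixels per character so the next field starts immediately after it. A small illustrative helper capturing that pattern (draw_field is hypothetical, not part of the file; it relies on drawtext() and <string.h>):

static int draw_field(GraphMonitorContext *s, AVFrame *out,
                      int xpos, int ypos, const char *txt)
{
    /* Draw one text field at (xpos, ypos) and return the x position
     * immediately after it, using the 8-pixel glyph width. */
    drawtext(out, xpos, ypos, txt, s->white);
    return xpos + strlen(txt) * 8;
}

With such a helper, each statement above would reduce to xpos = draw_field(s, out, xpos, ypos, buffer);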
254     for (int i = 0; i < ctx->graph->nb_filters; i++) {
256 char buffer[1024] = { 0 };
263         xpos += strlen(filter->name) * 8 + 10;
266         for (int j = 0; j < filter->nb_inputs; j++) {
276             xpos += strlen(buffer) * 8;
278             xpos += strlen(l->src->name) * 8 + 10;
284         for (int j = 0; j < filter->nb_outputs; j++) {
294             xpos += strlen(buffer) * 8;
296             xpos += strlen(l->dst->name) * 8 + 10;
350     s->bg[3] = 255 * s->opacity;
351     s->white[0] = s->white[1] = s->white[2] = 255;
352     s->yellow[0] = s->yellow[1] = 255;
366 #if CONFIG_GRAPHMONITOR_FILTER
378 static const AVFilterPad graphmonitor_outputs[] = {
388     .name          = "graphmonitor",
391     .priv_class    = &graphmonitor_class,
394     .inputs        = graphmonitor_inputs,
395     .outputs       = graphmonitor_outputs,
398 #endif // CONFIG_GRAPHMONITOR_FILTER
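For context, the designated initializers at lines 388-395 belong to the AVFilter definition ff_vf_graphmonitor declared in the index below. A plausible reconstruction of the full definition, assuming the pre-FFmpeg-5.0 AVFilter API that this file uses; the description string and field order here are illustrative, not copied from the source:

AVFilter ff_vf_graphmonitor = {
    .name          = "graphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &graphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = graphmonitor_inputs,
    .outputs       = graphmonitor_outputs,
};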
400 #if CONFIG_AGRAPHMONITOR_FILTER
402 #define agraphmonitor_options graphmonitor_options
405 static const AVFilterPad agraphmonitor_inputs[] = {
413 static const AVFilterPad agraphmonitor_outputs[] = {
423     .name          = "agraphmonitor",
426     .priv_class    = &agraphmonitor_class,
429     .inputs        = agraphmonitor_inputs,
430     .outputs       = agraphmonitor_outputs,
432 #endif // CONFIG_AGRAPHMONITOR_FILTER
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
AVPixelFormat
Pixel format.
Frame: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
AVFilterFormats * in_formats
Lists of formats and channel layouts supported by the input and output filters respectively.
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
The exact code depends on how similar the blocks are and how related they are to the rest of the filter; the filter needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that when no extra processing is needed.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
This structure describes decoded (raw) audio or video data.
filter_frame: for filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, then the filter needs to output only at least one frame per link; the additional frames can be left buffered in the filter.
const char * name
Filter name.
A link between two filters.
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
static int create_frame(AVFilterContext *ctx, int64_t pts)
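create_frame() is the routine that produces each output picture. Based on the helpers indexed on this page (ff_get_video_buffer, clear_image, drawtext, ff_filter_frame), a simplified sketch of its likely structure; the drawing step stands in for the loop over ctx->graph->nb_filters seen at lines 254-296 and is not the file's exact code:

static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    clear_image(s, out, outlink);   /* fill with the background colour */
    /* ... walk ctx->graph->nb_filters and drawtext() the per-link stats ... */

    out->pts = pts;
    return ff_filter_frame(outlink, out);
}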
int channels
Number of channels.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
AVFilter ff_avf_agraphmonitor
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
void * priv
private data for use by the filter
int64_t current_pts_us
Current timestamp of the link, as defined by the most recent frame(s), in AV_TIME_BASE units.
static int query_formats(AVFilterContext *ctx)
A filter pad used for either input or output.
enum AVMediaType type
filter media type
static const uint16_t mask[17]
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
static const AVFilterPad outputs[]
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0, it will be automatically copied from the first input of the source filter if it exists.
static enum AVPixelFormat pix_fmts[]
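The static pix_fmts[] table, together with the AV_PIX_FMT_RGBA entry above, suggests that query_formats() pins the video output to packed RGBA regardless of the input. A sketch of that negotiation, assuming the in_formats reference API of this FFmpeg era (ff_make_format_list / ff_formats_ref from libavfilter/formats.h); details are inferred, not verified against the file:

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE,
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    /* Advertise RGBA as the only format produced on the output link. */
    return ff_formats_ref(fmts, &outlink->in_formats);
}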
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
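av_rescale_q() is what converts timestamps between time bases, e.g. from a link's time_base to the microsecond-based AV_TIME_BASE_Q used by current_pts_us. A self-contained example that links against libavutil only:

#include <stdio.h>
#include <inttypes.h>
#include <libavutil/avutil.h>       /* AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>  /* av_rescale_q() */

int main(void)
{
    AVRational tb = { 1, 25 };   /* a 25 fps stream time base */
    int64_t pts   = 100;         /* 100 ticks = 4 seconds     */
    int64_t us    = av_rescale_q(pts, tb, AV_TIME_BASE_Q);

    /* Prints: 100 ticks @ 1/25 = 4000000 microseconds */
    printf("%" PRId64 " ticks @ 1/25 = %" PRId64 " microseconds\n", pts, us);
    return 0;
}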
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Describe the class of an AVClass context structure.
Rational number (pair of numerator and denominator).
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
char * name
name of this filter instance
These buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input.
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
static int activate(AVFilterContext *ctx)
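activate() is the scheduling entry point of the filter. Based on the inlink helpers and forwarding macros indexed on this page (FF_FILTER_FORWARD_STATUS_BACK, ff_inlink_consume_frame, FF_FILTER_FORWARD_STATUS, FF_FILTER_FORWARD_WANTED), the following is a plausible skeleton of how a monitoring filter with one input and one video output could be driven. It is a sketch of the general pattern from libavfilter/filters.h, not the file's verified implementation:

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame = NULL;
    int ret;

    /* Propagate EOF/error reported on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0) {
        int64_t pts = av_rescale_q(frame->pts, inlink->time_base,
                                   outlink->time_base);
        av_frame_free(&frame);            /* only the stats are displayed */
        return create_frame(ctx, pts);    /* render and push one picture  */
    }

    /* Forward EOF downstream, and forward frame requests upstream. */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}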
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
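Both av_ts2str() and av_ts2timestr() expand to a compound literal holding the formatted string, which is why the result must be used directly inside a function argument and never stored or used stand-alone. Typical use with libavutil/timestamp.h:

#include <stdio.h>
#include <libavutil/timestamp.h>   /* av_ts2str(), av_ts2timestr() */

static void log_pts(int64_t pts, AVRational tb)
{
    /* Correct: the macros are used directly as printf arguments. */
    printf("pts:%s pts_time:%s\n", av_ts2str(pts), av_ts2timestr(pts, &tb));
}

int main(void)
{
    log_pts(40000, (AVRational){ 1, 90000 });
    return 0;
}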
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int format
agreed upon media format
#define AV_NOPTS_VALUE
Undefined timestamp value.
AVFilter ff_vf_graphmonitor
AVFilterContext * dst
dest filter
AVFilterContext * src
source filter
FF_FILTER_FORWARD_WANTED(outlink, inlink)
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
#define AVFILTER_DEFINE_CLASS(fname)
int sample_rate
samples per second
#define i(width, name, range_min, range_max)
int w
agreed upon image width
static const AVOption graphmonitor_options[]
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
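av_inv_q() is the usual way a video source derives its output time base from a configured frame rate (one tick per frame), which is presumably how this filter's rate option (AV_OPT_TYPE_VIDEO_RATE above) ends up as the output time_base. A self-contained illustration:

#include <stdio.h>
#include <libavutil/rational.h>   /* av_inv_q() */

int main(void)
{
    AVRational frame_rate = { 25, 1 };
    AVRational time_base  = av_inv_q(frame_rate);   /* {1, 25}: one tick per frame */

    printf("time base = %d/%d\n", time_base.num, time_base.den);
    return 0;
}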
const char * name
Pad name.
static int filter_have_queued(AVFilterContext *filter)
These buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are already queued frames, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return 0 if it could produce a frame, or at least make progress towards producing a frame.
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
static int config_output(AVFilterLink *outlink)
int h
agreed upon image height
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
int64_t frame_count_in
Number of past frames sent through the link.
const uint8_t avpriv_cga_font[2048]
FF_FILTER_FORWARD_STATUS(inlink, outlink)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
This method is called when something must be done in a filter; the definition of that something depends on the semantics of the filter. The callback must examine the status of the filter's links and proceed accordingly. The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function; if this function returns true, the processing requires a frame on this link and the filter is expected to make efforts in that direction.
static void draw_items(AVFilterContext *ctx, AVFrame *out, int xpos, int ypos, AVFilterLink *l, size_t frames)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.