Go to the documentation of this file.
69 #define OFFSET(x) offsetof(GraphMonitorContext, x)
70 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
120 for (
int i = 0;
i <
out->height;
i++)
121 for (
int j = 0; j <
out->width; j++)
133 if (y + 8 >= pic->
height ||
134 x + strlen(txt) * 8 >= pic->
width)
137 for (
i = 0; txt[
i];
i++) {
140 uint8_t *p = pic->
data[0] + y*pic->
linesize[0] + (x +
i*8)*4;
141 for (char_y = 0; char_y < font_height; char_y++) {
143 if (font[txt[
i] * font_height + char_y] &
mask) {
157 for (
int j = 0; j <
filter->nb_inputs; j++) {
165 for (
int j = 0; j <
filter->nb_outputs; j++) {
182 char buffer[1024] = { 0 };
193 xpos += strlen(
buffer) * 8;
202 xpos += strlen(
buffer) * 8;
211 xpos += strlen(
buffer) * 8;
216 xpos += strlen(
buffer) * 8;
221 xpos += strlen(
buffer) * 8;
224 xpos += strlen(
buffer) * 8;
229 xpos += strlen(
buffer) * 8;
234 xpos += strlen(
buffer) * 8;
239 xpos += strlen(
buffer) * 8;
244 xpos += strlen(
buffer) * 8;
249 xpos += strlen(
buffer) * 8;
254 xpos += strlen(
buffer) * 8;
259 xpos += strlen(
buffer) * 8;
276 for (
int i = 0;
i <
ctx->graph->nb_filters;
i++) {
278 char buffer[1024] = { 0 };
285 xpos += strlen(
filter->name) * 8 + 10;
288 for (
int j = 0; j <
filter->nb_inputs; j++) {
298 xpos += strlen(
buffer) * 8;
300 xpos += strlen(l->
src->
name) * 8 + 10;
306 for (
int j = 0; j <
filter->nb_outputs; j++) {
316 xpos += strlen(
buffer) * 8;
318 xpos += strlen(l->
dst->
name) * 8 + 10;
372 s->bg[3] = 255 *
s->opacity;
373 s->white[0] =
s->white[1] =
s->white[2] = 255;
374 s->yellow[0] =
s->yellow[1] = 255;
391 #if CONFIG_GRAPHMONITOR_FILTER
400 static const AVFilterPad graphmonitor_outputs[] = {
409 .
name =
"graphmonitor",
412 .priv_class = &graphmonitor_class,
419 #endif // CONFIG_GRAPHMONITOR_FILTER
421 #if CONFIG_AGRAPHMONITOR_FILTER
423 static const AVFilterPad agraphmonitor_inputs[] = {
430 static const AVFilterPad agraphmonitor_outputs[] = {
439 .
name =
"agraphmonitor",
441 .priv_class = &graphmonitor_class,
448 #endif // CONFIG_AGRAPHMONITOR_FILTER
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
AVPixelFormat
Pixel format.
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are not just lists — they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
The exact code depends on how similar the blocks are and how related they are to the rest of the code, and needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that when no extra processing is needed on the inlink.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
#define FILTER_QUERY_FUNC(func)
filter_frame: for filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, then the filter needs to output only at least one frame per link. The additional frames can be left buffered in the filter.
const char * name
Filter name.
A link between two filters.
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
static int create_frame(AVFilterContext *ctx, int64_t pts)
int channels
Number of channels.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const AVFilter ff_avf_agraphmonitor
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
void * priv
private data for use by the filter
int64_t current_pts_us
Current timestamp of the link, as defined by the most recent frame(s), in AV_TIME_BASE units.
if it could not because there are no more frames
static int query_formats(AVFilterContext *ctx)
A filter pad used for either input or output.
enum AVMediaType type
filter media type
static const uint16_t mask[17]
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
static enum AVPixelFormat pix_fmts[]
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
#define FILTER_INPUTS(array)
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Describe the class of an AVClass context structure.
Rational number (pair of numerator and denominator).
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
char * name
name of this filter instance
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
static int activate(AVFilterContext *ctx)
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int format
agreed upon media format
#define AV_NOPTS_VALUE
Undefined timestamp value.
AVFilterContext * dst
dest filter
int64_t sample_count_in
Number of past samples sent through the link.
AVFilterContext * src
source filter
AVFilterFormatsConfig incfg
Lists of supported formats / etc.
FF_FILTER_FORWARD_WANTED(outlink, inlink)
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
int sample_rate
samples per second
#define i(width, name, range_min, range_max)
int w
agreed upon image width
static const AVOption graphmonitor_options[]
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
const char * name
Pad name.
static int filter_have_queued(AVFilterContext *filter)
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return, or at least make progress towards producing a frame.
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
static int config_output(AVFilterLink *outlink)
int h
agreed upon image height
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
int64_t frame_count_in
Number of past frames sent through the link.
const uint8_t avpriv_cga_font[2048]
FF_FILTER_FORWARD_STATUS(inlink, outlink)
#define FILTER_OUTPUTS(array)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
The definition of that something depends on the semantics of the filter. The callback must examine the status of the filter's links and proceed accordingly. The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true, a frame is wanted on that output.
AVFILTER_DEFINE_CLASS_EXT(graphmonitor, "(a)graphmonitor", graphmonitor_options)
static void draw_items(AVFilterContext *ctx, AVFrame *out, int xpos, int ypos, AVFilterLink *l, size_t frames)
const AVFilter ff_vf_graphmonitor
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.