19 #define OFFSET(x) offsetof(StackHWContext, x)
20 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
22 #define SET_OUTPUT_REGION(region, rx, ry, rw, rh) do { \
26 region->height = rh; \
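Only the first and last lines of the macro body are shown above (source lines 23 to 25 are elided). Judging from the parameter names, a plausible reconstruction of the full helper, given here only as a sketch, is:

    #define SET_OUTPUT_REGION(region, rx, ry, rw, rh) do { \
        region->x      = rx;                               \
        region->y      = ry;                               \
        region->width  = rw;                               \
        region->height = rh;                               \
    } while (0)

Wrapping the assignments in do { ... } while (0) keeps the macro usable as a single statement.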
89 int xpos = 0, ypos = 0;
117 char *arg2, *p2, *saveptr2 = NULL;
118 char *arg3, *p3, *saveptr3 = NULL;
119 int xpos, ypos, size;
139 for (int j = 0; j < 3; j++) {
140 if (!(arg2 = av_strtok(p2, "_", &saveptr2))) {
159 while ((arg3 = av_strtok(p3, "+", &saveptr3))) {
161 if (sscanf(arg3, "w%d", &size) == 1) {
169 } else if (sscanf(arg3, "h%d", &size) == 1) {
177 } else if (sscanf(arg3, "%d", &size) == 1) {
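The nested tokenization above implies the usual xstack layout grammar: the layout option holds one position descriptor per input, each descriptor separates its fields with "_", and each field is a "+"-joined sum of terms that are either plain pixel offsets ("%d") or references to another input's width or height ("w%d" / "h%d"). Under that reading, a 2x2 arrangement of four equally sized inputs would be written as:

    layout=0_0|w0_0|0_h0|w0_h0

Here "w0" and "h0" stand for the width and height of input 0, so the second input starts where the first one ends horizontally, and so on. (The "|" separator between per-input descriptors is not visible in this excerpt and is inferred from the software xstack filter's documented syntax.)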
208 "Video inputs have different frame rates, output will be VFR\n");
239 if (sctx->layout && is_grid) {
240 av_log(avctx, AV_LOG_ERROR, "Both layout and grid were specified. Only one is allowed.\n");
244 if (!sctx->layout && !is_grid) {
308 #define STACK_COMMON_OPTS \
309 { "inputs", "Set number of inputs", OFFSET(base.nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, UINT16_MAX, .flags = FLAGS }, \
310 { "shortest", "Force termination when the shortest input terminates", OFFSET(base.shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
312 #define DEFINE_HSTACK_OPTIONS(api) \
313 static const AVOption hstack_##api##_options[] = { \
315 { "height", "Set output height (0 to use the height of input 0)", OFFSET(base.tile_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, FLAGS }, \
319 #define DEFINE_VSTACK_OPTIONS(api) \
320 static const AVOption vstack_##api##_options[] = { \
322 { "width", "Set output width (0 to use the width of input 0)", OFFSET(base.tile_width), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, FLAGS }, \
326 #define DEFINE_XSTACK_OPTIONS(api) \
327 static const AVOption xstack_##api##_options[] = { \
329 { "layout", "Set custom layout", OFFSET(base.layout), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, .flags = FLAGS }, \
330 { "grid", "set fixed size grid layout", OFFSET(base.nb_grid_columns), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, .flags = FLAGS }, \
331 { "grid_tile_size", "set tile size in grid layout", OFFSET(base.tile_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, .flags = FLAGS }, \
332 { "fill", "Set the color for unused pixels", OFFSET(base.fillcolor_str), AV_OPT_TYPE_STRING, {.str = "none"}, .flags = FLAGS }, \
336 #define DEFINE_STACK_FILTER(category, api, capi, filter_flags) \
337 static const AVClass category##_##api##_class = { \
338 .class_name = #category "_" #api, \
339 .item_name = av_default_item_name, \
340 .option = category##_##api##_options, \
341 .version = LIBAVUTIL_VERSION_INT, \
343 const AVFilter ff_vf_##category##_##api = { \
344 .name = #category "_" #api, \
345 .description = NULL_IF_CONFIG_SMALL(#capi " " #category), \
346 .priv_size = sizeof(StackHWContext), \
347 .priv_class = &category##_##api##_class, \
348 .init = api##_stack_init, \
349 .uninit = api##_stack_uninit, \
350 .activate = stack_activate, \
351 FILTER_QUERY_FUNC(api##_stack_query_formats), \
352 FILTER_OUTPUTS(stack_outputs), \
353 .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, \
354 .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | filter_flags, \
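The listing ends inside the DEFINE_STACK_FILTER body; the backend sources are expected to expand these macros once per filter they provide. A hypothetical instantiation (the actual arguments used by vf_stack_qsv.c / vf_stack_vaapi.c are not shown in this excerpt) would look like:

    /* hypothetical example of how the macros compose for one backend */
    DEFINE_HSTACK_OPTIONS(qsv)
    DEFINE_STACK_FILTER(hstack, qsv, QSV, 0)

which defines hstack_qsv_options, the AVClass hstack_qsv_class, and the filter ff_vf_hstack_qsv, wired to qsv_stack_init, qsv_stack_uninit and qsv_stack_query_formats through token pasting.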
AVRational time_base
Time base for the incoming frames.
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
char * av_asprintf(const char *fmt,...)
Print the arguments following the specified format into a newly allocated, large enough buffer.
AVRational time_base
Time base for the output events.
#define AV_LOG_VERBOSE
Detailed information.
const char * name
Filter name.
@ EXT_INFINITY
Extend the frame to infinity.
A link between two filters.
static int process_frame(FFFrameSync *fs)
Frame event callback registered with the frame sync structure.
@ EXT_STOP
Completely stop all streams with this one.
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
void * priv
private data for use by the filter
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events.
A filter pad used for either input or output.
static int config_output(AVFilterLink *outlink)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static av_cold void stack_uninit(AVFilterContext *avctx)
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
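Since the layout parser above leans entirely on av_strtok(), a minimal usage sketch (hypothetical buffer and delimiters) may help: the first call passes the string, every following call passes NULL, and saveptr carries the position between calls.

    #include "libavutil/avstring.h"

    char buf[] = "w0+100_h0";           /* hypothetical descriptor */
    char *p = buf, *field, *saveptr = NULL;

    while ((field = av_strtok(p, "_", &saveptr))) {
        p = NULL;                       /* continue in the same string */
        /* field is "w0+100", then "h0"; split it further on "+" the same way */
    }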
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
#define av_assert0(cond)
assert() equivalent, that is always enabled.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
FFFrameSyncIn * in
Pointer to array of inputs.
int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
AVFilterLink ** inputs
array of pointers to input links
unsigned nb_inputs
number of input pads
void * opaque
Opaque pointer, not used by the API.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int init_framesync(AVFilterContext *avctx)
static const AVFilterPad stack_outputs[]
AVFilterContext * src
source filter
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
#define SET_OUTPUT_REGION(region, rx, ry, rw, rh)
StackItemRegion * regions
int w
agreed upon image width
static int config_comm_output(AVFilterLink *outlink)
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
void * av_calloc(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
enum AVMediaType type
AVFilterPad type.
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
int h
agreed upon image height
static int stack_activate(AVFilterContext *avctx)
static int stack_init(AVFilterContext *avctx)
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
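Taken together, the framesync entries above outline the lifecycle these stack filters rely on. A minimal sketch, assuming libavfilter/framesync.h and a process_frame() callback like the one referenced earlier (the field values chosen here are illustrative, not necessarily the file's actual choices):

    static int setup_framesync(AVFilterContext *avctx, FFFrameSync *fs)
    {
        int ret = ff_framesync_init(fs, avctx, avctx->nb_inputs);
        if (ret < 0)
            return ret;

        for (unsigned i = 0; i < avctx->nb_inputs; i++) {
            fs->in[i].time_base = avctx->inputs[i]->time_base;
            fs->in[i].sync      = 1;            /* all inputs drive output events */
            fs->in[i].before    = EXT_STOP;     /* no output before the first frame */
            fs->in[i].after     = EXT_INFINITY; /* keep extending the last frame */
        }
        fs->opaque   = avctx;
        fs->on_event = process_frame;           /* produces one output frame per event */

        return ff_framesync_configure(fs);
    }

After this, the filter's activate callback forwards to ff_framesync_activate(fs), and uninit releases everything with ff_framesync_uninit(fs).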
const AVFilter * filter
the AVFilter of which this is an instance