105 "Left and right sizes differ (%dx%d vs %dx%d).\n",
111 "Left and right time bases differ (%d/%d vs %d/%d).\n",
112 time_base.
num, time_base.
den,
118 "Left and right framerates differ (%d/%d vs %d/%d).\n",
119 frame_rate.
num, frame_rate.
den,
121 ctx->inputs[
RIGHT]->frame_rate.den);
128 s->depth =
s->pix_desc->comp[0].depth;
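config_output() refuses to configure the filter unless both inputs agree: the dimensions must match exactly, and the time bases and frame rates, being AVRational values, are compared with av_cmp_q(). A minimal sketch of the same check, with a hypothetical helper name and only size and time base shown (frame rate is handled identically):

    #include <libavutil/error.h>
    #include <libavutil/log.h>
    #include <libavutil/rational.h>

    /* Hypothetical helper: 0 if the two views agree, AVERROR_INVALIDDATA otherwise. */
    static int check_views_match(void *log_ctx,
                                 int wl, int hl, AVRational tb_l,
                                 int wr, int hr, AVRational tb_r)
    {
        if (wl != wr || hl != hr) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Left and right sizes differ (%dx%d vs %dx%d).\n", wl, hl, wr, hr);
            return AVERROR_INVALIDDATA;
        }
        if (av_cmp_q(tb_l, tb_r) != 0) {   /* exact comparison of two rationals */
            av_log(log_ctx, AV_LOG_ERROR,
                   "Left and right time bases differ (%d/%d vs %d/%d).\n",
                   tb_l.num, tb_l.den, tb_r.num, tb_r.den);
            return AVERROR_INVALIDDATA;
        }
        return 0;
    }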
165     if (interleaved && s->depth <= 8) {
166         const uint8_t *leftp  = s->input_views[LEFT]->data[0];
167         const uint8_t *rightp = s->input_views[RIGHT]->data[0];
168         uint8_t *dstp         = out->data[0];
169         int length            = out->width / 2;
170         int lines             = out->height;
172         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
173             if (plane == 1 || plane == 2) {
177             for (i = 0; i < lines; i++) {
179                 leftp  = s->input_views[LEFT]->data[plane] +
180                          s->input_views[LEFT]->linesize[plane] * i;
181                 rightp = s->input_views[RIGHT]->data[plane] +
182                          s->input_views[RIGHT]->linesize[plane] * i;
183                 dstp   = out->data[plane] + out->linesize[plane] * i;
184                 for (j = 0; j < length; j++) {
186                     if ((s->pix_desc->log2_chroma_w ||
187                          s->pix_desc->log2_chroma_h) &&
188                         (plane == 1 || plane == 2)) {
189                         *dstp++ = (*leftp + *rightp) / 2;
190                         *dstp++ = (*leftp + *rightp) / 2;
200     } else if (interleaved && s->depth > 8) {
201         const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
202         const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
203         uint16_t *dstp         = (uint16_t *)out->data[0];
204         int length             = out->width / 2;
205         int lines              = out->height;
207         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
208             if (plane == 1 || plane == 2) {
212             for (i = 0; i < lines; i++) {
214                 leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
215                          s->input_views[LEFT]->linesize[plane] * i / 2;
216                 rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
217                          s->input_views[RIGHT]->linesize[plane] * i / 2;
218                 dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
219                 for (j = 0; j < length; j++) {
221                     if ((s->pix_desc->log2_chroma_w ||
222                          s->pix_desc->log2_chroma_h) &&
223                         (plane == 1 || plane == 2)) {
224                         *dstp++ = (*leftp + *rightp) / 2;
225                         *dstp++ = (*leftp + *rightp) / 2;
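The two branches above (from horizontal_frame_pack()) implement the same column interleaving for AV_STEREO3D_COLUMNS output; they differ only in sample width, which is why the 16-bit branch casts the plane pointers and halves the byte linesize. The core per-plane loop, sketched as a standalone 8-bit routine with hypothetical buffer arguments:

    #include <stdint.h>

    /* Interleave the columns of two equally sized 8-bit planes into dst:
     * even output columns come from the left view, odd ones from the right.
     * For chroma planes of subsampled formats the filter instead writes the
     * average (*left + *right) / 2 twice, as shown in the listing above. */
    static void interleave_columns8(uint8_t *dst, int dst_linesize,
                                    const uint8_t *left, int left_linesize,
                                    const uint8_t *right, int right_linesize,
                                    int half_width, int height)
    {
        for (int y = 0; y < height; y++) {
            const uint8_t *l = left  + y * left_linesize;
            const uint8_t *r = right + y * right_linesize;
            uint8_t *d       = dst   + y * dst_linesize;
            for (int x = 0; x < half_width; x++) {
                *d++ = *l++;
                *d++ = *r++;
            }
        }
    }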
236     for (i = 0; i < 2; i++) {
237         const int psize = 1 + (s->depth > 8);
238         const uint8_t *src[4];
240         int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;
242         src[0] = s->input_views[i]->data[0];
243         src[1] = s->input_views[i]->data[1];
244         src[2] = s->input_views[i]->data[2];
246         dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
247         dst[1] = out->data[1] + i * sub_w;
248         dst[2] = out->data[2] + i * sub_w;
251                       s->input_views[i]->format,
252                       s->input_views[i]->width,
253                       s->input_views[i]->height);
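The non-interleaved side-by-side path needs no per-pixel loop: each view is copied with av_image_copy() into destination pointers offset by one view width (or by sub_w for subsampled chroma planes). A reduced sketch, assuming 8-bit three-plane planar YUV frames of identical geometry (the helper name is made up for illustration):

    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/pixdesc.h>

    /* Copy one view into the left (i == 0) or right (i == 1) half of out.
     * Assumes out is twice as wide as view and shares its pixel format. */
    static void copy_view_side_by_side(AVFrame *out, const AVFrame *view, int i)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(view->format);
        int sub_w = view->width >> desc->log2_chroma_w;
        const uint8_t *src[4] = { view->data[0], view->data[1], view->data[2], NULL };
        uint8_t *dst[4] = { NULL };

        dst[0] = out->data[0] + i * view->width;
        dst[1] = out->data[1] + i * sub_w;
        dst[2] = out->data[2] + i * sub_w;

        av_image_copy(dst, out->linesize, src, view->linesize,
                      view->format, view->width, view->height);
    }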
266     for (i = 0; i < 2; i++) {
267         const uint8_t *src[4];
270         int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;
272         src[0] = s->input_views[i]->data[0];
273         src[1] = s->input_views[i]->data[1];
274         src[2] = s->input_views[i]->data[2];
276         dst[0] = out->data[0] + i * out->linesize[0] *
277                  (interleaved + s->input_views[i]->height * (1 - interleaved));
278         dst[1] = out->data[1] + i * out->linesize[1] *
279                  (interleaved + sub_h * (1 - interleaved));
280         dst[2] = out->data[2] + i * out->linesize[2] *
281                  (interleaved + sub_h * (1 - interleaved));
283         linesizes[0] = out->linesize[0] +
284                        interleaved * out->linesize[0];
285         linesizes[1] = out->linesize[1] +
286                        interleaved * out->linesize[1];
287         linesizes[2] = out->linesize[2] +
288                        interleaved * out->linesize[2];
291                       s->input_views[i]->format,
292                       s->input_views[i]->width,
293                       s->input_views[i]->height);
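vertical_frame_pack() serves both AV_STEREO3D_TOPBOTTOM and AV_STEREO3D_LINES with one copy: the (interleaved + height * (1 - interleaved)) factor selects the starting row, and the doubled linesize makes the copy skip every other output row when interleaving. A worked sketch of just that arithmetic (hypothetical helper, values only):

    /* For view i and a plane of plane_height rows:
     *   interleaved == 1: row offset is i (0 or 1), stride is doubled,
     *                     so the two views land on alternating lines;
     *   interleaved == 0: row offset is i * plane_height, stride unchanged,
     *                     so the views are stacked top/bottom. */
    static void vertical_layout(int interleaved, int plane_height, int out_linesize,
                                int i, int *row_offset, int *eff_linesize)
    {
        *row_offset   = i * (interleaved + plane_height * (1 - interleaved));
        *eff_linesize = out_linesize + interleaved * out_linesize;
    }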
325     if (!(s->input_views[0] && s->input_views[1]))
328     int64_t pts = s->input_views[0]->pts;
330     for (i = 0; i < 2; i++) {
339     stereo->type = s->format;
364     for (i = 0; i < 2; i++)
373     stereo->type = s->format;
387     if (!s->input_views[0]) {
393     if (!s->input_views[1]) {
399     if (s->input_views[0] && s->input_views[1])
407         !s->input_views[0]) {
414         !s->input_views[1]) {
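Once a packed frame (or, for AV_STEREO3D_FRAMESEQUENCE, each of the two output frames) is ready, try_push_frame() tags it with stereo 3D side data before sending it downstream; the stereo->type = s->format lines above are that step. A minimal sketch of the tagging, not the filter's exact code:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    /* Attach AVStereo3D side data describing how the views are packed.
     * For frame-sequence output the filter instead marks each frame with
     * AV_STEREO3D_VIEW_LEFT or AV_STEREO3D_VIEW_RIGHT. */
    static int tag_stereo3d(AVFrame *out, enum AVStereo3DType type)
    {
        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
        if (!stereo)
            return AVERROR(ENOMEM);
        stereo->type = type;                 /* e.g. AV_STEREO3D_SIDEBYSIDE */
        stereo->view = AV_STEREO3D_VIEW_PACKED;
        return 0;
    }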
422 #define OFFSET(x) offsetof(FramepackContext, x)
423 #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
465 .priv_class = &framepack_class,
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
#define AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_GBRAP16
AVPixelFormat
Pixel format.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static av_always_inline void spatial_frame_pack(AVFilterLink *outlink, AVFrame *dst)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define FILTER_PIXFMTS_ARRAY(array)
static const AVFilterPad framepack_outputs[]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
#define AV_PIX_FMT_YUVA422P9
This structure describes decoded (raw) audio or video data.
#define AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV420P10
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
static int try_push_frame(AVFilterContext *ctx)
const char * name
Filter name.
A link between two filters.
#define AV_PIX_FMT_YUVA422P10
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
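ff_inlink_consume_frame() is the building block of this filter's activate() callback: each call drains at most one frame from an input FIFO into s->input_views[], and packing only happens once both views are buffered. A simplified two-input skeleton using the helpers listed on this page (it assumes libavfilter's internal filters.h plus this file's FramepackContext and try_push_frame(); the real control flow differs in details such as EOF handling):

    static int activate_sketch(AVFilterContext *ctx)
    {
        AVFilterLink *outlink = ctx->outputs[0];
        FramepackContext *s   = ctx->priv;
        int i, ret;

        FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

        /* Pull a frame from each input that does not yet have a buffered view. */
        for (i = 0; i < 2; i++) {
            if (!s->input_views[i]) {
                ret = ff_inlink_consume_frame(ctx->inputs[i], &s->input_views[i]);
                if (ret < 0)
                    return ret;
            }
        }

        if (s->input_views[0] && s->input_views[1])
            return try_push_frame(ctx);             /* pack and send downstream */

        /* Otherwise ask the still-missing input(s) for more data. */
        if (ff_outlink_frame_wanted(outlink)) {
            for (i = 0; i < 2; i++)
                if (!s->input_views[i])
                    ff_inlink_request_frame(ctx->inputs[i]);
        }
        return FFERROR_NOT_READY;
    }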
#define AV_PIX_FMT_YUVA420P9
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
#define AV_PIX_FMT_GBRP14
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
void * priv
private data for use by the filter
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_GRAY16
A filter pad used for either input or output.
#define AV_PIX_FMT_YUV444P10
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void vertical_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
#define AV_PIX_FMT_YUV422P16
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AV_PIX_FMT_GBRAP10
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
#define AV_PIX_FMT_GBRAP12
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
#define AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P16
AVFILTER_DEFINE_CLASS(framepack)
#define AV_PIX_FMT_GRAY14
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
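In this filter av_rescale_q() converts timestamps between the links' time bases (and, together with av_inv_q() on the output frame rate, it can express a one-frame step in the output time base). A minimal usage sketch:

    #include <stdint.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    /* Sketch: rescale a timestamp from one time base to another,
     * e.g. from an input link's time_base to the output link's. */
    static int64_t rescale_pts(int64_t pts, AVRational src_tb, AVRational dst_tb)
    {
        return av_rescale_q(pts, src_tb, dst_tb);
    }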
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FILTER_INPUTS(array)
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
#define AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GBRP16
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
AVFrame * input_views[2]
input frames
Rational number (pair of numerator and denominator).
static void horizontal_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const AVFilter ff_vf_framepack
int format
agreed upon media format
#define AV_PIX_FMT_YUV422P12
#define AV_NOPTS_VALUE
Undefined timestamp value.
enum AVStereo3DType format
frame pack type output
#define AV_PIX_FMT_YUV444P12
static int activate(AVFilterContext *ctx)
AVFilterContext * src
source filter
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AV_PIX_FMT_YUVA444P10
#define i(width, name, range_min, range_max)
static int config_output(AVFilterLink *outlink)
int w
agreed upon image width
#define AV_PIX_FMT_GBRP12
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
const char * name
Pad name.
static av_cold void framepack_uninit(AVFilterContext *ctx)
static const AVOption framepack_options[]
#define AV_PIX_FMT_YUV444P9
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_STEREO3D_COLUMNS
Views are packed per column.
enum AVStereo3DType type
How views are packed within the video.
#define AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P14
const AVPixFmtDescriptor * pix_desc
agreed pixel format
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
int h
agreed upon image height
#define AV_PIX_FMT_YUVA422P12
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
enum AVStereo3DView view
Determines which views are packed.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
static const AVFilterPad framepack_inputs[]
#define FILTER_OUTPUTS(array)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
AVStereo3DType
List of possible 3D Types.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV444P14
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
#define AV_PIX_FMT_GRAY12
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14
static enum AVPixelFormat formats_supported[]