24 #include <linux/videodev2.h>
25 #include <sys/ioctl.h>
42 struct v4l2_selection selection = { 0 };
59 ret = ioctl(s->fd, VIDIOC_G_FMT, &capture->format);
70 selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
73 ret = ioctl(s->fd, VIDIOC_S_SELECTION, &selection);
75 ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection);
79 av_log(avctx, AV_LOG_DEBUG, "crop output %dx%d\n", selection.r.width, selection.r.height);
81 capture->height = selection.r.height;
82 capture->width = selection.r.width;
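The fragment above reads the capture format from the driver, proposes a crop rectangle, and then reads back the rectangle the driver actually applied. A minimal standalone sketch of that sequence, assuming an already-open M2M device descriptor fd and single-planar capture buffers (many M2M decoders use the _MPLANE buffer types instead); error handling is reduced to returning -1:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Hypothetical helper mirroring the listing: query the capture format,
     * propose a crop rectangle, then read back what the driver accepted. */
    static int negotiate_capture_size(int fd, unsigned int coded_w, unsigned int coded_h,
                                      unsigned int *out_w, unsigned int *out_h)
    {
        struct v4l2_format format;
        struct v4l2_selection selection;

        memset(&format, 0, sizeof(format));
        format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;   /* assumption: single-planar capture */
        if (ioctl(fd, VIDIOC_G_FMT, &format) < 0)
            return -1;

        memset(&selection, 0, sizeof(selection));
        selection.type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        selection.r.width  = coded_w;                /* requested rectangle; target left at */
        selection.r.height = coded_h;                /* its zero default, as in the listing */
        if (ioctl(fd, VIDIOC_S_SELECTION, &selection) < 0)
            return -1;
        if (ioctl(fd, VIDIOC_G_SELECTION, &selection) < 0)
            return -1;

        *out_w = selection.r.width;                  /* what the driver actually applied */
        *out_h = selection.r.height;
        return 0;
    }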
107 struct v4l2_event_subscription sub;
114 memset(&sub, 0, sizeof(sub));
115 sub.type = V4L2_EVENT_SOURCE_CHANGE;
116 ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
120 "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n"
121 "you must provide codec_height and codec_width on input\n");
126 memset(&sub, 0, sizeof(sub));
127 sub.type = V4L2_EVENT_EOS;
128 ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
131 "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");
143 if (!s->buf_pkt.size) {
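The listing breaks off inside this branch. Judging from the brief for ff_decode_get_packet further down, the branch plausibly refills the cached packet once the previous one has been consumed; a hedged sketch of that shape, not the exact upstream code:

    if (!s->buf_pkt.size) {
        ret = ff_decode_get_packet(avctx, &s->buf_pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;            /* propagate EAGAIN and real errors */
        /* on AVERROR_EOF the packet stays empty and draining can begin */
    }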
192 capture = &s->capture;
223 #define OFFSET(x) offsetof(V4L2m2mPriv, x)
224 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
228 { "num_capture_buffers", "Number of buffers in the capture context",
233 #define M2MDEC_CLASS(NAME) \
234 static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
235 .class_name = #NAME "_v4l2m2m_decoder", \
236 .item_name = av_default_item_name, \
238 .version = LIBAVUTIL_VERSION_INT, \
241 #define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
243 const FFCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
244 .p.name = #NAME "_v4l2m2m" , \
245 CODEC_LONG_NAME("V4L2 mem2mem " LONGNAME " decoder wrapper"), \
246 .p.type = AVMEDIA_TYPE_VIDEO, \
248 .priv_data_size = sizeof(V4L2m2mPriv), \
249 .p.priv_class = &v4l2_m2m_ ## NAME ## _dec_class, \
250 .init = v4l2_decode_init, \
251 FF_CODEC_RECEIVE_FRAME_CB(v4l2_receive_frame), \
252 .close = v4l2_decode_close, \
254 .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
255 .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE | \
256 FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP, \
257 .p.wrapper_name = "v4l2m2m", \
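For orientation, the two macros above are meant to be instantiated once per wrapped codec; the exact list is not part of this excerpt. Illustrative invocations consistent with the macro parameters (and with AV_CODEC_ID_MPEG2VIDEO being referenced below) would look like:

    /* Each line expands to an FFCodec named ff_<NAME>_v4l2m2m_decoder,
     * with its AVClass coming from M2MDEC_CLASS(NAME). */
    M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,       "h264_mp4toannexb");
    M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);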
int ff_v4l2_context_init(V4L2Context *ctx)
Initializes a V4L2Context.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
#define AV_LOG_WARNING
Something somehow does not look correct.
enum AVPixelFormat av_pix_fmt
AVPixelFormat corresponding to this buffer context.
static av_cold int v4l2_decode_close(AVCodecContext *avctx)
#define AVERROR_EOF
End of file.
enum AVCodecID av_codec_id
AVCodecID corresponding to this buffer context.
AVFrame
This structure describes decoded (raw) audio or video data.
int streamon
Whether the stream has been started (VIDIOC_STREAMON has been sent).
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
Probes the video nodes looking for the required codec capabilities.
enum v4l2_buf_type type
Type of this buffer context.
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
Enqueues a buffer to a V4L2Context from an AVPacket.
int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
Allocate a new context and references for a V4L2 M2M instance.
#define V4L_M2M_DEFAULT_OPTS
static const AVOption options[]
#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name)
static av_cold int v4l2_decode_init(AVCodecContext *avctx)
int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv)
Releases all the codec resources if all AVBufferRefs have been returned to the ctx.
int width
Width and height of the frames it produces (in case of a capture context, e.g.
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
Dequeues a buffer from a V4L2Context to an AVFrame.
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
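Read together, the helpers documented here outline the receive-frame path: enqueue the buffered packet on the output (bitstream) context, start streaming if needed, then dequeue a decoded frame from the capture context. A hedged sketch under those assumptions; the field name s->output is assumed by symmetry with s->capture, and this is not the exact upstream control flow:

    /* Sketch of one decode round trip using the helpers above. */
    static int decode_step(V4L2m2mContext *s, AVFrame *frame)
    {
        V4L2Context *const capture = &s->capture;
        V4L2Context *const output  = &s->output;   /* assumed field name */
        int ret;

        ret = ff_v4l2_context_enqueue_packet(output, &s->buf_pkt);
        if (ret < 0)
            return ret;
        av_packet_unref(&s->buf_pkt);

        if (!output->streamon) {
            ret = ff_v4l2_context_set_status(output, VIDIOC_STREAMON);
            if (ret < 0)
                return ret;
        }

        /* timeout of -1: block until the driver produces a frame */
        return ff_v4l2_context_dequeue_frame(capture, frame, -1);
    }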
V4L2Buffer * buffers
Indexed array of V4L2Buffers.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
AVCodecContext
main external API structure.
static int v4l2_prepare_decoder(V4L2m2mContext *s)
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int v4l2_try_start(AVCodecContext *avctx)
struct v4l2_format format
Format returned by the driver after initializing the buffer context.
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding