Go to the documentation of this file.
24 #include <linux/videodev2.h>
25 #include <sys/ioctl.h>
47 return V4L2_TYPE_IS_OUTPUT(
ctx->type) ?
59 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
64 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
70 struct v4l2_cropcap cropcap;
73 memset(&cropcap, 0,
sizeof(cropcap));
74 cropcap.type =
ctx->type;
80 sar.
num = cropcap.pixelaspect.numerator;
81 sar.
den = cropcap.pixelaspect.denominator;
87 struct v4l2_format *fmt1 = &
ctx->format;
88 int ret = V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
89 fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
90 fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
92 fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
93 fmt1->fmt.pix.height != fmt2->fmt.pix.height;
106 return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
107 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
108 ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
109 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
115 const int SZ_4K = 0x1000;
128 ctx->format.type =
ctx->type;
133 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
135 ctx->format.fmt.pix_mp.height =
ctx->height;
136 ctx->format.fmt.pix_mp.width =
ctx->width;
138 ctx->format.fmt.pix_mp.pixelformat = fmt->
v4l2_fmt;
141 ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
145 ctx->format.fmt.pix.height =
ctx->height;
146 ctx->format.fmt.pix.width =
ctx->width;
151 ctx->format.fmt.pix.sizeimage =
159 struct v4l2_decoder_cmd cmd = {
160 .cmd = V4L2_DEC_CMD_START,
180 struct v4l2_format cap_fmt =
s->capture.format;
181 struct v4l2_event evt = { 0 };
184 ret = ioctl(
s->fd, VIDIOC_DQEVENT, &evt);
190 if (evt.type == V4L2_EVENT_EOS) {
195 if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
198 ret = ioctl(
s->fd, VIDIOC_G_FMT, &cap_fmt);
232 struct v4l2_decoder_cmd cmd = {
233 .cmd = V4L2_DEC_CMD_STOP,
252 struct v4l2_encoder_cmd cmd = {
253 .cmd = V4L2_ENC_CMD_STOP,
272 struct v4l2_plane
planes[VIDEO_MAX_PLANES];
273 struct v4l2_buffer buf = { 0 };
275 struct pollfd pfd = {
276 .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM,
281 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type) &&
ctx->buffers) {
282 for (
i = 0;
i <
ctx->num_buffers;
i++) {
286 if (
i ==
ctx->num_buffers)
288 "userspace. Increase num_capture_buffers "
289 "to prevent device deadlock or dropped "
290 "packets/frames.\n");
295 for (
i = 0;
i <
ctx->num_buffers;
i++) {
310 if (V4L2_TYPE_IS_OUTPUT(
ctx->type))
311 pfd.events = POLLOUT | POLLWRNORM;
315 pfd.events = POLLIN | POLLRDNORM | POLLPRI;
319 ret = poll(&pfd, 1, timeout);
328 if (pfd.revents & POLLERR) {
336 for (
i = 0;
i <
ctx->num_buffers;
i++) {
348 if (pfd.revents & POLLPRI) {
364 if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
366 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
368 if (pfd.revents & (POLLIN | POLLRDNORM))
374 if (pfd.revents & (POLLOUT | POLLWRNORM))
379 memset(&buf, 0,
sizeof(buf));
380 buf.memory = V4L2_MEMORY_MMAP;
381 buf.type =
ctx->type;
382 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
384 buf.length = VIDEO_MAX_PLANES;
390 if (errno != EAGAIN) {
400 int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
401 buf.m.planes[0].bytesused : buf.bytesused;
402 if (bytesused == 0) {
406 #ifdef V4L2_BUF_FLAG_LAST
407 if (buf.flags & V4L2_BUF_FLAG_LAST)
412 avbuf = &
ctx->buffers[buf.index];
415 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
431 if (V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
436 for (
i = 0;
i <
ctx->num_buffers;
i++) {
438 return &
ctx->buffers[
i];
446 struct v4l2_requestbuffers req = {
447 .memory = V4L2_MEMORY_MMAP,
453 for (
i = 0;
i <
ctx->num_buffers;
i++) {
456 for (j = 0; j <
buffer->num_planes; j++) {
457 struct V4L2Plane_info *p = &
buffer->plane_info[j];
458 if (p->mm_addr && p->length)
459 if (munmap(p->mm_addr, p->length) < 0)
469 struct v4l2_format *fmt = &
ctx->format;
477 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type))
478 fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
480 fmt->fmt.pix.pixelformat = v4l2_fmt;
482 fmt->type =
ctx->type;
494 struct v4l2_fmtdesc fdesc;
497 memset(&fdesc, 0,
sizeof(fdesc));
498 fdesc.type =
ctx->type;
528 struct v4l2_fmtdesc fdesc;
538 memset(&fdesc, 0,
sizeof(fdesc));
539 fdesc.type =
ctx->type;
546 if (fdesc.pixelformat == v4l2_fmt)
572 ctx->streamon = (cmd == VIDIOC_STREAMON);
716 struct v4l2_requestbuffers req;
724 ret = ioctl(
s->fd, VIDIOC_G_FMT, &
ctx->format);
728 memset(&req, 0,
sizeof(req));
729 req.count =
ctx->num_buffers;
730 req.memory = V4L2_MEMORY_MMAP;
731 req.type =
ctx->type;
732 ret = ioctl(
s->fd, VIDIOC_REQBUFS, &req);
738 ctx->num_buffers = req.count;
745 for (
i = 0;
i < req.count;
i++) {
759 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage :
ctx->format.fmt.pix.sizeimage,
760 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline :
ctx->format.fmt.pix.bytesperline);
static void error(const char *err)
int ff_v4l2_context_init(V4L2Context *ctx)
Initializes a V4L2Context.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Each input and each output declares the list of supported formats: for video that means pixel formats; for audio it means channel layouts and sample formats. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link among the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format among a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static unsigned int v4l2_get_width(struct v4l2_format *fmt)
#define AVERROR_EOF
End of file.
int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *s)
Reinitializes the V4L2m2mContext when the driver cannot continue processing with the current capture parameters.
filter_frame: For filters that do not use the activate callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, the filter should push the output frames on the output link immediately.
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVPacket.
This structure describes decoded (raw) audio or video data.
int ff_v4l2_context_dequeue_packet(V4L2Context *ctx, AVPacket *pkt)
Dequeues a buffer from a V4L2Context to an AVPacket.
void ff_v4l2_context_release(V4L2Context *ctx)
Releases a V4L2Context.
static int v4l2_stop_encode(V4L2Context *ctx)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static const struct @466 planes[]
static V4L2m2mContext * ctx_to_m2mctx(V4L2Context *ctx)
static int v4l2_handle_event(V4L2Context *ctx)
Handle the resolution-change event and the end-of-stream event; returns 1 if reinit was successful, a negative value if it failed, and 0 if reinit was not executed.
static int v4l2_start_decode(V4L2Context *ctx)
static int v4l2_get_framesize_compressed(V4L2Context *ctx, int width, int height)
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVFrame.
It is the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be of the vf type.
static void v4l2_save_to_context(V4L2Context *ctx, struct v4l2_format_update *fmt)
static int v4l2_release_buffers(V4L2Context *ctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
Extracts the data from an AVFrame to a V4L2Buffer.
int ff_v4l2_context_set_format(V4L2Context *ctx)
Sets the V4L2Context format in the v4l2 driver.
V4L2Buffer (wrapper for v4l2_buffer management)
int ff_v4l2_context_get_format(V4L2Context *ctx, int probe)
Queries the driver for a valid v4l2 format and copies it to the context.
static V4L2Buffer * v4l2_getfree_v4l2buf(V4L2Context *ctx)
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
static AVCodecContext * logger(V4L2Context *ctx)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
Enqueues a V4L2Buffer.
static int v4l2_stop_decode(V4L2Context *ctx)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define container_of(ptr, type, member)
Rational number (pair of numerator and denominator).
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
Enqueues a buffer to a V4L2Context from an AVPacket.
static V4L2Buffer * v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
Extracts the data from an AVPacket to a V4L2Buffer.
int av_codec_is_decoder(const AVCodec *codec)
static int v4l2_get_raw_format(V4L2Context *ctx, enum AVPixelFormat *p)
#define av_err2str(errnum)
Convenience macro; the return value should be used only directly in function arguments, but never stand alone.
static AVRational v4l2_get_sar(V4L2Context *ctx)
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
Dequeues a buffer from a V4L2Context to an AVFrame.
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
uint32_t ff_v4l2_format_avcodec_to_v4l2(enum AVCodecID avcodec)
static int v4l2_try_raw_format(V4L2Context *ctx, enum AVPixelFormat pixfmt)
#define i(width, name, range_min, range_max)
struct v4l2_plane planes[VIDEO_MAX_PLANES]
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors, if available on the CPU), and zero all the bytes of the block.
static int v4l2_get_coded_format(V4L2Context *ctx, uint32_t *p)
int ff_v4l2_context_enqueue_frame(V4L2Context *ctx, const AVFrame *frame)
Enqueues a buffer to a V4L2Context from an AVFrame.
enum AVPixelFormat pixfmt
These buffered frames must be flushed immediately if a new input produces new output. If a filter has several inputs, the filter must not call request_frame to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. A filter with several inputs must be ready for frames arriving randomly on any input, so any such filter will most likely require some kind of queuing mechanism; it is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: For filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of those frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. It must return success, or at least make progress towards producing a frame.
main external API structure.
static int probe(const AVProbeData *p)
static unsigned int v4l2_get_height(struct v4l2_format *fmt)
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
static unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
uint32_t ff_v4l2_format_avfmt_to_v4l2(enum AVPixelFormat avfmt)
This structure stores compressed data.
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
Initializes a V4L2Buffer.
#define av_fourcc2str(fourcc)
static int v4l2_type_supported(V4L2Context *ctx)
enum V4L2Buffer_status status