Go to the documentation of this file.
24 #include <linux/videodev2.h>
25 #include <sys/ioctl.h>
46 return V4L2_TYPE_IS_OUTPUT(
ctx->type) ?
58 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
63 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
69 struct v4l2_cropcap cropcap;
72 memset(&cropcap, 0,
sizeof(cropcap));
73 cropcap.type =
ctx->type;
79 sar.
num = cropcap.pixelaspect.numerator;
80 sar.
den = cropcap.pixelaspect.denominator;
86 struct v4l2_format *fmt1 = &
ctx->format;
87 int ret = V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
88 fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
89 fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
91 fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
92 fmt1->fmt.pix.height != fmt2->fmt.pix.height;
105 return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
106 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
107 ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
108 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
114 const int SZ_4K = 0x1000;
127 ctx->format.type =
ctx->type;
132 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
134 ctx->format.fmt.pix_mp.height =
ctx->height;
135 ctx->format.fmt.pix_mp.width =
ctx->width;
137 ctx->format.fmt.pix_mp.pixelformat = fmt->
v4l2_fmt;
140 ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
144 ctx->format.fmt.pix.height =
ctx->height;
145 ctx->format.fmt.pix.width =
ctx->width;
150 ctx->format.fmt.pix.sizeimage =
158 struct v4l2_decoder_cmd cmd = {
159 .cmd = V4L2_DEC_CMD_START,
179 struct v4l2_format cap_fmt =
s->capture.format;
180 struct v4l2_event evt = { 0 };
183 ret = ioctl(
s->fd, VIDIOC_DQEVENT, &evt);
189 if (evt.type == V4L2_EVENT_EOS) {
194 if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
197 ret = ioctl(
s->fd, VIDIOC_G_FMT, &cap_fmt);
231 struct v4l2_decoder_cmd cmd = {
232 .cmd = V4L2_DEC_CMD_STOP,
251 struct v4l2_encoder_cmd cmd = {
252 .cmd = V4L2_ENC_CMD_STOP,
271 struct v4l2_plane
planes[VIDEO_MAX_PLANES];
272 struct v4l2_buffer buf = { 0 };
274 struct pollfd pfd = {
275 .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM,
280 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type) &&
ctx->buffers) {
281 for (
i = 0;
i <
ctx->num_buffers;
i++) {
285 if (
i ==
ctx->num_buffers)
287 "userspace. Increase num_capture_buffers "
288 "to prevent device deadlock or dropped "
289 "packets/frames.\n");
294 for (
i = 0;
i <
ctx->num_buffers;
i++) {
309 if (V4L2_TYPE_IS_OUTPUT(
ctx->type))
310 pfd.events = POLLOUT | POLLWRNORM;
314 pfd.events = POLLIN | POLLRDNORM | POLLPRI;
318 ret = poll(&pfd, 1, timeout);
327 if (pfd.revents & POLLERR) {
335 for (
i = 0;
i <
ctx->num_buffers;
i++) {
347 if (pfd.revents & POLLPRI) {
363 if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
365 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
367 if (pfd.revents & (POLLIN | POLLRDNORM))
373 if (pfd.revents & (POLLOUT | POLLWRNORM))
378 memset(&buf, 0,
sizeof(buf));
379 buf.memory = V4L2_MEMORY_MMAP;
380 buf.type =
ctx->type;
381 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
383 buf.length = VIDEO_MAX_PLANES;
389 if (errno != EAGAIN) {
399 int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
400 buf.m.planes[0].bytesused : buf.bytesused;
401 if (bytesused == 0) {
405 #ifdef V4L2_BUF_FLAG_LAST
406 if (buf.flags & V4L2_BUF_FLAG_LAST)
411 avbuf = &
ctx->buffers[buf.index];
414 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
430 if (V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
435 for (
i = 0;
i <
ctx->num_buffers;
i++) {
437 return &
ctx->buffers[
i];
445 struct v4l2_requestbuffers req = {
446 .memory = V4L2_MEMORY_MMAP,
452 for (
i = 0;
i <
ctx->num_buffers;
i++) {
455 for (j = 0; j <
buffer->num_planes; j++) {
456 struct V4L2Plane_info *p = &
buffer->plane_info[j];
457 if (p->mm_addr && p->length)
458 if (munmap(p->mm_addr, p->length) < 0)
468 struct v4l2_format *fmt = &
ctx->format;
476 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type))
477 fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
479 fmt->fmt.pix.pixelformat = v4l2_fmt;
481 fmt->type =
ctx->type;
493 struct v4l2_fmtdesc fdesc;
496 memset(&fdesc, 0,
sizeof(fdesc));
497 fdesc.type =
ctx->type;
527 struct v4l2_fmtdesc fdesc;
537 memset(&fdesc, 0,
sizeof(fdesc));
538 fdesc.type =
ctx->type;
545 if (fdesc.pixelformat == v4l2_fmt)
571 ctx->streamon = (cmd == VIDIOC_STREAMON);
715 struct v4l2_requestbuffers req;
723 ret = ioctl(
s->fd, VIDIOC_G_FMT, &
ctx->format);
727 memset(&req, 0,
sizeof(req));
728 req.count =
ctx->num_buffers;
729 req.memory = V4L2_MEMORY_MMAP;
730 req.type =
ctx->type;
731 ret = ioctl(
s->fd, VIDIOC_REQBUFS, &req);
737 ctx->num_buffers = req.count;
744 for (
i = 0;
i < req.count;
i++) {
758 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage :
ctx->format.fmt.pix.sizeimage,
759 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline :
ctx->format.fmt.pix.bytesperline);
static void error(const char *err)
int ff_v4l2_context_init(V4L2Context *ctx)
Initializes a V4L2Context.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter declares the list of supported formats: for video that means pixel format; for audio that means channel layout and sample format. Format lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
static unsigned int v4l2_get_width(struct v4l2_format *fmt)
#define AVERROR_EOF
End of file.
int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *s)
Reinitializes the V4L2m2mContext when the driver cannot continue processing with the current capture parameters.
filter_frame: for filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is sufficient to produce output, produce it.
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVPacket.
This structure describes decoded (raw) audio or video data.
int ff_v4l2_context_dequeue_packet(V4L2Context *ctx, AVPacket *pkt)
Dequeues a buffer from a V4L2Context to an AVPacket.
void ff_v4l2_context_release(V4L2Context *ctx)
Releases a V4L2Context.
static int v4l2_stop_encode(V4L2Context *ctx)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static V4L2m2mContext * ctx_to_m2mctx(V4L2Context *ctx)
static int v4l2_handle_event(V4L2Context *ctx)
Handle the resolution-change event and the end-of-stream event; returns 1 if reinit was successful, 0 or a negative value otherwise.
static int v4l2_start_decode(V4L2Context *ctx)
static int v4l2_get_framesize_compressed(V4L2Context *ctx, int width, int height)
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVFrame.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be of vf type.
static void v4l2_save_to_context(V4L2Context *ctx, struct v4l2_format_update *fmt)
static int v4l2_release_buffers(V4L2Context *ctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
Extracts the data from an AVFrame to a V4L2Buffer.
int ff_v4l2_context_set_format(V4L2Context *ctx)
Sets the V4L2Context format in the v4l2 driver.
V4L2Buffer (wrapper for v4l2_buffer management)
int ff_v4l2_context_get_format(V4L2Context *ctx, int probe)
Queries the driver for a valid v4l2 format and copies it to the context.
static V4L2Buffer * v4l2_getfree_v4l2buf(V4L2Context *ctx)
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
static AVCodecContext * logger(V4L2Context *ctx)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
Enqueues a V4L2Buffer.
static int v4l2_stop_decode(V4L2Context *ctx)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define container_of(ptr, type, member)
Rational number (pair of numerator and denominator).
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
Enqueues a buffer to a V4L2Context from an AVPacket.
static V4L2Buffer * v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
Extracts the data from an AVPacket to a V4L2Buffer.
int av_codec_is_decoder(const AVCodec *codec)
static int v4l2_get_raw_format(V4L2Context *ctx, enum AVPixelFormat *p)
#define av_err2str(errnum)
Convenience macro; the return value should be used only directly in function arguments, never stand-alone.
static AVRational v4l2_get_sar(V4L2Context *ctx)
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
Dequeues a buffer from a V4L2Context to an AVFrame.
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
uint32_t ff_v4l2_format_avcodec_to_v4l2(enum AVCodecID avcodec)
static int v4l2_try_raw_format(V4L2Context *ctx, enum AVPixelFormat pixfmt)
#define i(width, name, range_min, range_max)
struct v4l2_plane planes[VIDEO_MAX_PLANES]
static const struct @363 planes[]
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors, if available on the CPU) and zero all the bytes of the block.
static int v4l2_get_coded_format(V4L2Context *ctx, uint32_t *p)
int ff_v4l2_context_enqueue_frame(V4L2Context *ctx, const AVFrame *frame)
Enqueues a buffer to a V4L2Context from an AVFrame.
enum AVPixelFormat pixfmt
main external API structure.
static int probe(const AVProbeData *p)
static unsigned int v4l2_get_height(struct v4l2_format *fmt)
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
static unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
uint32_t ff_v4l2_format_avfmt_to_v4l2(enum AVPixelFormat avfmt)
This structure stores compressed data.
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
Initializes a V4L2Buffer.
#define av_fourcc2str(fourcc)
static int v4l2_type_supported(V4L2Context *ctx)
enum V4L2Buffer_status status