Go to the documentation of this file.
24 #include <linux/videodev2.h>
25 #include <sys/ioctl.h>
46 return V4L2_TYPE_IS_OUTPUT(
ctx->type) ?
58 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
63 return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
69 struct v4l2_cropcap cropcap;
72 memset(&cropcap, 0,
sizeof(cropcap));
73 cropcap.type =
ctx->type;
79 sar.
num = cropcap.pixelaspect.numerator;
80 sar.
den = cropcap.pixelaspect.denominator;
86 struct v4l2_format *fmt1 = &
ctx->format;
87 int ret = V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
88 fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
89 fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
91 fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
92 fmt1->fmt.pix.height != fmt2->fmt.pix.height;
105 return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
106 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
107 ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
108 ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
114 const int SZ_4K = 0x1000;
127 ctx->format.type =
ctx->type;
132 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
134 ctx->format.fmt.pix_mp.height =
ctx->height;
135 ctx->format.fmt.pix_mp.width =
ctx->width;
137 ctx->format.fmt.pix_mp.pixelformat = fmt->
v4l2_fmt;
140 ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
144 ctx->format.fmt.pix.height =
ctx->height;
145 ctx->format.fmt.pix.width =
ctx->width;
150 ctx->format.fmt.pix.sizeimage =
158 struct v4l2_decoder_cmd cmd = {
159 .cmd = V4L2_DEC_CMD_START,
179 struct v4l2_format cap_fmt =
s->capture.format;
180 struct v4l2_event evt = { 0 };
183 ret = ioctl(
s->fd, VIDIOC_DQEVENT, &evt);
189 if (evt.type == V4L2_EVENT_EOS) {
194 if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
197 ret = ioctl(
s->fd, VIDIOC_G_FMT, &cap_fmt);
240 struct v4l2_decoder_cmd cmd = {
241 .cmd = V4L2_DEC_CMD_STOP,
260 struct v4l2_encoder_cmd cmd = {
261 .cmd = V4L2_ENC_CMD_STOP,
280 struct v4l2_plane
planes[VIDEO_MAX_PLANES];
281 struct v4l2_buffer buf = { 0 };
283 struct pollfd pfd = {
284 .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM,
289 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type) &&
ctx->buffers) {
290 for (
i = 0;
i <
ctx->num_buffers;
i++) {
294 if (
i ==
ctx->num_buffers)
296 "userspace. Increase num_capture_buffers "
297 "to prevent device deadlock or dropped "
298 "packets/frames.\n");
303 for (
i = 0;
i <
ctx->num_buffers;
i++) {
318 if (V4L2_TYPE_IS_OUTPUT(
ctx->type))
319 pfd.events = POLLOUT | POLLWRNORM;
323 pfd.events = POLLIN | POLLRDNORM | POLLPRI;
327 ret = poll(&pfd, 1, timeout);
336 if (pfd.revents & POLLERR) {
340 for (
i = 0;
i <
ctx->num_buffers;
i++) {
352 if (pfd.revents & POLLPRI) {
368 if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
370 if (!V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
372 if (pfd.revents & (POLLIN | POLLRDNORM))
378 if (pfd.revents & (POLLOUT | POLLWRNORM))
383 memset(&buf, 0,
sizeof(buf));
384 buf.memory = V4L2_MEMORY_MMAP;
385 buf.type =
ctx->type;
386 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
388 buf.length = VIDEO_MAX_PLANES;
394 if (errno != EAGAIN) {
404 int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
405 buf.m.planes[0].bytesused : buf.bytesused;
406 if (bytesused == 0) {
410 #ifdef V4L2_BUF_FLAG_LAST
411 if (buf.flags & V4L2_BUF_FLAG_LAST)
416 avbuf = &
ctx->buffers[buf.index];
419 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type)) {
435 if (V4L2_TYPE_IS_OUTPUT(
ctx->type)) {
440 for (
i = 0;
i <
ctx->num_buffers;
i++) {
442 return &
ctx->buffers[
i];
450 struct v4l2_requestbuffers req = {
451 .memory = V4L2_MEMORY_MMAP,
457 for (
i = 0;
i <
ctx->num_buffers;
i++) {
460 for (j = 0; j <
buffer->num_planes; j++) {
461 struct V4L2Plane_info *p = &
buffer->plane_info[j];
462 if (p->mm_addr && p->length)
463 if (munmap(p->mm_addr, p->length) < 0)
473 struct v4l2_format *fmt = &
ctx->format;
481 if (V4L2_TYPE_IS_MULTIPLANAR(
ctx->type))
482 fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
484 fmt->fmt.pix.pixelformat = v4l2_fmt;
486 fmt->type =
ctx->type;
498 struct v4l2_fmtdesc fdesc;
501 memset(&fdesc, 0,
sizeof(fdesc));
502 fdesc.type =
ctx->type;
532 struct v4l2_fmtdesc fdesc;
542 memset(&fdesc, 0,
sizeof(fdesc));
543 fdesc.type =
ctx->type;
550 if (fdesc.pixelformat == v4l2_fmt)
576 ctx->streamon = (cmd == VIDIOC_STREAMON);
720 struct v4l2_requestbuffers req;
728 ret = ioctl(
s->fd, VIDIOC_G_FMT, &
ctx->format);
732 memset(&req, 0,
sizeof(req));
733 req.count =
ctx->num_buffers;
734 req.memory = V4L2_MEMORY_MMAP;
735 req.type =
ctx->type;
736 ret = ioctl(
s->fd, VIDIOC_REQBUFS, &req);
742 ctx->num_buffers = req.count;
749 for (
i = 0;
i < req.count;
i++) {
763 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage :
ctx->format.fmt.pix.sizeimage,
764 V4L2_TYPE_IS_MULTIPLANAR(
ctx->type) ?
ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline :
ctx->format.fmt.pix.bytesperline);
static void error(const char *err)
int ff_v4l2_context_init(V4L2Context *ctx)
Initializes a V4L2Context.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static unsigned int v4l2_get_width(struct v4l2_format *fmt)
#define AVERROR_EOF
End of file.
int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *s)
Reinitializes the V4L2m2mContext when the driver cannot continue processing with the capture paramete...
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVPacket.
This structure describes decoded (raw) audio or video data.
int ff_v4l2_context_dequeue_packet(V4L2Context *ctx, AVPacket *pkt)
Dequeues a buffer from a V4L2Context to an AVPacket.
void ff_v4l2_context_release(V4L2Context *ctx)
Releases a V4L2Context.
static int v4l2_stop_encode(V4L2Context *ctx)
static V4L2m2mContext * ctx_to_m2mctx(V4L2Context *ctx)
static int v4l2_handle_event(V4L2Context *ctx)
handle resolution change event and end of stream event returns 1 if reinit was successful,...
static int v4l2_start_decode(V4L2Context *ctx)
static int v4l2_get_framesize_compressed(V4L2Context *ctx, int width, int height)
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVFrame.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
static void v4l2_save_to_context(V4L2Context *ctx, struct v4l2_format_update *fmt)
static int v4l2_release_buffers(V4L2Context *ctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
Extracts the data from an AVFrame to a V4L2Buffer.
int ff_v4l2_context_set_format(V4L2Context *ctx)
Sets the V4L2Context format in the v4l2 driver.
V4L2Buffer (wrapper for v4l2_buffer management)
int ff_v4l2_context_get_format(V4L2Context *ctx, int probe)
Queries the driver for a valid v4l2 format and copies it to the context.
static V4L2Buffer * v4l2_getfree_v4l2buf(V4L2Context *ctx)
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
static AVCodecContext * logger(V4L2Context *ctx)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static const struct @321 planes[]
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
Enqueues a V4L2Buffer.
static int v4l2_stop_decode(V4L2Context *ctx)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define container_of(ptr, type, member)
Rational number (pair of numerator and denominator).
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
Enqueues a buffer to a V4L2Context from an AVPacket.
static V4L2Buffer * v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
Extracts the data from an AVPacket to a V4L2Buffer.
int av_codec_is_decoder(const AVCodec *codec)
static int v4l2_get_raw_format(V4L2Context *ctx, enum AVPixelFormat *p)
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
static AVRational v4l2_get_sar(V4L2Context *ctx)
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
Dequeues a buffer from a V4L2Context to an AVFrame.
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
static void reinit(Jpeg2000EncoderContext *s)
uint32_t ff_v4l2_format_avcodec_to_v4l2(enum AVCodecID avcodec)
static int v4l2_try_raw_format(V4L2Context *ctx, enum AVPixelFormat pixfmt)
#define i(width, name, range_min, range_max)
struct v4l2_plane planes[VIDEO_MAX_PLANES]
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static int v4l2_get_coded_format(V4L2Context *ctx, uint32_t *p)
int ff_v4l2_context_enqueue_frame(V4L2Context *ctx, const AVFrame *frame)
Enqueues a buffer to a V4L2Context from an AVFrame.
enum AVPixelFormat pixfmt
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
main external API structure.
static int probe(const AVProbeData *p)
static unsigned int v4l2_get_height(struct v4l2_format *fmt)
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
static unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
uint32_t ff_v4l2_format_avfmt_to_v4l2(enum AVPixelFormat avfmt)
This structure stores compressed data.
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
Initializes a V4L2Buffer.
#define av_fourcc2str(fourcc)
static int v4l2_type_supported(V4L2Context *ctx)
enum V4L2Buffer_status status