#include <linux/videodev2.h>
#include <sys/ioctl.h>

#define USEC_PER_SEC 1000000
if (s->avctx->pkt_timebase.num)
    return s->avctx->pkt_timebase;
return s->avctx->time_base;

avbuf->buf.timestamp.tv_usec;
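The fragments above choose a time base (the packet time base when it is set, otherwise the codec time base) and read a timestamp back out of the buffer's struct timeval. A minimal sketch of that microsecond round trip, assuming hypothetical pts_to_timeval()/timeval_to_pts() helpers rather than the file's own v4l2_set_pts()/v4l2_get_pts():

#include <sys/time.h>
#include <libavutil/avutil.h>       /* AV_NOPTS_VALUE */
#include <libavutil/mathematics.h>  /* av_rescale_q   */
#include <libavutil/rational.h>

#define USEC_PER_SEC 1000000

static const AVRational usec_tb = { 1, USEC_PER_SEC };

/* codec time base -> the microsecond timestamp V4L2 carries
 * (sketch only, negative timestamps are not handled) */
static struct timeval pts_to_timeval(int64_t pts, AVRational tb)
{
    struct timeval tv = { 0, 0 };
    int64_t usec;

    if (pts == AV_NOPTS_VALUE)
        return tv;                            /* leave the timestamp zeroed */

    usec       = av_rescale_q(pts, tb, usec_tb);
    tv.tv_sec  = usec / USEC_PER_SEC;
    tv.tv_usec = usec % USEC_PER_SEC;
    return tv;
}

/* microsecond timestamp -> codec time base */
static int64_t timeval_to_pts(struct timeval tv, AVRational tb)
{
    int64_t usec = (int64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
    return av_rescale_q(usec, usec_tb, tb);
}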
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_colorspace cs;

cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

case V4L2_YCBCR_ENC_XV709:

case V4L2_YCBCR_ENC_XV601:
enum v4l2_quantization qt;

qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
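This fragment reads the quantization field from whichever half of the format union applies; the file's v4l2_get_color_range() (listed further down) builds on it. A hedged sketch of the lookup, with map_color_range() as an illustrative name:

#include <linux/videodev2.h>
#include <libavutil/pixfmt.h>

static enum AVColorRange map_color_range(const struct v4l2_format *fmt)
{
    int mp = V4L2_TYPE_IS_MULTIPLANAR(fmt->type);
    enum v4l2_quantization qt = mp ? fmt->fmt.pix_mp.quantization
                                   : fmt->fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE:  return AVCOL_RANGE_MPEG;  /* limited range */
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;  /* full range    */
    default:                           return AVCOL_RANGE_UNSPECIFIED;
    }
}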
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_colorspace cs;

cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

case V4L2_COLORSPACE_BT2020:
    if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
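The colorspace lookup follows the same multi-planar/single-planar selection, and for BT.2020 it also inspects the YCbCr encoding to tell constant- from non-constant-luminance apart. A partial sketch with an illustrative map_colorspace() helper covering only a few cases:

#include <linux/videodev2.h>
#include <libavutil/pixfmt.h>

static enum AVColorSpace map_colorspace(const struct v4l2_format *fmt)
{
    int mp = V4L2_TYPE_IS_MULTIPLANAR(fmt->type);
    enum v4l2_colorspace cs = mp ? fmt->fmt.pix_mp.colorspace
                                 : fmt->fmt.pix.colorspace;
    enum v4l2_ycbcr_encoding ycbcr = mp ? fmt->fmt.pix_mp.ycbcr_enc
                                        : fmt->fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_REC709:    return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_BT2020:
        /* BT.2020 comes in constant- and non-constant-luminance variants */
        return ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM ? AVCOL_SPC_BT2020_CL
                                                        : AVCOL_SPC_BT2020_NCL;
    default:
        return AVCOL_SPC_UNSPECIFIED;
    }
}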
enum v4l2_ycbcr_encoding ycbcr;
enum v4l2_xfer_func xfer;
enum v4l2_colorspace cs;

cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?

case V4L2_YCBCR_ENC_XV709:
if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) {
unsigned int bytesused, length;

if (plane >= out->num_planes)

length = out->plane_info[plane].length;

if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
    out->planes[plane].bytesused = bytesused;
    out->planes[plane].length = length;
} else {
    out->buf.bytesused = bytesused;
    out->buf.length = length;
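The excerpt above records the payload size either per plane or on the buffer itself, depending on the buffer type. A small self-contained sketch of that bookkeeping using only the kernel structures (set_plane_size() is an illustrative helper, not part of the file):

#include <linux/videodev2.h>

static void set_plane_size(struct v4l2_buffer *vbuf, struct v4l2_plane *planes,
                           int plane, unsigned int bytesused, unsigned int length)
{
    if (V4L2_TYPE_IS_MULTIPLANAR(vbuf->type)) {
        /* multi-planar types keep the sizes in the per-plane array */
        planes[plane].bytesused = bytesused;
        planes[plane].length    = length;
    } else {
        /* single-planar types keep them directly in the v4l2_buffer */
        vbuf->bytesused = bytesused;
        vbuf->length    = length;
    }
}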
/* detect multi-planar V4L2 pixel formats */
struct v4l2_format fmt = out->context->format;
int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                   fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
int height = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
             fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
int is_planar_format = 0;

switch (pixel_format) {
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
#ifdef V4L2_PIX_FMT_YUV422M
case V4L2_PIX_FMT_YUV422M:
#endif
#ifdef V4L2_PIX_FMT_YVU422M
case V4L2_PIX_FMT_YVU422M:
#endif
#ifdef V4L2_PIX_FMT_YUV444M
case V4L2_PIX_FMT_YUV444M:
#endif
#ifdef V4L2_PIX_FMT_YVU444M
case V4L2_PIX_FMT_YVU444M:
#endif
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV21M:
case V4L2_PIX_FMT_NV12MT_16X16:
case V4L2_PIX_FMT_NV12MT:
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
    is_planar_format = 1;
}

if (!is_planar_format) {
for (i = 0; i < desc->nb_components; i++)
    planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

for (i = 0; i < planes_nb; i++) {

    if (i == 1 || i == 2) {

for (i = 0; i < out->num_planes; i++) {
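The first loop derives the number of data planes from the pixel-format descriptor: one more than the highest plane index any component refers to. A self-contained sketch of that computation (count_planes() is an illustrative name):

#include <libavutil/common.h>   /* FFMAX */
#include <libavutil/pixdesc.h>  /* av_pix_fmt_desc_get */

static int count_planes(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int planes_nb = 0;

    if (!desc)
        return 0;

    for (int i = 0; i < desc->nb_components; i++)
        planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

    return planes_nb;
}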
/* propagate keyframe / error flags and the payload size */
if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)

if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {

pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ?
            avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;

if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)

if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {

out->flags = V4L2_BUF_FLAG_KEYFRAME;
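These excerpts carry the V4L2 keyframe and error flags across the API boundary; on the packet side they presumably become AV_PKT_FLAG_KEY and AV_PKT_FLAG_CORRUPT (see ff_v4l2_buffer_buf_to_avpkt() in the list below). A minimal sketch with an illustrative translate_flags() helper:

#include <linux/videodev2.h>
#include <libavcodec/avcodec.h>

static void translate_flags(const struct v4l2_buffer *vbuf, AVPacket *pkt)
{
    if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;      /* driver marked this a keyframe */

    if (vbuf->flags & V4L2_BUF_FLAG_ERROR)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;  /* driver reported an error      */
}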
/* buffer initialization: request MMAP memory and map each plane */
avbuf->buf.memory = V4L2_MEMORY_MMAP;
avbuf->buf.type = ctx->type;

if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
    avbuf->buf.length = VIDEO_MAX_PLANES;

if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
    for (i = 0; i < avbuf->buf.length; i++) {
        if (avbuf->buf.m.planes[i].length)

            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        PROT_READ | PROT_WRITE, MAP_SHARED,

        PROT_READ | PROT_WRITE, MAP_SHARED,

if (V4L2_TYPE_IS_OUTPUT(ctx->type))

if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
    avbuf->buf.bytesused = avbuf->planes[0].bytesused;
    avbuf->buf.length = avbuf->planes[0].length;
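The initialization excerpts request MMAP memory, walk the planes reported by the driver, and map each one with PROT_READ | PROT_WRITE and MAP_SHARED. A reduced single-planar sketch of that sequence (map_capture_buffer(), video_fd and index are illustrative; the real code also covers multi-planar buffers and output queues):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_capture_buffer(int video_fd, unsigned int index, size_t *length)
{
    struct v4l2_buffer buf;
    void *addr;

    memset(&buf, 0, sizeof(buf));
    buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index  = index;

    /* ask the driver where the buffer lives and how large it is */
    if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)
        return NULL;

    addr = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                video_fd, buf.m.offset);
    if (addr == MAP_FAILED)
        return NULL;

    *length = buf.length;
    return addr;
}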
enum AVColorTransferCharacteristic color_trc
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
enum AVPixelFormat av_pix_fmt
AVPixelFormat corresponding to this buffer context.
static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
AVColorTransferCharacteristic
Color Transfer Characteristic.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
uint8_t * data
The data buffer.
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVPacket.
enum AVColorPrimaries color_primaries
enum AVColorSpace colorspace
YUV colorspace type.
AVFrame
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
@ AVCOL_RANGE_JPEG
Full range content.
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
static AVCodecContext * logger(V4L2Buffer *buf)
@ AVCOL_SPC_BT2020_CL
ITU-R BT2020 constant luminance system.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static AVRational v4l2_get_timebase(V4L2Buffer *avbuf)
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t *data, int size, int offset)
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
static int v4l2_buf_increase_ref(V4L2Buffer *in)
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
Extracts the data from a V4L2Buffer to an AVFrame.
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
#define atomic_fetch_sub(object, operand)
int streamon
Whether the stream has been started (VIDIOC_STREAMON has been sent).
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
struct V4L2Context * context
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
Extracts the data from an AVFrame to a V4L2Buffer.
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
V4L2Buffer (wrapper for v4l2_buffer management)
@ AVCOL_TRC_BT1361_ECG
ITU-R BT1361 Extended Colour Gamut.
#define AV_CEIL_RSHIFT(a, b)
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AVCOL_PRI_SMPTE240M
identical to above, also called "SMPTE C" even though it uses D65
#define atomic_load(object)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
Enqueues a V4L2Buffer.
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
#define FF_DECODE_ERROR_INVALID_BITSTREAM
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
#define container_of(ptr, type, member)
AVRational
Rational number (pair of numerator and denominator).
enum v4l2_buf_type type
Type of this buffer context.
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
#define atomic_fetch_sub_explicit(object, operand, order)
@ AVCOL_RANGE_UNSPECIFIED
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
Extracts the data from an AVPacket to a V4L2Buffer.
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array (a usage sketch follows at the end of this list).
@ AVCOL_PRI_BT2020
ITU-R BT2020.
int width
Width and height of the frames it produces (in case of a capture context, e.g.
AVBufferRef * context_ref
struct V4L2Buffer::V4L2Plane_info plane_info[VIDEO_MAX_PLANES]
#define atomic_fetch_add_explicit(object, operand, order)
#define AV_NOPTS_VALUE
Undefined timestamp value.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
static int64_t v4l2_get_pts(V4L2Buffer *avbuf)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
static AVRational v4l2_timebase
int flags
A combination of AV_PKT_FLAG values.
size_t size
Size of data in bytes.
atomic_uint context_refcount
@ AVCOL_TRC_BT709
also ITU-R BT1361
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
AVColorSpace
YUV colorspace type.
struct v4l2_plane planes[VIDEO_MAX_PLANES]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
@ AV_PIX_FMT_NV21
as above, but U and V bytes are swapped
static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
static V4L2m2mContext * buf_to_m2mctx(V4L2Buffer *buf)
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
AVCodecContext
main external API structure.
#define atomic_fetch_add(object, operand)
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
static void v4l2_free_buffer(void *opaque, uint8_t *unused)
const char * name
context name.
AVBufferRef
A reference to a data buffer.
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
AVPacket
This structure stores compressed data.
struct v4l2_format format
Format returned by the driver after initializing the buffer context.
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
Initializes a V4L2Buffer.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
AVColorRange
Visual content value range.
AVRational sample_aspect_ratio
enum V4L2Buffer_status status
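Several of the entries above (av_buffer_create, container_of, the atomic counters) come together in this file's reference handling: a mapped plane is exposed as an AVBufferRef whose free callback hands the buffer back to the driver instead of freeing memory. A minimal sketch of that pattern with a hypothetical my_plane wrapper in place of V4L2Buffer:

#include <libavutil/buffer.h>

/* Hypothetical wrapper standing in for the file's V4L2Buffer. */
struct my_plane {
    uint8_t *addr;    /* mmap()ed plane memory  */
    size_t   length;  /* mapped length in bytes */
};

/* Called when the last AVBufferRef to the plane is unreferenced; a real
 * implementation would re-queue the V4L2 buffer here (VIDIOC_QBUF) rather
 * than free anything. */
static void my_plane_free(void *opaque, uint8_t *data)
{
    struct my_plane *p = opaque;
    (void)data;
    (void)p;
}

static AVBufferRef *wrap_plane(struct my_plane *p)
{
    return av_buffer_create(p->addr, p->length, my_plane_free, p, 0);
}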