v4l2_buffers.c

/*
 * V4L2 buffer helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavutil/pixdesc.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"

#define USEC_PER_SEC 1000000
static AVRational v4l2_timebase = { 1, USEC_PER_SEC };

static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
        container_of(buf->context, V4L2m2mContext, output) :
        container_of(buf->context, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Buffer *buf)
{
    return buf_to_m2mctx(buf)->avctx;
}

static inline AVRational v4l2_get_timebase(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (s->avctx->pkt_timebase.num)
        return s->avctx->pkt_timebase;
    return s->avctx->time_base;
}

static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
    int64_t v4l2_pts;

    if (pts == AV_NOPTS_VALUE)
        pts = 0;

    /* convert pts to v4l2 timebase */
    v4l2_pts = av_rescale_q(pts, v4l2_get_timebase(out), v4l2_timebase);
    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
    out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
}

static inline int64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    int64_t v4l2_pts;

    /* convert pts back to encoder timebase */
    v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
                        avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, v4l2_get_timebase(avbuf));
}
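
/*
 * Annotation: v4l2_set_pts()/v4l2_get_pts() round-trip timestamps through the
 * microsecond timeval carried by struct v4l2_buffer. A rough illustration
 * (assuming a 1/25 pkt_timebase, i.e. one tick per 40 ms frame):
 *
 *   pts = 90  ->  90 * 40000 us = 3600000 us  ->  tv_sec = 3, tv_usec = 600000
 *   v4l2_get_pts() then rescales 3600000 us back to 90 in the same timebase.
 *
 * Timestamps that do not divide evenly into microseconds can lose precision
 * in this round trip; the driver only ever sees the timeval.
 */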

static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}

static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}

static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL;
        else
            return AVCOL_SPC_BT2020_NCL;
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}

static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}

static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
    V4L2Buffer* avbuf = opaque;
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
        atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);

        if (s->reinit) {
            if (!atomic_load(&s->refcount))
                sem_post(&s->refsync);
        } else {
            if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) {
                /* no need to queue more buffers to the driver */
                avbuf->status = V4L2BUF_AVAILABLE;
            }
            else if (avbuf->context->streamon)
                ff_v4l2_buffer_enqueue(avbuf);
        }

        av_buffer_unref(&avbuf->context_ref);
    }
}
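
/*
 * Annotation: two reference counts cooperate here. context_refcount counts the
 * AVBufferRefs handed out for this one buffer's planes (see v4l2_buf_to_bufref()
 * below), while s->refcount counts how many buffers the m2m context still has
 * out in user hands. When the last plane reference of a buffer is released, the
 * buffer is either re-queued to the driver (ff_v4l2_buffer_enqueue) while
 * streaming, parked as V4L2BUF_AVAILABLE while draining an output queue, or,
 * during a re-init, used to signal refsync once nothing is left in flight.
 */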

static int v4l2_buf_increase_ref(V4L2Buffer *in)
{
    V4L2m2mContext *s = buf_to_m2mctx(in);

    if (in->context_ref)
        atomic_fetch_add(&in->context_refcount, 1);
    else {
        in->context_ref = av_buffer_ref(s->self_ref);
        if (!in->context_ref)
            return AVERROR(ENOMEM);

        in->context_refcount = 1;
    }

    in->status = V4L2BUF_RET_USER;
    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);

    return 0;
}

static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
    int ret;

    if (plane >= in->num_planes)
        return AVERROR(EINVAL);

    /* even though most encoders return 0 in data_offset, encoding vp8 does require this value */
    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
    if (!*buf)
        return AVERROR(ENOMEM);

    ret = v4l2_buf_increase_ref(in);
    if (ret)
        av_buffer_unref(buf);

    return ret;
}
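
/*
 * Annotation: the AVBufferRef created above does not copy the plane; it wraps
 * the mmap()ed driver memory directly, with v4l2_free_buffer() as the free
 * callback. A minimal sketch of what a consumer effectively holds (names
 * illustrative, not an actual call site):
 *
 *   AVBufferRef *ref;
 *   v4l2_buf_to_bufref(avbuf, 0, &ref);   // ref->data points into mm_addr
 *   ...
 *   av_buffer_unref(&ref);                // may re-enqueue the V4L2 buffer
 */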

static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, int offset)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    length = out->plane_info[plane].length;
    bytesused = FFMIN(size+offset, length);

    memcpy((uint8_t*)out->plane_info[plane].mm_addr+offset, data, FFMIN(size, length-offset));

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}

static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    int i, ret;

    frame->format = avbuf->context->av_pix_fmt;

    for (i = 0; i < avbuf->num_planes; i++) {
        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
        if (ret)
            return ret;

        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
        frame->data[i] = frame->buf[i]->data;
    }

    /* fixup special cases */
    switch (avbuf->context->av_pix_fmt) {
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        break;

    case AV_PIX_FMT_YUV420P:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1;
        frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2);
        break;

    default:
        break;
    }

    return 0;
}

static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;
    struct v4l2_format fmt = out->context->format;
    int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
    int height       = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
    int is_planar_format = 0;

    switch (pixel_format) {
    case V4L2_PIX_FMT_YUV420M:
    case V4L2_PIX_FMT_YVU420M:
#ifdef V4L2_PIX_FMT_YUV422M
    case V4L2_PIX_FMT_YUV422M:
#endif
#ifdef V4L2_PIX_FMT_YVU422M
    case V4L2_PIX_FMT_YVU422M:
#endif
#ifdef V4L2_PIX_FMT_YUV444M
    case V4L2_PIX_FMT_YUV444M:
#endif
#ifdef V4L2_PIX_FMT_YVU444M
    case V4L2_PIX_FMT_YVU444M:
#endif
    case V4L2_PIX_FMT_NV12M:
    case V4L2_PIX_FMT_NV21M:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV12MT:
    case V4L2_PIX_FMT_NV16M:
    case V4L2_PIX_FMT_NV61M:
        is_planar_format = 1;
    }

    if (!is_planar_format) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        int planes_nb = 0;
        int offset = 0;

        for (i = 0; i < desc->nb_components; i++)
            planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

        for (i = 0; i < planes_nb; i++) {
            int size, h = height;
            if (i == 1 || i == 2) {
                h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
            }
            size = frame->linesize[i] * h;
            ret = v4l2_bufref_to_buf(out, 0, frame->data[i], size, offset);
            if (ret)
                return ret;
            offset += size;
        }
        return 0;
    }

    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, 0);
        if (ret)
            return ret;
    }

    return 0;
}
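
/*
 * Annotation: two copy strategies above. For single-plane V4L2 pixel formats
 * the AVFrame planes are packed back-to-back into plane 0 at increasing
 * offsets, with the chroma height derived from the pixel format descriptor;
 * for the multi-planar *M formats each AVFrame plane is copied into its
 * matching V4L2 plane. Either way this is a full memcpy into the mmap()ed
 * buffer, unlike the zero-copy capture path built on v4l2_buf_to_bufref().
 */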

/******************************************************************************
 *
 *              V4L2Buffer interface
 *
 ******************************************************************************/

int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    v4l2_set_pts(out, frame->pts);

    return v4l2_buffer_swframe_to_buf(frame, out);
}

int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    int ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
    if (ret)
        return ret;

    /* 2. get frame information */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        frame->flags |= AV_FRAME_FLAG_KEY;
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);
    frame->pkt_dts = AV_NOPTS_VALUE;

    /* these values are updated also during re-init in v4l2_process_driver_event */
    frame->height = avbuf->context->height;
    frame->width = avbuf->context->width;
    frame->sample_aspect_ratio = avbuf->context->sample_aspect_ratio;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}
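
/*
 * Annotation: a minimal sketch of how a decode path might consume this helper
 * once a capture buffer has been dequeued (the dequeue itself lives in
 * v4l2_context.c; the names below are illustrative, not an exact call site):
 *
 *   V4L2Buffer *avbuf = ...;              // buffer returned by VIDIOC_DQBUF
 *   AVFrame *frame = av_frame_alloc();
 *   if (!frame)
 *       return AVERROR(ENOMEM);
 *   ret = ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
 *   // on success the frame planes reference the mmap()ed buffer, zero-copy
 */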

int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);
    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}
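
/*
 * Annotation: the encoder-side counterpart of the frame helper above. The
 * packet borrows plane 0 of the capture buffer (again zero-copy via
 * v4l2_buf_to_bufref()), and pkt->dts is simply set equal to pkt->pts because
 * the V4L2 buffer carries only a single timestamp.
 */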

int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}

int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes */
        for (i = 0; i < avbuf->buf.length; i++) {
            if (avbuf->buf.m.planes[i].length)
                avbuf->num_planes++;
        }
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {

        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length = avbuf->num_planes;

    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}
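
/*
 * Annotation: a rough sketch of how a buffer context could initialize its pool
 * after VIDIOC_REQBUFS (illustrative only; the real loop lives in
 * v4l2_context.c):
 *
 *   for (i = 0; i < ctx->num_buffers; i++) {
 *       ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
 *       if (ret < 0)
 *           return ret;
 *   }
 *
 * Note that capture buffers are enqueued to the driver right at the end of
 * ff_v4l2_buffer_initialize(), while output buffers stay V4L2BUF_AVAILABLE
 * for the user to fill.
 */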

int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
{
    int ret;

    avbuf->buf.flags = avbuf->flags;

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    avbuf->status = V4L2BUF_IN_DRIVER;

    return 0;
}