FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "cpu.h"
24 #include "dict.h"
25 #include "frame.h"
26 #include "imgutils.h"
27 #include "mem.h"
28 #include "samplefmt.h"
29 #include "hwcontext.h"
30 
31 static const AVSideDataDescriptor sd_props[] = {
32  [AV_FRAME_DATA_PANSCAN] = { "AVPanScan" },
33  [AV_FRAME_DATA_A53_CC] = { "ATSC A53 Part 4 Closed Captions" },
34  [AV_FRAME_DATA_MATRIXENCODING] = { "AVMatrixEncoding" },
35  [AV_FRAME_DATA_DOWNMIX_INFO] = { "Metadata relevant to a downmix procedure" },
36  [AV_FRAME_DATA_AFD] = { "Active format description" },
37  [AV_FRAME_DATA_MOTION_VECTORS] = { "Motion vectors" },
38  [AV_FRAME_DATA_SKIP_SAMPLES] = { "Skip samples" },
39  [AV_FRAME_DATA_GOP_TIMECODE] = { "GOP timecode" },
40  [AV_FRAME_DATA_S12M_TIMECODE] = { "SMPTE 12-1 timecode" },
41  [AV_FRAME_DATA_DYNAMIC_HDR_PLUS] = { "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)" },
42  [AV_FRAME_DATA_DYNAMIC_HDR_VIVID] = { "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)" },
43  [AV_FRAME_DATA_REGIONS_OF_INTEREST] = { "Regions Of Interest" },
44  [AV_FRAME_DATA_VIDEO_ENC_PARAMS] = { "Video encoding parameters" },
45  [AV_FRAME_DATA_FILM_GRAIN_PARAMS] = { "Film grain parameters" },
46  [AV_FRAME_DATA_DETECTION_BBOXES] = { "Bounding boxes for object detection and classification" },
47  [AV_FRAME_DATA_DOVI_RPU_BUFFER] = { "Dolby Vision RPU Data" },
48  [AV_FRAME_DATA_DOVI_METADATA] = { "Dolby Vision Metadata" },
49  [AV_FRAME_DATA_LCEVC] = { "LCEVC NAL data" },
50  [AV_FRAME_DATA_VIEW_ID] = { "View ID" },
51  [AV_FRAME_DATA_STEREO3D] = { "Stereo 3D", AV_SIDE_DATA_PROP_GLOBAL },
52  [AV_FRAME_DATA_REPLAYGAIN] = { "AVReplayGain", AV_SIDE_DATA_PROP_GLOBAL },
53  [AV_FRAME_DATA_DISPLAYMATRIX] = { "3x3 displaymatrix", AV_SIDE_DATA_PROP_GLOBAL },
54  [AV_FRAME_DATA_AUDIO_SERVICE_TYPE] = { "Audio service type", AV_SIDE_DATA_PROP_GLOBAL },
55  [AV_FRAME_DATA_MASTERING_DISPLAY_METADATA] = { "Mastering display metadata", AV_SIDE_DATA_PROP_GLOBAL },
56  [AV_FRAME_DATA_CONTENT_LIGHT_LEVEL] = { "Content light level metadata", AV_SIDE_DATA_PROP_GLOBAL },
57  [AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT] = { "Ambient viewing environment", AV_SIDE_DATA_PROP_GLOBAL },
58  [AV_FRAME_DATA_SPHERICAL] = { "Spherical Mapping", AV_SIDE_DATA_PROP_GLOBAL },
59  [AV_FRAME_DATA_ICC_PROFILE] = { "ICC profile", AV_SIDE_DATA_PROP_GLOBAL },
60  [AV_FRAME_DATA_SEI_UNREGISTERED] = { "H.26[45] User Data Unregistered SEI message", AV_SIDE_DATA_PROP_MULTI },
61 };
62 
63 static void get_frame_defaults(AVFrame *frame)
64 {
65  memset(frame, 0, sizeof(*frame));
66 
67  frame->pts =
68  frame->pkt_dts = AV_NOPTS_VALUE;
69  frame->best_effort_timestamp = AV_NOPTS_VALUE;
70  frame->duration = 0;
71 #if FF_API_FRAME_PKT
72 FF_DISABLE_DEPRECATION_WARNINGS
73  frame->pkt_pos = -1;
74  frame->pkt_size = -1;
75 FF_ENABLE_DEPRECATION_WARNINGS
76 #endif
77  frame->time_base = (AVRational){ 0, 1 };
78  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
79  frame->format = -1; /* unknown */
80  frame->extended_data = frame->data;
81  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
82  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
83  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
84  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
85  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
86  frame->flags = 0;
87 }
88 
89 static void free_side_data(AVFrameSideData **ptr_sd)
90 {
91  AVFrameSideData *sd = *ptr_sd;
92 
93  av_buffer_unref(&sd->buf);
94  av_dict_free(&sd->metadata);
95  av_freep(ptr_sd);
96 }
97 
98 static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
99 {
100  for (int i = 0; i < *nb_side_data; i++) {
101  free_side_data(&((*sd)[i]));
102  }
103  *nb_side_data = 0;
104 
105  av_freep(sd);
106 }
107 
108 static void frame_side_data_wipe(AVFrame *frame)
109 {
110  wipe_side_data(&frame->side_data, &frame->nb_side_data);
111 }
112 
113 void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
114 {
115  wipe_side_data(sd, nb_sd);
116 }
117 
118 static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data,
119  const enum AVFrameSideDataType type)
120 {
121  for (int i = *nb_side_data - 1; i >= 0; i--) {
122  AVFrameSideData *entry = ((*sd)[i]);
123  if (entry->type != type)
124  continue;
125 
126  free_side_data(&entry);
127 
128  ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
129  (*nb_side_data)--;
130  }
131 }
132 
133 static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd,
134  const AVFrameSideData *target)
135 {
136  for (int i = *nb_sd - 1; i >= 0; i--) {
137  AVFrameSideData *entry = ((*sd)[i]);
138  if (entry != target)
139  continue;
140 
141  free_side_data(&entry);
142 
143  ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
144  (*nb_sd)--;
145 
146  return;
147  }
148 }
149 
150 AVFrame *av_frame_alloc(void)
151 {
152  AVFrame *frame = av_malloc(sizeof(*frame));
153 
154  if (!frame)
155  return NULL;
156 
157  get_frame_defaults(frame);
158 
159  return frame;
160 }
161 
162 void av_frame_free(AVFrame **frame)
163 {
164  if (!frame || !*frame)
165  return;
166 
167  av_frame_unref(*frame);
168  av_freep(frame);
169 }
170 
171 #define ALIGN (HAVE_SIMD_ALIGN_64 ? 64 : 32)
172 
173 static int get_video_buffer(AVFrame *frame, int align)
174 {
175  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
176  int ret, padded_height;
177  int plane_padding;
178  ptrdiff_t linesizes[4];
179  size_t total_size, sizes[4];
180 
181  if (!desc)
182  return AVERROR(EINVAL);
183 
184  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
185  return ret;
186 
187  if (align <= 0)
188  align = ALIGN;
189  plane_padding = FFMAX(ALIGN, align);
190 
191  if (!frame->linesize[0]) {
192  for (int i = 1; i <= align; i += i) {
193  ret = av_image_fill_linesizes(frame->linesize, frame->format,
194  FFALIGN(frame->width, i));
195  if (ret < 0)
196  return ret;
197  if (!(frame->linesize[0] & (align-1)))
198  break;
199  }
200 
201  for (int i = 0; i < 4 && frame->linesize[i]; i++)
202  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
203  }
204 
205  for (int i = 0; i < 4; i++)
206  linesizes[i] = frame->linesize[i];
207 
208  padded_height = FFALIGN(frame->height, 32);
209  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
210  padded_height, linesizes)) < 0)
211  return ret;
212 
213  total_size = 4 * plane_padding + 4 * align;
214  for (int i = 0; i < 4; i++) {
215  if (sizes[i] > SIZE_MAX - total_size)
216  return AVERROR(EINVAL);
217  total_size += sizes[i];
218  }
219 
220  frame->buf[0] = av_buffer_alloc(total_size);
221  if (!frame->buf[0]) {
222  ret = AVERROR(ENOMEM);
223  goto fail;
224  }
225 
226  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
227  frame->buf[0]->data, frame->linesize)) < 0)
228  goto fail;
229 
230  for (int i = 1; i < 4; i++) {
231  if (frame->data[i])
232  frame->data[i] += i * plane_padding;
233  frame->data[i] = (uint8_t *)FFALIGN((uintptr_t)frame->data[i], align);
234  }
235 
236  frame->extended_data = frame->data;
237 
238  return 0;
239 fail:
240  av_frame_unref(frame);
241  return ret;
242 }
243 
244 static int get_audio_buffer(AVFrame *frame, int align)
245 {
246  int planar = av_sample_fmt_is_planar(frame->format);
247  int channels, planes;
248  size_t size;
249  int ret;
250 
251  channels = frame->ch_layout.nb_channels;
252  planes = planar ? channels : 1;
253  if (!frame->linesize[0]) {
254  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
255  frame->nb_samples, frame->format,
256  align);
257  if (ret < 0)
258  return ret;
259  }
260 
261  if (align <= 0)
262  align = ALIGN;
263 
264  if (planes > AV_NUM_DATA_POINTERS) {
265  frame->extended_data = av_calloc(planes,
266  sizeof(*frame->extended_data));
267  frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
268  sizeof(*frame->extended_buf));
269  if (!frame->extended_data || !frame->extended_buf) {
270  av_freep(&frame->extended_data);
271  av_freep(&frame->extended_buf);
272  return AVERROR(ENOMEM);
273  }
274  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
275  } else
276  frame->extended_data = frame->data;
277 
278  if (frame->linesize[0] > SIZE_MAX - align)
279  return AVERROR(EINVAL);
280  size = frame->linesize[0] + (size_t)align;
281 
282  for (int i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
283  frame->buf[i] = av_buffer_alloc(size);
284  if (!frame->buf[i]) {
285  av_frame_unref(frame);
286  return AVERROR(ENOMEM);
287  }
288  frame->extended_data[i] = frame->data[i] =
289  (uint8_t *)FFALIGN((uintptr_t)frame->buf[i]->data, align);
290  }
291  for (int i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
292  frame->extended_buf[i] = av_buffer_alloc(size);
293  if (!frame->extended_buf[i]) {
294  av_frame_unref(frame);
295  return AVERROR(ENOMEM);
296  }
297  frame->extended_data[i + AV_NUM_DATA_POINTERS] =
298  (uint8_t *)FFALIGN((uintptr_t)frame->extended_buf[i]->data, align);
299  }
300  return 0;
301 
302 }
303 
304 int av_frame_get_buffer(AVFrame *frame, int align)
305 {
306  if (frame->format < 0)
307  return AVERROR(EINVAL);
308 
309  if (frame->width > 0 && frame->height > 0)
310  return get_video_buffer(frame, align);
311  else if (frame->nb_samples > 0 &&
312  (av_channel_layout_check(&frame->ch_layout)))
313  return get_audio_buffer(frame, align);
314 
315  return AVERROR(EINVAL);
316 }
317 
318 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
319 {
320  int ret;
321 
322 #if FF_API_FRAME_KEY
323 FF_DISABLE_DEPRECATION_WARNINGS
324  dst->key_frame = src->key_frame;
325 FF_ENABLE_DEPRECATION_WARNINGS
326 #endif
327  dst->pict_type = src->pict_type;
328  dst->sample_aspect_ratio = src->sample_aspect_ratio;
329  dst->crop_top = src->crop_top;
330  dst->crop_bottom = src->crop_bottom;
331  dst->crop_left = src->crop_left;
332  dst->crop_right = src->crop_right;
333  dst->pts = src->pts;
334  dst->duration = src->duration;
335  dst->repeat_pict = src->repeat_pict;
336 #if FF_API_INTERLACED_FRAME
337 FF_DISABLE_DEPRECATION_WARNINGS
338  dst->interlaced_frame = src->interlaced_frame;
339  dst->top_field_first = src->top_field_first;
340 FF_ENABLE_DEPRECATION_WARNINGS
341 #endif
342 #if FF_API_PALETTE_HAS_CHANGED
343 FF_DISABLE_DEPRECATION_WARNINGS
344  dst->palette_has_changed = src->palette_has_changed;
345 FF_ENABLE_DEPRECATION_WARNINGS
346 #endif
347  dst->sample_rate = src->sample_rate;
348  dst->opaque = src->opaque;
349  dst->pkt_dts = src->pkt_dts;
350 #if FF_API_FRAME_PKT
351 FF_DISABLE_DEPRECATION_WARNINGS
352  dst->pkt_pos = src->pkt_pos;
353  dst->pkt_size = src->pkt_size;
354 FF_ENABLE_DEPRECATION_WARNINGS
355 #endif
356  dst->time_base = src->time_base;
357  dst->quality = src->quality;
358  dst->best_effort_timestamp = src->best_effort_timestamp;
359  dst->flags = src->flags;
360  dst->decode_error_flags = src->decode_error_flags;
361  dst->color_primaries = src->color_primaries;
362  dst->color_trc = src->color_trc;
363  dst->colorspace = src->colorspace;
364  dst->color_range = src->color_range;
365  dst->chroma_location = src->chroma_location;
366 
367  av_dict_copy(&dst->metadata, src->metadata, 0);
368 
369  for (int i = 0; i < src->nb_side_data; i++) {
370  const AVFrameSideData *sd_src = src->side_data[i];
371  AVFrameSideData *sd_dst;
372  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
373  && (src->width != dst->width || src->height != dst->height))
374  continue;
375  if (force_copy) {
376  sd_dst = av_frame_new_side_data(dst, sd_src->type,
377  sd_src->size);
378  if (!sd_dst) {
379  frame_side_data_wipe(dst);
380  return AVERROR(ENOMEM);
381  }
382  memcpy(sd_dst->data, sd_src->data, sd_src->size);
383  } else {
384  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
385  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
386  if (!sd_dst) {
387  av_buffer_unref(&ref);
388  frame_side_data_wipe(dst);
389  return AVERROR(ENOMEM);
390  }
391  }
392  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
393  }
394 
395  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
396  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
397  return ret;
398 }
399 
400 int av_frame_ref(AVFrame *dst, const AVFrame *src)
401 {
402  int ret = 0;
403 
404  av_assert1(dst->width == 0 && dst->height == 0);
405  av_assert1(dst->ch_layout.nb_channels == 0 &&
406  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
407 
408  dst->format = src->format;
409  dst->width = src->width;
410  dst->height = src->height;
411  dst->nb_samples = src->nb_samples;
412 
413  ret = frame_copy_props(dst, src, 0);
414  if (ret < 0)
415  goto fail;
416 
417  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
418  if (ret < 0)
419  goto fail;
420 
421  /* duplicate the frame data if it's not refcounted */
422  if (!src->buf[0]) {
423  ret = av_frame_get_buffer(dst, 0);
424  if (ret < 0)
425  goto fail;
426 
427  ret = av_frame_copy(dst, src);
428  if (ret < 0)
429  goto fail;
430 
431  return 0;
432  }
433 
434  /* ref the buffers */
435  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
436  if (!src->buf[i])
437  continue;
438  dst->buf[i] = av_buffer_ref(src->buf[i]);
439  if (!dst->buf[i]) {
440  ret = AVERROR(ENOMEM);
441  goto fail;
442  }
443  }
444 
445  if (src->extended_buf) {
446  dst->extended_buf = av_calloc(src->nb_extended_buf,
447  sizeof(*dst->extended_buf));
448  if (!dst->extended_buf) {
449  ret = AVERROR(ENOMEM);
450  goto fail;
451  }
452  dst->nb_extended_buf = src->nb_extended_buf;
453 
454  for (int i = 0; i < src->nb_extended_buf; i++) {
455  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
456  if (!dst->extended_buf[i]) {
457  ret = AVERROR(ENOMEM);
458  goto fail;
459  }
460  }
461  }
462 
463  if (src->hw_frames_ctx) {
464  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
465  if (!dst->hw_frames_ctx) {
466  ret = AVERROR(ENOMEM);
467  goto fail;
468  }
469  }
470 
471  /* duplicate extended data */
472  if (src->extended_data != src->data) {
473  int ch = dst->ch_layout.nb_channels;
474 
475  if (!ch) {
476  ret = AVERROR(EINVAL);
477  goto fail;
478  }
479 
480  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
481  if (!dst->extended_data) {
482  ret = AVERROR(ENOMEM);
483  goto fail;
484  }
485  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
486  } else
487  dst->extended_data = dst->data;
488 
489  memcpy(dst->data, src->data, sizeof(src->data));
490  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
491 
492  return 0;
493 
494 fail:
495  av_frame_unref(dst);
496  return ret;
497 }
498 
499 int av_frame_replace(AVFrame *dst, const AVFrame *src)
500 {
501  int ret = 0;
502 
503  if (dst == src)
504  return AVERROR(EINVAL);
505 
506  if (!src->buf[0]) {
507  av_frame_unref(dst);
508 
509  /* duplicate the frame data if it's not refcounted */
510  if ( src->data[0] || src->data[1]
511  || src->data[2] || src->data[3])
512  return av_frame_ref(dst, src);
513 
514  ret = frame_copy_props(dst, src, 0);
515  if (ret < 0)
516  goto fail;
517  }
518 
519  dst->format = src->format;
520  dst->width = src->width;
521  dst->height = src->height;
522  dst->nb_samples = src->nb_samples;
523 
524  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
525  if (ret < 0)
526  goto fail;
527 
528  frame_side_data_wipe(dst);
529  av_dict_free(&dst->metadata);
530  ret = frame_copy_props(dst, src, 0);
531  if (ret < 0)
532  goto fail;
533 
534  /* replace the buffers */
535  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
536  ret = av_buffer_replace(&dst->buf[i], src->buf[i]);
537  if (ret < 0)
538  goto fail;
539  }
540 
541  if (src->extended_buf) {
542  if (dst->nb_extended_buf != src->nb_extended_buf) {
543  int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
544  void *tmp;
545 
546  for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
547  av_buffer_unref(&dst->extended_buf[i]);
548 
549  tmp = av_realloc_array(dst->extended_buf, sizeof(*dst->extended_buf),
550  src->nb_extended_buf);
551  if (!tmp) {
552  ret = AVERROR(ENOMEM);
553  goto fail;
554  }
555  dst->extended_buf = tmp;
556  dst->nb_extended_buf = src->nb_extended_buf;
557 
558  memset(&dst->extended_buf[nb_extended_buf], 0,
559  (src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
560  }
561 
562  for (int i = 0; i < src->nb_extended_buf; i++) {
563  ret = av_buffer_replace(&dst->extended_buf[i], src->extended_buf[i]);
564  if (ret < 0)
565  goto fail;
566  }
567  } else if (dst->extended_buf) {
568  for (int i = 0; i < dst->nb_extended_buf; i++)
569  av_buffer_unref(&dst->extended_buf[i]);
570  av_freep(&dst->extended_buf);
571  }
572 
573  ret = av_buffer_replace(&dst->hw_frames_ctx, src->hw_frames_ctx);
574  if (ret < 0)
575  goto fail;
576 
577  if (dst->extended_data != dst->data)
578  av_freep(&dst->extended_data);
579 
580  if (src->extended_data != src->data) {
581  int ch = dst->ch_layout.nb_channels;
582 
583  if (!ch) {
584  ret = AVERROR(EINVAL);
585  goto fail;
586  }
587 
588  if (ch > SIZE_MAX / sizeof(*dst->extended_data))
589  goto fail;
590 
591  dst->extended_data = av_memdup(src->extended_data, sizeof(*dst->extended_data) * ch);
592  if (!dst->extended_data) {
593  ret = AVERROR(ENOMEM);
594  goto fail;
595  }
596  } else
597  dst->extended_data = dst->data;
598 
599  memcpy(dst->data, src->data, sizeof(src->data));
600  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
601 
602  return 0;
603 
604 fail:
605  av_frame_unref(dst);
606  return ret;
607 }
608 
609 AVFrame *av_frame_clone(const AVFrame *src)
610 {
611  AVFrame *ret = av_frame_alloc();
612 
613  if (!ret)
614  return NULL;
615 
616  if (av_frame_ref(ret, src) < 0)
617  av_frame_free(&ret);
618 
619  return ret;
620 }
621 
622 void av_frame_unref(AVFrame *frame)
623 {
624  if (!frame)
625  return;
626 
627  frame_side_data_wipe(frame);
628 
629  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
630  av_buffer_unref(&frame->buf[i]);
631  for (int i = 0; i < frame->nb_extended_buf; i++)
632  av_buffer_unref(&frame->extended_buf[i]);
633  av_freep(&frame->extended_buf);
634  av_dict_free(&frame->metadata);
635 
636  av_buffer_unref(&frame->hw_frames_ctx);
637 
638  av_buffer_unref(&frame->opaque_ref);
639  av_buffer_unref(&frame->private_ref);
640 
641  if (frame->extended_data != frame->data)
642  av_freep(&frame->extended_data);
643 
644  av_channel_layout_uninit(&frame->ch_layout);
645 
646  get_frame_defaults(frame);
647 }
648 
649 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
650 {
651  av_assert1(dst->width == 0 && dst->height == 0);
652  av_assert1(dst->ch_layout.nb_channels == 0 &&
653  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
654 
655  *dst = *src;
656  if (src->extended_data == src->data)
657  dst->extended_data = dst->data;
658  get_frame_defaults(src);
659 }
660 
661 int av_frame_is_writable(AVFrame *frame)
662 {
663  int ret = 1;
664 
665  /* assume non-refcounted frames are not writable */
666  if (!frame->buf[0])
667  return 0;
668 
669  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
670  if (frame->buf[i])
671  ret &= !!av_buffer_is_writable(frame->buf[i]);
672  for (int i = 0; i < frame->nb_extended_buf; i++)
673  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
674 
675  return ret;
676 }
677 
678 int av_frame_make_writable(AVFrame *frame)
679 {
680  AVFrame tmp;
681  int ret;
682 
683  if (av_frame_is_writable(frame))
684  return 0;
685 
686  memset(&tmp, 0, sizeof(tmp));
687  tmp.format = frame->format;
688  tmp.width = frame->width;
689  tmp.height = frame->height;
690  tmp.nb_samples = frame->nb_samples;
691  ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
692  if (ret < 0) {
693  av_frame_unref(&tmp);
694  return ret;
695  }
696 
697  if (frame->hw_frames_ctx)
698  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
699  else
700  ret = av_frame_get_buffer(&tmp, 0);
701  if (ret < 0)
702  return ret;
703 
704  ret = av_frame_copy(&tmp, frame);
705  if (ret < 0) {
706  av_frame_unref(&tmp);
707  return ret;
708  }
709 
710  ret = av_frame_copy_props(&tmp, frame);
711  if (ret < 0) {
712  av_frame_unref(&tmp);
713  return ret;
714  }
715 
716  av_frame_unref(frame);
717 
718  *frame = tmp;
719  if (tmp.data == tmp.extended_data)
720  frame->extended_data = frame->data;
721 
722  return 0;
723 }
724 
725 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
726 {
727  return frame_copy_props(dst, src, 1);
728 }
729 
730 AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane)
731 {
732  uint8_t *data;
733  int planes;
734 
735  if (frame->nb_samples) {
736  int channels = frame->ch_layout.nb_channels;
737  if (!channels)
738  return NULL;
739  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
740  } else
741  planes = 4;
742 
743  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
744  return NULL;
745  data = frame->extended_data[plane];
746 
747  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
748  AVBufferRef *buf = frame->buf[i];
749  if (data >= buf->data && data < buf->data + buf->size)
750  return buf;
751  }
752  for (int i = 0; i < frame->nb_extended_buf; i++) {
753  AVBufferRef *buf = frame->extended_buf[i];
754  if (data >= buf->data && data < buf->data + buf->size)
755  return buf;
756  }
757  return NULL;
758 }
759 
760 static AVFrameSideData *add_side_data_from_buf_ext(AVFrameSideData ***sd,
761  int *nb_sd,
762  enum AVFrameSideDataType type,
763  AVBufferRef *buf, uint8_t *data,
764  size_t size)
765 {
766  AVFrameSideData *ret, **tmp;
767 
768  // *nb_sd + 1 needs to fit into an int and a size_t.
769  if ((unsigned)*nb_sd >= FFMIN(INT_MAX, SIZE_MAX))
770  return NULL;
771 
772  tmp = av_realloc_array(*sd, sizeof(**sd), *nb_sd + 1);
773  if (!tmp)
774  return NULL;
775  *sd = tmp;
776 
777  ret = av_mallocz(sizeof(*ret));
778  if (!ret)
779  return NULL;
780 
781  ret->buf = buf;
782  ret->data = data;
783  ret->size = size;
784  ret->type = type;
785 
786  (*sd)[(*nb_sd)++] = ret;
787 
788  return ret;
789 }
790 
791 static AVFrameSideData *add_side_data_from_buf(AVFrameSideData ***sd,
792  int *nb_sd,
793  enum AVFrameSideDataType type,
794  AVBufferRef *buf)
795 {
796  if (!buf)
797  return NULL;
798 
799  return add_side_data_from_buf_ext(sd, nb_sd, type, buf, buf->data, buf->size);
800 }
801 
802 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
803  enum AVFrameSideDataType type,
804  AVBufferRef *buf)
805 {
806  return
807  add_side_data_from_buf(
808  &frame->side_data, &frame->nb_side_data, type, buf);
809 }
810 
811 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
812  enum AVFrameSideDataType type,
813  size_t size)
814 {
815  AVFrameSideData *ret;
816  AVBufferRef *buf = av_buffer_alloc(size);
817  ret = av_frame_new_side_data_from_buf(frame, type, buf);
818  if (!ret)
819  av_buffer_unref(&buf);
820  return ret;
821 }
822 
823 static AVFrameSideData *replace_side_data_from_buf(AVFrameSideData *dst,
824  AVBufferRef *buf, int flags)
825 {
826  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
827  return NULL;
828 
829  av_dict_free(&dst->metadata);
830  av_buffer_unref(&dst->buf);
831  dst->buf = buf;
832  dst->data = buf->data;
833  dst->size = buf->size;
834  return dst;
835 }
836 
837 AVFrameSideData *av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd,
838  enum AVFrameSideDataType type,
839  size_t size, unsigned int flags)
840 {
841  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
842  AVBufferRef *buf = av_buffer_alloc(size);
843  AVFrameSideData *ret = NULL;
844 
845  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
846  remove_side_data(sd, nb_sd, type);
847  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
848  (ret = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
849  ret = replace_side_data_from_buf(ret, buf, flags);
850  if (!ret)
851  av_buffer_unref(&buf);
852  return ret;
853  }
854 
855  ret = add_side_data_from_buf(sd, nb_sd, type, buf);
856  if (!ret)
857  av_buffer_unref(&buf);
858 
859  return ret;
860 }
861 
862 AVFrameSideData *av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd,
863  enum AVFrameSideDataType type,
864  AVBufferRef **pbuf, unsigned int flags)
865 {
866  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
867  AVFrameSideData *sd_dst = NULL;
868  AVBufferRef *buf = *pbuf;
869 
870  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
871  remove_side_data(sd, nb_sd, type);
872  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
873  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
874  sd_dst = replace_side_data_from_buf(sd_dst, buf, flags);
875  if (sd_dst)
876  *pbuf = NULL;
877  return sd_dst;
878  }
879 
880  sd_dst = add_side_data_from_buf(sd, nb_sd, type, buf);
881  if (!sd_dst)
882  return NULL;
883 
884  *pbuf = NULL;
885  return sd_dst;
886 }
887 
888 int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd,
889  const AVFrameSideData *src, unsigned int flags)
890 {
891  const AVSideDataDescriptor *desc;
892  AVBufferRef *buf = NULL;
893  AVFrameSideData *sd_dst = NULL;
894  int ret = AVERROR_BUG;
895 
896  if (!sd || !src || !nb_sd || (*nb_sd && !*sd))
897  return AVERROR(EINVAL);
898 
899  desc = av_frame_side_data_desc(src->type);
900  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
901  remove_side_data(sd, nb_sd, src->type);
902  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
903  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, src->type))) {
904  AVDictionary *dict = NULL;
905 
906  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
907  return AVERROR(EEXIST);
908 
909  ret = av_dict_copy(&dict, src->metadata, 0);
910  if (ret < 0)
911  return ret;
912 
913  ret = av_buffer_replace(&sd_dst->buf, src->buf);
914  if (ret < 0) {
915  av_dict_free(&dict);
916  return ret;
917  }
918 
919  av_dict_free(&sd_dst->metadata);
920  sd_dst->metadata = dict;
921  sd_dst->data = src->data;
922  sd_dst->size = src->size;
923  return 0;
924  }
925 
926  buf = av_buffer_ref(src->buf);
927  if (!buf)
928  return AVERROR(ENOMEM);
929 
930  sd_dst = add_side_data_from_buf_ext(sd, nb_sd, src->type, buf,
931  src->data, src->size);
932  if (!sd_dst) {
933  av_buffer_unref(&buf);
934  return AVERROR(ENOMEM);
935  }
936 
937  ret = av_dict_copy(&sd_dst->metadata, src->metadata, 0);
938  if (ret < 0) {
939  remove_side_data_by_entry(sd, nb_sd, sd_dst);
940  return ret;
941  }
942 
943  return 0;
944 }
945 
946 const AVFrameSideData *av_frame_side_data_get_c(const AVFrameSideData *const *sd,
947  const int nb_sd,
948  enum AVFrameSideDataType type)
949 {
950  for (int i = 0; i < nb_sd; i++) {
951  if (sd[i]->type == type)
952  return sd[i];
953  }
954  return NULL;
955 }
956 
957 void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd,
958  enum AVFrameSideDataType type)
959 {
960  remove_side_data(sd, nb_sd, type);
961 }
962 
963 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
964  enum AVFrameSideDataType type)
965 {
966  return (AVFrameSideData *)av_frame_side_data_get(
967  frame->side_data, frame->nb_side_data,
968  type
969  );
970 }
971 
972 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
973 {
974  int planes;
975 
976  if (dst->width < src->width ||
977  dst->height < src->height)
978  return AVERROR(EINVAL);
979 
980  if (src->hw_frames_ctx || dst->hw_frames_ctx)
981  return av_hwframe_transfer_data(dst, src, 0);
982 
983  planes = av_pix_fmt_count_planes(dst->format);
984  for (int i = 0; i < planes; i++)
985  if (!dst->data[i] || !src->data[i])
986  return AVERROR(EINVAL);
987 
988  av_image_copy2(dst->data, dst->linesize,
989  src->data, src->linesize,
990  dst->format, src->width, src->height);
991 
992  return 0;
993 }
994 
995 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
996 {
997  int planar = av_sample_fmt_is_planar(dst->format);
998  int channels = dst->ch_layout.nb_channels;
999  int planes = planar ? channels : 1;
1000 
1001  if (dst->nb_samples != src->nb_samples ||
1002  av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
1003  return AVERROR(EINVAL);
1004 
1005  for (int i = 0; i < planes; i++)
1006  if (!dst->extended_data[i] || !src->extended_data[i])
1007  return AVERROR(EINVAL);
1008 
1009  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
1010  dst->nb_samples, channels, dst->format);
1011 
1012  return 0;
1013 }
1014 
1015 int av_frame_copy(AVFrame *dst, const AVFrame *src)
1016 {
1017  if (dst->format != src->format || dst->format < 0)
1018  return AVERROR(EINVAL);
1019 
1020  if (dst->width > 0 && dst->height > 0)
1021  return frame_copy_video(dst, src);
1022  else if (dst->nb_samples > 0 &&
1023  (av_channel_layout_check(&dst->ch_layout)))
1024  return frame_copy_audio(dst, src);
1025 
1026  return AVERROR(EINVAL);
1027 }
1028 
1029 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
1030 {
1031  remove_side_data(&frame->side_data, &frame->nb_side_data, type);
1032 }
1033 
1034 const AVSideDataDescriptor *av_frame_side_data_desc(enum AVFrameSideDataType type)
1035 {
1036  unsigned t = type;
1037  if (t < FF_ARRAY_ELEMS(sd_props) && sd_props[t].name)
1038  return &sd_props[t];
1039  return NULL;
1040 }
1041 
1042 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
1043 {
1044  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
1045  return desc ? desc->name : NULL;
1046 }
1047 
1048 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
1049  const AVPixFmtDescriptor *desc)
1050 {
1051  for (int i = 0; frame->data[i]; i++) {
1052  const AVComponentDescriptor *comp = NULL;
1053  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
1054  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
1055 
1056  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
1057  offsets[i] = 0;
1058  break;
1059  }
1060 
1061  /* find any component descriptor for this plane */
1062  for (int j = 0; j < desc->nb_components; j++) {
1063  if (desc->comp[j].plane == i) {
1064  comp = &desc->comp[j];
1065  break;
1066  }
1067  }
1068  if (!comp)
1069  return AVERROR_BUG;
1070 
1071  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
1072  (frame->crop_left >> shift_x) * comp->step;
1073  }
1074 
1075  return 0;
1076 }
1077 
1078 int av_frame_apply_cropping(AVFrame *frame, int flags)
1079 {
1080  const AVPixFmtDescriptor *desc;
1081  size_t offsets[4];
1082 
1083  if (!(frame->width > 0 && frame->height > 0))
1084  return AVERROR(EINVAL);
1085 
1086  if (frame->crop_left >= INT_MAX - frame->crop_right ||
1087  frame->crop_top >= INT_MAX - frame->crop_bottom ||
1088  (frame->crop_left + frame->crop_right) >= frame->width ||
1089  (frame->crop_top + frame->crop_bottom) >= frame->height)
1090  return AVERROR(ERANGE);
1091 
1092  desc = av_pix_fmt_desc_get(frame->format);
1093  if (!desc)
1094  return AVERROR_BUG;
1095 
1096  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
1097  * formats cannot be easily handled here either (and corresponding decoders
1098  * should not export any cropping anyway), so do the same for those as well.
1099  * */
1100  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
1101  frame->width -= frame->crop_right;
1102  frame->height -= frame->crop_bottom;
1103  frame->crop_right = 0;
1104  frame->crop_bottom = 0;
1105  return 0;
1106  }
1107 
1108  /* calculate the offsets for each plane */
1109  calc_cropping_offsets(offsets, frame, desc);
1110 
1111  /* adjust the offsets to avoid breaking alignment */
1112  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
1113  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
1114  int min_log2_align = INT_MAX;
1115 
1116  for (int i = 0; frame->data[i]; i++) {
1117  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
1118  min_log2_align = FFMIN(log2_align, min_log2_align);
1119  }
1120 
1121  /* we assume, and it should always be true, that the data alignment is
1122  * related to the cropping alignment by a constant power-of-2 factor */
1123  if (log2_crop_align < min_log2_align)
1124  return AVERROR_BUG;
1125 
1126  if (min_log2_align < 5 && log2_crop_align != INT_MAX) {
1127  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
1128  calc_cropping_offsets(offsets, frame, desc);
1129  }
1130  }
1131 
1132  for (int i = 0; frame->data[i]; i++)
1133  frame->data[i] += offsets[i];
1134 
1135  frame->width -= (frame->crop_left + frame->crop_right);
1136  frame->height -= (frame->crop_top + frame->crop_bottom);
1137  frame->crop_left = 0;
1138  frame->crop_right = 0;
1139  frame->crop_top = 0;
1140  frame->crop_bottom = 0;
1141 
1142  return 0;
1143 }
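
The listing ends here. As a brief orientation for readers of this file, the following is a minimal usage sketch (not part of frame.c; the function name example_frame_usage is hypothetical) showing how the allocation and reference-counting entry points defined above are commonly combined by callers of the libavutil API:

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Minimal sketch: allocate a refcounted video frame, clone a second
 * reference to the same buffers, then make the clone writable (which
 * triggers a data copy because the buffers are shared). */
int example_frame_usage(void)
{
    AVFrame *frame = av_frame_alloc();
    AVFrame *clone = NULL;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 640;
    frame->height = 480;

    /* av_frame_get_buffer() ends up in get_video_buffer() above,
     * which allocates padded, aligned planes. */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        goto end;

    /* av_frame_clone() only adds buffer references; no pixels are copied. */
    clone = av_frame_clone(frame);
    if (!clone) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Both frames now share buffers, so av_frame_is_writable(clone) is 0;
     * av_frame_make_writable() allocates fresh buffers and copies the data. */
    ret = av_frame_make_writable(clone);

end:
    av_frame_free(&clone);
    av_frame_free(&frame);
    return ret;
}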