/* FFmpeg — vdpau.c (see the file documentation below) */
1 /*
2  * Video Decode and Presentation API for UNIX (VDPAU) is used for
3  * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
4  *
5  * Copyright (c) 2008 NVIDIA
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "libavutil/mem.h"
27 #include "avcodec.h"
28 #include "decode.h"
29 #include "hwaccel_internal.h"
30 #include "internal.h"
31 #include "vdpau.h"
32 #include "vdpau_internal.h"
33 
34 /**
35  * @addtogroup VDPAU_Decoding
36  *
37  * @{
38  */
39 
40 static int vdpau_error(VdpStatus status)
41 {
42  switch (status) {
43  case VDP_STATUS_OK:
44  return 0;
45  case VDP_STATUS_NO_IMPLEMENTATION:
46  return AVERROR(ENOSYS);
47  case VDP_STATUS_DISPLAY_PREEMPTED:
48  return AVERROR(EIO);
49  case VDP_STATUS_INVALID_HANDLE:
50  return AVERROR(EBADF);
51  case VDP_STATUS_INVALID_POINTER:
52  return AVERROR(EFAULT);
53  case VDP_STATUS_RESOURCES:
54  return AVERROR(ENOBUFS);
55  case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
56  return AVERROR(EXDEV);
57  case VDP_STATUS_ERROR:
58  return AVERROR(EIO);
59  default:
60  return AVERROR(EINVAL);
61  }
62 }
63 
65  VdpChromaType *type,
66  uint32_t *width, uint32_t *height)
67 {
68  VdpChromaType t;
69  uint32_t w = avctx->coded_width;
70  uint32_t h = avctx->coded_height;
71 
72  /* See <vdpau/vdpau.h> for per-type alignment constraints. */
73  switch (avctx->sw_pix_fmt) {
74  case AV_PIX_FMT_YUV420P:
78  t = VDP_CHROMA_TYPE_420;
79  w = (w + 1) & ~1;
80  h = (h + 3) & ~3;
81  break;
82  case AV_PIX_FMT_YUV422P:
84  t = VDP_CHROMA_TYPE_422;
85  w = (w + 1) & ~1;
86  h = (h + 1) & ~1;
87  break;
88  case AV_PIX_FMT_YUV444P:
92  t = VDP_CHROMA_TYPE_444;
93  h = (h + 1) & ~1;
94  break;
95  default:
96  return AVERROR(ENOSYS);
97  }
98 
99  if (type)
100  *type = t;
101  if (width)
102  *width = w;
103  if (height)
104  *height = h;
105  return 0;
106 }
107 
109  AVBufferRef *hw_frames_ctx)
110 {
111  AVHWFramesContext *hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
112  VdpChromaType type;
113  uint32_t width;
114  uint32_t height;
115 
117  return AVERROR(EINVAL);
118 
119  hw_frames->format = AV_PIX_FMT_VDPAU;
120  hw_frames->sw_format = avctx->sw_pix_fmt;
121  hw_frames->width = width;
122  hw_frames->height = height;
123 
124  return 0;
125 }
126 
128  VdpDecoderProfile profile, int level)
129 {
130  VDPAUHWContext *hwctx = avctx->hwaccel_context;
131  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
132  VdpVideoSurfaceQueryCapabilities *surface_query_caps;
133  VdpDecoderQueryCapabilities *decoder_query_caps;
134  VdpDecoderCreate *create;
135  VdpGetInformationString *info;
136  const char *info_string;
137  void *func;
138  VdpStatus status;
139  VdpBool supported;
140  uint32_t max_level, max_mb, max_width, max_height;
141  VdpChromaType type;
142  uint32_t width;
143  uint32_t height;
144  int ret;
145 
146  vdctx->width = UINT32_MAX;
147  vdctx->height = UINT32_MAX;
148 
150  return AVERROR(ENOSYS);
151 
152  if (hwctx) {
153  hwctx->reset = 0;
154 
155  if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
156  vdctx->decoder = hwctx->context.decoder;
157  vdctx->render = hwctx->context.render;
158  vdctx->device = VDP_INVALID_HANDLE;
159  return 0; /* Decoder created by user */
160  }
161 
162  vdctx->device = hwctx->device;
163  vdctx->get_proc_address = hwctx->get_proc_address;
164 
165  if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
166  level = 0;
167 
168  if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) &&
169  type != VDP_CHROMA_TYPE_420)
170  return AVERROR(ENOSYS);
171  } else {
172  AVHWFramesContext *frames_ctx;
173  AVVDPAUDeviceContext *dev_ctx;
174 
176  if (ret < 0)
177  return ret;
178 
179  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
180  dev_ctx = frames_ctx->device_ctx->hwctx;
181 
182  vdctx->device = dev_ctx->device;
183  vdctx->get_proc_address = dev_ctx->get_proc_address;
184 
186  level = 0;
187  }
188 
189  if (level < 0)
190  return AVERROR(ENOTSUP);
191 
192  status = vdctx->get_proc_address(vdctx->device,
193  VDP_FUNC_ID_GET_INFORMATION_STRING,
194  &func);
195  if (status != VDP_STATUS_OK)
196  return vdpau_error(status);
197  else
198  info = func;
199 
200  status = info(&info_string);
201  if (status != VDP_STATUS_OK)
202  return vdpau_error(status);
203  if (avctx->codec_id == AV_CODEC_ID_HEVC && strncmp(info_string, "NVIDIA ", 7) == 0 &&
205  int driver_version = 0;
206  sscanf(info_string, "NVIDIA VDPAU Driver Shared Library %d", &driver_version);
207  if (driver_version < 410) {
208  av_log(avctx, AV_LOG_VERBOSE, "HEVC with NVIDIA VDPAU drivers is buggy, skipping.\n");
209  return AVERROR(ENOTSUP);
210  }
211  }
212 
213  status = vdctx->get_proc_address(vdctx->device,
214  VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
215  &func);
216  if (status != VDP_STATUS_OK)
217  return vdpau_error(status);
218  else
219  surface_query_caps = func;
220 
221  status = surface_query_caps(vdctx->device, type, &supported,
222  &max_width, &max_height);
223  if (status != VDP_STATUS_OK)
224  return vdpau_error(status);
225  if (supported != VDP_TRUE ||
226  max_width < width || max_height < height)
227  return AVERROR(ENOTSUP);
228 
229  status = vdctx->get_proc_address(vdctx->device,
230  VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
231  &func);
232  if (status != VDP_STATUS_OK)
233  return vdpau_error(status);
234  else
235  decoder_query_caps = func;
236 
237  status = decoder_query_caps(vdctx->device, profile, &supported, &max_level,
238  &max_mb, &max_width, &max_height);
239 #ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
240  if ((status != VDP_STATUS_OK || supported != VDP_TRUE) && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
241  profile = VDP_DECODER_PROFILE_H264_MAIN;
242  status = decoder_query_caps(vdctx->device, profile, &supported,
243  &max_level, &max_mb,
244  &max_width, &max_height);
245  }
246 #endif
247  if (status != VDP_STATUS_OK)
248  return vdpau_error(status);
249 
250  if (supported != VDP_TRUE || max_level < level ||
251  max_width < width || max_height < height)
252  return AVERROR(ENOTSUP);
253 
254  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE,
255  &func);
256  if (status != VDP_STATUS_OK)
257  return vdpau_error(status);
258  else
259  create = func;
260 
261  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER,
262  &func);
263  if (status != VDP_STATUS_OK)
264  return vdpau_error(status);
265  else
266  vdctx->render = func;
267 
268  status = create(vdctx->device, profile, width, height, avctx->refs,
269  &vdctx->decoder);
270  if (status == VDP_STATUS_OK) {
271  vdctx->width = avctx->coded_width;
272  vdctx->height = avctx->coded_height;
273  }
274 
275  return vdpau_error(status);
276 }
277 
279 {
280  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
281  VdpDecoderDestroy *destroy;
282  void *func;
283  VdpStatus status;
284 
285  if (vdctx->device == VDP_INVALID_HANDLE)
286  return 0; /* Decoder created and destroyed by user */
287  if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)
288  return 0;
289 
290  status = vdctx->get_proc_address(vdctx->device,
291  VDP_FUNC_ID_DECODER_DESTROY, &func);
292  if (status != VDP_STATUS_OK)
293  return vdpau_error(status);
294  else
295  destroy = func;
296 
297  status = destroy(vdctx->decoder);
298  return vdpau_error(status);
299 }
300 
302 {
303  VDPAUHWContext *hwctx = avctx->hwaccel_context;
304  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
305 
306  if (vdctx->device == VDP_INVALID_HANDLE)
307  return 0; /* Decoder created by user */
308  if (avctx->coded_width == vdctx->width &&
309  avctx->coded_height == vdctx->height && (!hwctx || !hwctx->reset))
310  return 0;
311 
312  FF_HW_SIMPLE_CALL(avctx, uninit);
313  return FF_HW_SIMPLE_CALL(avctx, init);
314 }
315 
317  av_unused const uint8_t *buffer,
318  av_unused uint32_t size)
319 {
320  pic_ctx->bitstream_buffers_allocated = 0;
321  pic_ctx->bitstream_buffers_used = 0;
322  pic_ctx->bitstream_buffers = NULL;
323  return 0;
324 }
325 
327  struct vdpau_picture_context *pic_ctx)
328 {
329  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
330  AVVDPAUContext *hwctx = avctx->hwaccel_context;
331  VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
332  VdpStatus status;
333  int val;
334 
335  val = ff_vdpau_common_reinit(avctx);
336  if (val < 0)
337  return val;
338 
339  if (hwctx && !hwctx->render && hwctx->render2) {
340  status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
341  pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
342  } else
343  status = vdctx->render(vdctx->decoder, surf, &pic_ctx->info,
344  pic_ctx->bitstream_buffers_used,
345  pic_ctx->bitstream_buffers);
346 
347  av_freep(&pic_ctx->bitstream_buffers);
348 
349  return vdpau_error(status);
350 }
351 
352 #if CONFIG_MPEG1_VDPAU_HWACCEL || \
353  CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
354  CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL
355 #include "mpegvideodec.h"
356 
358 {
359  MpegEncContext *s = avctx->priv_data;
360  MPVPicture *pic = s->cur_pic.ptr;
361  struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
362  int val;
363 
364  val = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
365  if (val < 0)
366  return val;
367 
368  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
369  return 0;
370 }
371 #endif
372 
374  const uint8_t *buf, uint32_t size)
375 {
376  VdpBitstreamBuffer *buffers = pic_ctx->bitstream_buffers;
377 
378  buffers = av_fast_realloc(buffers, &pic_ctx->bitstream_buffers_allocated,
379  (pic_ctx->bitstream_buffers_used + 1) * sizeof(*buffers));
380  if (!buffers)
381  return AVERROR(ENOMEM);
382 
383  pic_ctx->bitstream_buffers = buffers;
384  buffers += pic_ctx->bitstream_buffers_used++;
385 
386  buffers->struct_version = VDP_BITSTREAM_BUFFER_VERSION;
387  buffers->bitstream = buf;
388  buffers->bitstream_bytes = size;
389  return 0;
390 }
391 
392 int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
393  VdpGetProcAddress *get_proc, unsigned flags)
394 {
395  VDPAUHWContext *hwctx;
396 
398  return AVERROR(EINVAL);
399 
400  if (av_reallocp(&avctx->hwaccel_context, sizeof(*hwctx)))
401  return AVERROR(ENOMEM);
402 
403  hwctx = avctx->hwaccel_context;
404 
405  memset(hwctx, 0, sizeof(*hwctx));
406  hwctx->context.decoder = VDP_INVALID_HANDLE;
407  hwctx->device = device;
408  hwctx->get_proc_address = get_proc;
409  hwctx->flags = flags;
410  hwctx->reset = 1;
411  return 0;
412 }
413 
414 /* @}*/
flags
const SwsFlags flags[]
Definition: swscale.c:61
func
int(* func)(AVBPrint *dst, const char *in, const char *arg)
Definition: jacosubdec.c:68
AVHWDeviceContext::hwctx
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
Definition: hwcontext.h:86
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1429
level
uint8_t level
Definition: svq3.c:205
ff_vdpau_common_frame_params
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Definition: vdpau.c:108
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
destroy
static void destroy(struct ResampleContext **c)
Definition: soxr_resample.c:64
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_vdpau_common_reinit
static int ff_vdpau_common_reinit(AVCodecContext *avctx)
Definition: vdpau.c:301
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:198
av_unused
#define av_unused
Definition: attributes.h:131
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
VDPAUHWContext::get_proc_address
VdpGetProcAddress * get_proc_address
Definition: vdpau_internal.h:68
vdpau_picture_context::bitstream_buffers_used
int bitstream_buffers_used
Useful bitstream buffers in the bitstream buffers table.
Definition: vdpau_internal.h:112
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:528
AVVDPAUDeviceContext::get_proc_address
VdpGetProcAddress * get_proc_address
Definition: hwcontext_vdpau.h:37
AVVDPAUDeviceContext
This struct is allocated as AVHWDeviceContext.hwctx.
Definition: hwcontext_vdpau.h:35
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AVVDPAUContext::render2
AVVDPAU_Render2 render2
Definition: vdpau.h:93
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:218
VDPAUContext::width
uint32_t width
Definition: vdpau_internal.h:94
VDPAUContext::render
VdpDecoderRender * render
VDPAU decoder render callback.
Definition: vdpau_internal.h:92
vdpau_internal.h
AV_HWACCEL_FLAG_IGNORE_LEVEL
#define AV_HWACCEL_FLAG_IGNORE_LEVEL
Hardware acceleration should be used for decoding even if the codec level used is unknown or higher t...
Definition: avcodec.h:1986
VDPAUHWContext::device
VdpDevice device
Definition: vdpau_internal.h:67
vdpau_picture_context
Definition: vdpau_internal.h:98
AVVDPAUContext
This structure is used to share data between the libavcodec library and the client video application.
Definition: vdpau.h:78
AVCodecContext::refs
int refs
number of reference frames
Definition: avcodec.h:689
val
static double val(void *priv, double ch)
Definition: aeval.c:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
ff_vdpau_add_buffer
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
Definition: vdpau.c:373
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:531
mpegvideodec.h
vdpau.h
av_cold
#define av_cold
Definition: attributes.h:90
AVHWFramesContext::height
int height
Definition: hwcontext.h:218
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:497
s
#define s(width, name)
Definition: cbs_vp9.c:198
info
MIPS optimizations info
Definition: mips.txt:2
decode.h
av_vdpau_bind_context
int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, VdpGetProcAddress *get_proc, unsigned flags)
Associate a VDPAU device with a codec context for hardware acceleration.
Definition: vdpau.c:392
AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH
#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH
Hardware acceleration can output YUV pixel formats with a different chroma sampling than 4:2:0 and/or...
Definition: avcodec.h:1992
ff_vdpau_common_start_frame
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
Definition: vdpau.c:316
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
ff_vdpau_get_surface_id
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
Definition: vdpau_internal.h:38
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
ff_decode_get_hw_frames_ctx
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1043
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
if
if(ret)
Definition: filter_design.txt:179
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:211
hwaccel_internal.h
create
static struct ResampleContext * create(struct ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff, enum AVSampleFormat format, enum SwrFilterType filter_type, double kaiser_beta, double precision, int cheby, int exact_rational)
Definition: soxr_resample.c:32
VDPAUContext::height
uint32_t height
Definition: vdpau_internal.h:95
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
ff_vdpau_common_end_frame
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
Definition: vdpau.c:326
AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH
#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH
Hardware acceleration should still be attempted for decoding when the codec profile does not match th...
Definition: avcodec.h:2006
VDPAUHWContext
Definition: vdpau_internal.h:65
ff_vdpau_common_uninit
av_cold int ff_vdpau_common_uninit(AVCodecContext *avctx)
Definition: vdpau.c:278
av_vdpau_get_surface_parameters
int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, uint32_t *width, uint32_t *height)
Gets the parameters to create an adequate VDPAU video surface for the codec context using VDPAU hardw...
Definition: vdpau.c:64
VDPAUContext::device
VdpDevice device
VDPAU device handle.
Definition: vdpau_internal.h:77
VDPAUContext
Definition: vdpau_internal.h:73
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
height
#define height
Definition: dsp.h:85
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:130
size
int size
Definition: twinvq_data.h:10344
av_reallocp
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
Definition: mem.c:188
VDPAUContext::decoder
VdpDecoder decoder
VDPAU decoder handle.
Definition: vdpau_internal.h:82
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:535
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:423
ff_vdpau_mpeg_end_frame
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
uninit
static void uninit(AVBSFContext *ctx)
Definition: pcm_rechunk.c:68
VDPAUHWContext::reset
char reset
Definition: vdpau_internal.h:69
AVCodecContext::hwaccel_flags
int hwaccel_flags
Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated decoding (if active).
Definition: avcodec.h:1484
vdpau_picture_context::bitstream_buffers
VdpBitstreamBuffer * bitstream_buffers
Table of bitstream buffers.
Definition: vdpau_internal.h:117
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:228
VDPAUHWContext::context
AVVDPAUContext context
Definition: vdpau_internal.h:66
AV_HWDEVICE_TYPE_VDPAU
@ AV_HWDEVICE_TYPE_VDPAU
Definition: hwcontext.h:29
profile
int profile
Definition: mxfenc.c:2250
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1453
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:116
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVHWFramesContext::device_ctx
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
Definition: hwcontext.h:135
VDPAUContext::get_proc_address
VdpGetProcAddress * get_proc_address
VDPAU device driver.
Definition: vdpau_internal.h:87
ff_vdpau_common_init
av_cold int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
Definition: vdpau.c:127
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:532
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AVVDPAUContext::decoder
VdpDecoder decoder
VDPAU decoder handle.
Definition: vdpau.h:84
vdpau_picture_context::bitstream_buffers_allocated
int bitstream_buffers_allocated
Allocated size of the bitstream_buffers table.
Definition: vdpau_internal.h:107
AVCodecContext
main external API structure.
Definition: avcodec.h:431
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
vdpau_picture_context::info
union VDPAUPictureInfo info
VDPAU picture information.
Definition: vdpau_internal.h:102
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
vdpau_error
static int vdpau_error(VdpStatus status)
Definition: vdpau.c:40
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
VDPAUHWContext::flags
unsigned char flags
Definition: vdpau_internal.h:70
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVVDPAUContext::render
VdpDecoderRender * render
VDPAU decoder render callback.
Definition: vdpau.h:91
h
h
Definition: vp9dsp_template.c:2070
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:638
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
AVVDPAUDeviceContext::device
VdpDevice device
Definition: hwcontext_vdpau.h:36
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64