Go to the documentation of this file.
23 #include <vdpau/vdpau.h>
42 VdpPictureInfoVP9 *
info = &pic_ctx->
info.vp9;
51 info->lastReference = VDP_INVALID_HANDLE;
52 info->goldenReference = VDP_INVALID_HANDLE;
53 info->altReference = VDP_INVALID_HANDLE;
55 if (
h->refs[
h->h.refidx[0]].f &&
h->refs[
h->h.refidx[0]].f->private_ref) {
58 if (
h->refs[
h->h.refidx[1]].f &&
h->refs[
h->h.refidx[1]].f->private_ref) {
61 if (
h->refs[
h->h.refidx[2]].f &&
h->refs[
h->h.refidx[2]].f->private_ref) {
65 info->profile =
h->h.profile;
66 info->frameContextIdx =
h->h.framectxid;
67 info->keyFrame =
h->h.keyframe;
68 info->showFrame = !
h->h.invisible;
69 info->errorResilient =
h->h.errorres;
70 info->frameParallelDecoding =
h->h.parallelmode;
75 info->intraOnly =
h->h.intraonly;
76 info->allowHighPrecisionMv =
h->h.keyframe ? 0 :
h->h.highprecisionmvs;
77 info->refreshEntropyProbs =
h->h.refreshctx;
82 info->loopFilterLevel =
h->h.filter.level;
83 info->loopFilterSharpness =
h->h.filter.sharpness;
84 info->modeRefLfEnabled =
h->h.lf_delta.enabled;
86 info->log2TileColumns =
h->h.tiling.log2_tile_cols;
87 info->log2TileRows =
h->h.tiling.log2_tile_rows;
89 info->segmentEnabled =
h->h.segmentation.enabled;
90 info->segmentMapUpdate =
h->h.segmentation.update_map;
91 info->segmentMapTemporalUpdate =
h->h.segmentation.temporal;
92 info->segmentFeatureMode =
h->h.segmentation.absolute_vals;
94 info->qpYAc =
h->h.yac_qi;
95 info->qpYDc =
h->h.ydc_qdelta;
96 info->qpChDc =
h->h.uvdc_qdelta;
97 info->qpChAc =
h->h.uvac_qdelta;
99 info->resetFrameContext =
h->h.resetctx;
100 info->mcompFilterType =
h->h.filtermode ^ (
h->h.filtermode <= 1);
101 info->uncompressedHeaderSize =
h->h.uncompressed_header_size;
102 info->compressedHeaderSize =
h->h.compressed_header_size;
103 info->refFrameSignBias[0] = 0;
107 info->mbModeLfDelta[
i] =
h->h.lf_delta.mode[
i];
110 info->mbRefLfDelta[
i] =
h->h.lf_delta.ref[
i];
113 info->mbSegmentTreeProbs[
i] =
h->h.segmentation.prob[
i];
116 info->activeRefIdx[
i] =
h->h.refidx[
i];
117 info->segmentPredProbs[
i] =
h->h.segmentation.pred_prob[
i];
118 info->refFrameSignBias[
i + 1] =
h->h.signbias[
i];
122 info->segmentFeatureEnable[
i][0] =
h->h.segmentation.feat[
i].q_enabled;
123 info->segmentFeatureEnable[
i][1] =
h->h.segmentation.feat[
i].lf_enabled;
124 info->segmentFeatureEnable[
i][2] =
h->h.segmentation.feat[
i].ref_enabled;
125 info->segmentFeatureEnable[
i][3] =
h->h.segmentation.feat[
i].skip_enabled;
127 info->segmentFeatureData[
i][0] =
h->h.segmentation.feat[
i].q_val;
128 info->segmentFeatureData[
i][1] =
h->h.segmentation.feat[
i].lf_val;
129 info->segmentFeatureData[
i][2] =
h->h.segmentation.feat[
i].ref_val;
130 info->segmentFeatureData[
i][3] = 0;
136 info->colorSpace = 0;
139 info->colorSpace = 1;
142 info->colorSpace = 2;
145 info->colorSpace = 3;
148 info->colorSpace = 4;
151 info->colorSpace = 5;
154 info->colorSpace = 6;
157 info->colorSpace = 7;
209 profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
212 profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
215 profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
218 profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
static int vdpau_vp9_end_frame(AVCodecContext *avctx)
Filter the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout and sample format they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated That means that if a filter requires that its input and output have the same format amongst all the supported ones, all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
enum AVColorSpace colorspace
YUV colorspace type.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
int depth
Number of bits in the component.
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
static int vdpau_vp9_init(AVCodecContext *avctx)
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
static double val(void *priv, double ch)
void * hwaccel_picture_private
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
#define FF_ARRAY_ELEMS(a)
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
int ff_vdpau_common_uninit(AVCodecContext *avctx)
const AVHWAccel ff_vp9_vdpau_hwaccel
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
const char * name
Name of the hardware accelerated codec.
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
#define i(width, name, range_min, range_max)
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
#define HWACCEL_CAP_ASYNC_SAFE
static const uint8_t start_code_prefix[3]
main external API structure.
union VDPAUPictureInfo info
VDPAU picture information.
the frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
static int vdpau_vp9_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int vdpau_vp9_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
int width
picture width / height.
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.