#include <vdpau/vdpau.h>
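/* vdpau_h264_clear_rf(VdpReferenceFrameH264 *rf):
 * mark one VDPAU reference-frame entry as unused. */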
rf->surface             = VDP_INVALID_HANDLE;
rf->is_long_term        = VDP_FALSE;
rf->top_is_reference    = VDP_FALSE;
rf->bottom_is_reference = VDP_FALSE;
rf->field_order_cnt[0]  = 0;
rf->field_order_cnt[1]  = 0;
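/* vdpau_h264_set_rf(VdpReferenceFrameH264 *rf, H264Picture *pic, int pic_structure):
 * fill a reference-frame entry from a decoded H264Picture. */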
if (pic_structure == 0)
    pic_structure = pic->reference; /* default to the picture's own reference type */

rf->surface = surface;
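/* vdpau_h264_set_reference_frames(AVCodecContext *avctx):
 * walk the short-term and long-term reference lists and populate
 * info->referenceFrames[] for the decoder. */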
VdpReferenceFrameH264 *rf = &info->referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(info->referenceFrames)
int i, ls = list ? 16 : h->short_ref_count;

for (i = 0; i < ls; ++i) {
    VdpReferenceFrameH264 *rf2;
    VdpVideoSurface surface_ref;

    /* check whether this surface is already present in referenceFrames[] */
    rf2 = &info->referenceFrames[0];
    if ((rf2->surface      == surface_ref)   &&
        (rf2->is_long_term == pic->long_ref) &&
        (rf2->frame_idx    == pic_frame_idx))
        break;
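/* vdpau_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size):
 * translate the active SPS/PPS fields and the current slice-header state into the
 * VdpPictureInfoH264 handed to the VDPAU decoder. */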
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
VdpPictureInfoH264Predictive *info2 = &pic_ctx->info.h264_predictive;
#endif
info->slice_count                       = 0;
info->is_reference                      = h->nal_ref_idc != 0;
info->frame_num                         = h->poc.frame_num;
info->num_ref_frames                    = sps->ref_frame_count;
info->mb_adaptive_frame_field_flag      = sps->mb_aff && !info->field_pic_flag;
info->constrained_intra_pred_flag       = pps->constrained_intra_pred;
info->weighted_pred_flag                = pps->weighted_pred;
info->weighted_bipred_idc               = pps->weighted_bipred_idc;
info->frame_mbs_only_flag               = sps->frame_mbs_only_flag;
info->transform_8x8_mode_flag           = pps->transform_8x8_mode;
info->chroma_qp_index_offset            = pps->chroma_qp_index_offset[0];
info->second_chroma_qp_index_offset     = pps->chroma_qp_index_offset[1];
info->pic_init_qp_minus26               = pps->init_qp - 26;
info->num_ref_idx_l0_active_minus1      = pps->ref_count[0] - 1;
info->num_ref_idx_l1_active_minus1      = pps->ref_count[1] - 1;
info->log2_max_frame_num_minus4         = sps->log2_max_frame_num - 4;
info->pic_order_cnt_type                = sps->poc_type;
info->log2_max_pic_order_cnt_lsb_minus4 = sps->poc_type ? 0 : sps->log2_max_poc_lsb - 4;
info->delta_pic_order_always_zero_flag  = sps->delta_pic_order_always_zero_flag;
info->direct_8x8_inference_flag         = sps->direct_8x8_inference_flag;
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
info2->qpprime_y_zero_transform_bypass_flag = sps->transform_bypass;
info2->separate_colour_plane_flag           = sps->residual_color_transform_flag;
#endif
info->entropy_coding_mode_flag               = pps->cabac;
info->pic_order_present_flag                 = pps->pic_order_present;
info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
info->redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present;
memcpy(info->scaling_lists_4x4,    pps->scaling_matrix4,
       sizeof(info->scaling_lists_4x4));
memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
       sizeof(info->scaling_lists_8x8[0]));
memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
       sizeof(info->scaling_lists_8x8[1]));
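/* vdpau_h264_init(AVCodecContext *avctx): map the stream's H.264 profile to a
 * VdpDecoderProfile (one assignment per supported AV_PROFILE_H264_* branch) and
 * pick the decoder level before calling ff_vdpau_common_init(). */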
profile = VDP_DECODER_PROFILE_H264_BASELINE;
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
profile = VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE;
#endif
profile = VDP_DECODER_PROFILE_H264_MAIN;
profile = VDP_DECODER_PROFILE_H264_HIGH;
#ifdef VDP_DECODER_PROFILE_H264_EXTENDED
profile = VDP_DECODER_PROFILE_H264_EXTENDED;
#endif
profile = VDP_DECODER_PROFILE_H264_HIGH; /* High 10 is mapped to the plain High decoder profile */
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
profile = VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE;
#endif
level = VDP_DECODER_LEVEL_H264_1b;
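/* ff_h264_vdpau_hwaccel: the FFHWAccel descriptor for this hwaccel; its embedded
 * public AVHWAccel is named "h264_vdpau". */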
.p.name = "h264_vdpau",