#include "config_components.h"

#include <vdpau/vdpau.h>

#include "avcodec.h"
#include "hwaccel_internal.h"
#include "mpegvideo.h"
#include "vdpau.h"
#include "vdpau_internal.h"

static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    struct vdpau_picture_context *pic_ctx = s->current_picture_ptr->hwaccel_picture_private;
    VdpPictureInfoMPEG1Or2 *info = &pic_ctx->info.mpeg;
    VdpVideoSurface ref;
    int i;

    /* fill VdpPictureInfoMPEG1Or2 struct */
    info->forward_reference  = VDP_INVALID_HANDLE;
    info->backward_reference = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        ref = ff_vdpau_get_surface_id(s->next_picture.f);
        assert(ref != VDP_INVALID_HANDLE);
        info->backward_reference = ref;
        /* fall through to forward prediction */
    case AV_PICTURE_TYPE_P:
        ref = ff_vdpau_get_surface_id(s->last_picture.f);
        info->forward_reference = ref;
    }

    info->slice_count                = 0;
    info->picture_structure          = s->picture_structure;
    info->picture_coding_type        = s->pict_type;
    info->intra_dc_precision         = s->intra_dc_precision;
    info->frame_pred_frame_dct       = s->frame_pred_frame_dct;
    info->concealment_motion_vectors = s->concealment_motion_vectors;
    info->intra_vlc_format           = s->intra_vlc_format;
    info->alternate_scan             = s->alternate_scan;
    info->q_scale_type               = s->q_scale_type;
    info->top_field_first            = s->top_field_first;
    /* MPEG-1 only */
    info->full_pel_forward_vector    = s->full_pel[0];
    info->full_pel_backward_vector   = s->full_pel[1];
    /* MPEG-2 only */
    info->f_code[0][0]               = s->mpeg_f_code[0][0];
    info->f_code[0][1]               = s->mpeg_f_code[0][1];
    info->f_code[1][0]               = s->mpeg_f_code[1][0];
    info->f_code[1][1]               = s->mpeg_f_code[1][1];
    /* The quantizer matrices are kept in the IDCT permutation order;
     * undo the permutation before handing them to VDPAU. */
    for (i = 0; i < 64; ++i) {
        int n = s->idsp.idct_permutation[i];
        info->intra_quantizer_matrix[i]     = s->intra_matrix[n];
        info->non_intra_quantizer_matrix[i] = s->inter_matrix[n];
    }

    return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
}
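The slice hook that pairs with vdpau_mpeg_start_frame() is not reproduced in the listing above. A minimal sketch, assuming the usual libavcodec VDPAU pattern of queueing bitstream data with ff_vdpau_add_buffer() and bumping slice_count until ff_vdpau_mpeg_end_frame() submits the picture (both helpers are listed in the reference section below):

static int vdpau_mpeg_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    struct vdpau_picture_context *pic_ctx = s->current_picture_ptr->hwaccel_picture_private;
    int val;

    /* Queue the raw slice data; the actual VdpDecoderRender() call happens
     * later, when ff_vdpau_mpeg_end_frame() is invoked for this picture. */
    val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
    if (val < 0)
        return val;

    pic_ctx->info.mpeg.slice_count++;
    return 0;
}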
#if CONFIG_MPEG1_VDPAU_HWACCEL
static int vdpau_mpeg1_init(AVCodecContext *avctx)
{
    return ff_vdpau_common_init(avctx, VDP_DECODER_PROFILE_MPEG1,
                                VDP_DECODER_LEVEL_MPEG1_NA);
}

const FFHWAccel ff_mpeg1_vdpau_hwaccel = {
    .p.name = "mpeg1_vdpau",
    /* ... codec id, AV_PIX_FMT_VDPAU and the per-frame hooks ... */
    .init   = vdpau_mpeg1_init,
};
#endif

#if CONFIG_MPEG2_VDPAU_HWACCEL
static int vdpau_mpeg2_init(AVCodecContext *avctx)
{
    VdpDecoderProfile profile;

    switch (avctx->profile) {
    case AV_PROFILE_MPEG2_MAIN:
        profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
        break;
    case AV_PROFILE_MPEG2_SIMPLE:
        profile = VDP_DECODER_PROFILE_MPEG2_SIMPLE;
        break;
    default:
        return AVERROR(EINVAL);
    }
    return ff_vdpau_common_init(avctx, profile, VDP_DECODER_LEVEL_MPEG2_HL);
}

const FFHWAccel ff_mpeg2_vdpau_hwaccel = {
    .p.name = "mpeg2_vdpau",
    /* ... codec id, AV_PIX_FMT_VDPAU and the per-frame hooks ... */
    .init   = vdpau_mpeg2_init,
};
#endif
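For context, applications reach these hwaccel tables through the public hardware-acceleration API rather than by name. A rough sketch of enabling VDPAU decoding from client code using the public libavcodec/libavutil API; the helper names open_vdpau_decoder and get_vdpau_format are made up for the example, error handling is trimmed, and codec/context setup is assumed to exist elsewhere:

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static enum AVPixelFormat get_vdpau_format(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmts)
{
    /* Pick AV_PIX_FMT_VDPAU if the decoder offers it, otherwise fall back
     * to the first (software) format in the list. */
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == AV_PIX_FMT_VDPAU)
            return *p;
    return fmts[0];
}

static int open_vdpau_decoder(AVCodecContext *avctx)
{
    AVBufferRef *device = NULL;
    int ret = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VDPAU,
                                     NULL, NULL, 0);
    if (ret < 0)
        return ret;
    avctx->hw_device_ctx = av_buffer_ref(device); /* decoder keeps its own ref */
    avctx->get_format    = get_vdpau_format;
    av_buffer_unref(&device);
    return avcodec_open2(avctx, NULL, NULL);
}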
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.
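A minimal sketch of that negotiation style, assuming the classic internal query_formats() callback with ff_make_format_list() and ff_set_common_formats(); the exact helper names and signatures vary between libavfilter versions, so treat this as illustrative only:

/* Hypothetical filter callback (internal libavfilter API):
 * advertise one shared format list on every input and output link. */
#include "avfilter.h"
#include "formats.h"

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    /* One shared list reference for all links: once a format is chosen on
     * any link, the shared list is narrowed for the others too. A filter
     * with more complex constraints could instead leave some links unset
     * here and return AVERROR(EAGAIN) to be called again later. */
    return ff_set_common_formats(ctx, fmts);
}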
const struct FFHWAccel ff_mpeg2_vdpau_hwaccel
#define AV_PROFILE_MPEG2_SIMPLE
AVHWAccel p
The public AVHWAccel.
static int vdpau_mpeg_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
VdpPictureInfoMPEG1Or2 mpeg
const struct FFHWAccel ff_mpeg1_vdpau_hwaccel
static double val(void *priv, double ch)
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
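Given that AV_PIX_FMT_VDPAU stores the surface handle in data[3] (see the AV_PIX_FMT_VDPAU entry below), this helper presumably amounts to a cast; a hedged sketch rather than the exact vdpau_internal.h definition:

static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
{
    /* For AV_PIX_FMT_VDPAU frames the VdpVideoSurface handle is carried
     * in data[3]. */
    return (uintptr_t)pic->data[3];
}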
#define HWACCEL_CAP_ASYNC_SAFE
Header providing the internals of AVHWAccel.
int ff_vdpau_common_uninit(AVCodecContext *avctx)
int (*init)(AVBSFContext *ctx)
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
const char * name
Name of the hardware accelerated codec.
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
#define i(width, name, range_min, range_max)
main external API structure.
union VDPAUPictureInfo info
VDPAU picture information.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
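A minimal sketch of that sharing, using the public av_frame_ref()/av_frame_unref() API from libavutil/frame.h:

#include <libavutil/frame.h>

/* Two references to the same underlying buffer: no pixel data is copied,
 * and the buffer is only freed once the last reference is released. */
static void share_frame(AVFrame *src)
{
    AVFrame *copy = av_frame_alloc();
    if (!copy)
        return;
    if (av_frame_ref(copy, src) >= 0) { /* copy now shares src's buffers  */
        av_frame_unref(src);            /* src lets go; data stays alive  */
        /* ... work with copy ... */
        av_frame_unref(copy);           /* last reference: buffers freed  */
    }
    av_frame_free(&copy);
}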
#define AV_PROFILE_MPEG2_MAIN
static int ref[MAX_W * MAX_W]
@ AV_PICTURE_TYPE_P
Predicted.
static int vdpau_mpeg_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding