#include <sys/types.h>
#include <mfx/mfxvideo.h>
#define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
    MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
    av_rescale_q(pts, pts_tb, mfx_tb) : pts)

#define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
    AV_NOPTS_VALUE : pts_tb.num ? \
    av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
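For reference, a minimal standalone sketch of the rescaling these macros perform, assuming mfx_tb is the 90 kHz MFX clock ((AVRational){1, 90000}) declared later in this file; the packet timebase and sample pts below are made up for illustration:

    #include <stdio.h>
    #include <inttypes.h>
    #include <libavutil/rational.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        AVRational pkt_tb = { 1, 1000 };   /* hypothetical millisecond packet timebase */
        AVRational mfx_tb = { 1, 90000 };  /* the 90 kHz clock MFX timestamps use */
        int64_t pts = 40;                  /* 40 ms */

        /* PTS_TO_MFX_PTS boils down to this rescale when pts is valid and pts_tb is set */
        int64_t mfx_ts = av_rescale_q(pts, pkt_tb, mfx_tb);
        printf("pts %"PRId64" (1/1000) -> %"PRId64" (1/90000)\n", pts, mfx_ts); /* 40 -> 3600 */
        return 0;
    }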
    if (q->gpu_copy == MFX_GPUCOPY_ON &&
        !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
        av_log(avctx, AV_LOG_WARNING, /* ... */
               "only works in system memory mode.\n");
        /* ... */
    }
    /* ... */
    else if (hw_frames_ref) {
        /* ... */
        ret = ff_qsv_init_session_frames(avctx, /* ... */
                                         q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
                                         /* ... */);
        /* ... */
    }
    else if (hw_device_ref) {
        /* ... */
    }

    /* ... */
    MFXVideoDECODE_Close(q->session);
    return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);

/* ... */
    mfxSession session = NULL;
    frames_hwctx = hwframes_ctx->hwctx;
    /* ... */
    frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
    /* ... */
    if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
        iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
    else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
        iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
    /* ... */
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
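The IO pattern chosen above follows from how the caller set up hardware contexts. As a hedged, caller-side illustration (not code from this file), attaching a QSV device context lets the decoder derive a frames context and output to video memory (AV_PIX_FMT_QSV):

    #include <libavcodec/avcodec.h>
    #include <libavutil/hwcontext.h>

    /* Hypothetical caller-side setup: with hw_device_ctx set, the decoder can pick
     * MFX_IOPATTERN_OUT_VIDEO_MEMORY instead of falling back to system memory. */
    static int attach_qsv_device(AVCodecContext *dec_ctx)
    {
        AVBufferRef *device_ref = NULL;
        int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
        if (ret < 0)
            return ret;
        dec_ctx->hw_device_ctx = av_buffer_ref(device_ref);
        av_buffer_unref(&device_ref);
        return dec_ctx->hw_device_ctx ? 0 : AVERROR(ENOMEM);
    }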
    avctx->width  = param->mfx.FrameInfo.CropW;
    avctx->height = param->mfx.FrameInfo.CropH;
    /* ... */
    avctx->level   = param->mfx.CodecLevel;
    avctx->profile = param->mfx.CodecProfile;
    /* ... */
        return ff_qsv_print_error(avctx, ret,
                                  "Error initializing the MFX video decoder");
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
                             const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
                             mfxVideoParam *param)
{
    /* ... */
    mfxExtVideoSignalInfo video_signal_info = { 0 };
    mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
    mfxBitstream bs = { 0 };

    /* ... */
    bs.Data       = avpkt->data;
    bs.DataLength = avpkt->size;
    bs.MaxLength  = bs.DataLength;
    /* ... */
    bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;

    /* ... */
    param->mfx.CodecId = ret;
    video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
    video_signal_info.Header.BufferSz = sizeof(video_signal_info);
    /* ... */
    param->ExtParam    = header_ext_params;
    param->NumExtParam = 1;
    ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
    if (MFX_ERR_MORE_DATA == ret) {
        /* ... */
    }
    if (ret < 0)
        return ff_qsv_print_error(avctx, ret, "Error decoding stream header");
    /* ... */
    if (video_signal_info.ColourDescriptionPresent) {
        /* ... */
        avctx->color_trc  = video_signal_info.TransferCharacteristics;
        avctx->colorspace = video_signal_info.MatrixCoefficients;
    }
    frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
    /* ... */
    frame->surface.Data.PitchLow = frame->frame->linesize[0];
    frame->surface.Data.Y        = frame->frame->data[0];
    frame->surface.Data.UV       = frame->frame->data[1];
    /* ... */
    frame->surface.Data.ExtParam    = &frame->ext_param;
    frame->surface.Data.NumExtParam = 1;
    frame->ext_param                = (mfxExtBuffer*)&frame->dec_info;
    frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
    frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);

/* ... */
    *surf = &frame->surface;
    /* ... */
    *surf = &frame->surface;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    /* ... */
    mfxBitstream bs = { { { 0 } } };

    /* ... */
    bs.Data       = avpkt->data;
    bs.DataLength = avpkt->size;
    bs.MaxLength  = bs.DataLength;
    /* ... */
    bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;

    /* ... */
    do {
        /* ... */
        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, &bs,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(500);
    } while (ret == MFX_WRN_DEVICE_BUSY ||
             ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        /* ... */
        ff_qsv_print_error(avctx, ret, "Error during QSV decoding.");
        /* ... */
    }

    /* ... */
    if (!*sync && !bs.DataOffset) {
        bs.DataOffset = avpkt->size;
        /* ... */
    } else if (!*sync && bs.DataOffset) {
        /* ... */
    }

    /* ... */
    av_log(avctx, AV_LOG_ERROR,
           "The returned surface does not correspond to any frame\n");

    /* ... */
    do {
        ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
    } while (ret == MFX_WRN_IN_EXECUTION);
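Taken together, the fragments above follow the usual MFX asynchronous pattern: feed the bitstream and a free work surface to DecodeFrameAsync, retry while the device is busy, then wait on the returned sync point. A condensed, hedged sketch of that pattern (not this file's exact code; error handling and work-surface management are elided):

    #include <mfx/mfxvideo.h>
    #include <libavutil/time.h>

    /* Simplified sketch: decode with one pre-allocated work surface. */
    static mfxStatus decode_one(mfxSession session, mfxBitstream *bs,
                                mfxFrameSurface1 *work, mfxFrameSurface1 **out)
    {
        mfxSyncPoint sync = NULL;
        mfxStatus ret;

        do {
            ret = MFXVideoDECODE_DecodeFrameAsync(session, bs, work, out, &sync);
            if (ret == MFX_WRN_DEVICE_BUSY)
                av_usleep(500);               /* back off while the device is busy */
        } while (ret == MFX_WRN_DEVICE_BUSY); /* the real loop also retries on MFX_ERR_MORE_SURFACE */

        if (sync)                             /* a decoded frame is in flight; wait for it */
            do {
                ret = MFXVideoCORE_SyncOperation(session, sync, 1000);
            } while (ret == MFX_WRN_IN_EXECUTION);

        return ret;
    }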
    src_frame = out_frame->frame;
    /* ... */
    frame->repeat_pict =
        outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
        outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
        outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
    frame->top_field_first =
        outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
    frame->interlaced_frame =
        !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
    /* ... */
    frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
    /* ... */
    ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
    /* ... */
    return bs.DataOffset;

/* ... */
    MFXVideoDECODE_Close(q->session);
    mfxVideoParam param = { 0 };
        uid = "f622394d8d87452f878c51f2fc9b4131";
    /* ... */
        uid = "a922394d8d87452f878c51f2fc9b4131";
    /* ... */
        static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
        static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";

        if (s->qsv.load_plugins[0]) {
            av_log(avctx, AV_LOG_WARNING,
                   "load_plugins is not empty, but load_plugin is not set to 'none'."
                   "The load_plugin value will be ignored.\n");
            /* ... */
        }
        /* ... */
            uid = uid_hevcdec_sw;
        /* ... */
            uid = uid_hevcdec_hw;

    /* ... */
    if (!s->qsv.load_plugins)
        /* ... */

    /* ... */
    if (!s->packet_fifo) {
        /* ... */
    }
    while (!*got_frame) {
        /* ... */
        if (s->buffer_pkt.size <= 0) {
            /* ... */
        }
        /* ... */
        if (!s->qsv.reinit_flag) {
            /* ... */
        }
        /* ... */
        if (s->qsv.reinit_flag)
            continue;

        s->buffer_pkt.size -= ret;
        s->buffer_pkt.data += ret;
    }

/* ... */
    s->qsv.initialized = 0;
#define OFFSET(x) offsetof(QSVDecContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
static const AVClass x##_qsv_class = { \
    .class_name = #x "_qsv", \
    .item_name  = av_default_item_name, \
    /* ... */ \
    .version    = LIBAVUTIL_VERSION_INT, \
}; \
/* ... */ \
const AVCodec ff_##x##_qsv_decoder = { \
    /* ... */ \
    .long_name      = NULL_IF_CONFIG_SMALL(#X " video (Intel Quick Sync Video acceleration)"), \
    .priv_data_size = sizeof(QSVDecContext), \
    .type           = AVMEDIA_TYPE_VIDEO, \
    .id             = AV_CODEC_ID_##X, \
    .init           = qsv_decode_init, \
    .decode         = qsv_decode_frame, \
    .flush          = qsv_decode_flush, \
    .close          = qsv_decode_close, \
    /* ... */ \
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
    .priv_class     = &x##_qsv_class, \
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
                                                    /* ... */ }, \
    .hw_configs     = qsv_hw_configs, \
    .wrapper_name   = "qsv", \
};

#define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
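For illustration only (the concrete invocations are elided from this listing), the macro is meant to be instantiated once per codec; a hypothetical invocation would look like:

    /* Hypothetical usage sketch: would declare ff_h264_qsv_decoder with the shared
     * options table and the named bitstream filter. */
    DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")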
#if CONFIG_HEVC_QSV_DECODER
/* ... */
    { "load_plugins", "A :-separate list of hexadecimal plugin UIDs to load in an internal session",
        /* ... */ },
    { "gpu_copy", "A GPU-accelerated copy between video and system memory",
        OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT },
        MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy" },
/* ... */
    { "gpu_copy", "A GPU-accelerated copy between video and system memory",
        OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT },
        MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy" },
/* ... */

#if CONFIG_H264_QSV_DECODER
/* ... */
#if CONFIG_MPEG2_QSV_DECODER
/* ... */
#if CONFIG_VC1_QSV_DECODER
/* ... */
#if CONFIG_MJPEG_QSV_DECODER
/* ... */
#if CONFIG_VP8_QSV_DECODER
/* ... */
#if CONFIG_VP9_QSV_DECODER
/* ... */
#if CONFIG_AV1_QSV_DECODER
/* ... */
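Since gpu_copy and load_plugins above are private decoder options, a caller would set them through the AVOption API on the codec's private context. A minimal hedged sketch (decoder name and option value are illustrative, not mandated by this file):

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    /* Sketch: open h264_qsv with GPU-accelerated copies to system memory enabled. */
    static AVCodecContext *open_qsv_h264(void)
    {
        const AVCodec *codec = avcodec_find_decoder_by_name("h264_qsv");
        AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;
        if (!ctx)
            return NULL;
        av_opt_set(ctx->priv_data, "gpu_copy", "on", 0);   /* maps to MFX_GPUCOPY_ON */
        if (avcodec_open2(ctx, codec, NULL) < 0) {
            avcodec_free_context(&ctx);
            return NULL;
        }
        return ctx;
    }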
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int frame_type
A combination of MFX_MEMTYPE_* describing the frame pool.
void * hwaccel_context
Hardware accelerator context.
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *pkt)
enum AVColorSpace colorspace
YUV colorspace type.
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
AVBufferRef * hw_frames_ctx
uint8_t * data
The data buffer.
void av_fifo_free(AVFifoBuffer *f)
Free an AVFifoBuffer.
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
static unsigned int qsv_fifo_size(const AVFifoBuffer *fifo)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
int ff_qsv_close_internal_session(QSVSession *qs)
AVFrame
This structure describes decoded (raw) audio or video data.
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
enum AVFieldOrder field_order
Field order.
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
QSVFrame * work_frames
a linked list of frames currently being used by QSV
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
int width
The allocated dimensions of the frames in this pool.
int iopattern
The IO pattern to use.
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins, int gpu_copy)
AVFifoBuffer * packet_fifo
#define DEFINE_QSV_DECODER(x, X, bsf_name)
static const AVOption options[]
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt)
static const AVRational mfx_tb
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, const AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
#define ASYNC_DEPTH_DEFAULT
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
static av_cold int qsv_decode_init(AVCodecContext *avctx)
static unsigned int qsv_fifo_item_size(void)
QSVFramesContext frames_ctx
static int qsv_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static enum AVPixelFormat pix_fmts[]
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
static enum AVPixelFormat pix_fmt
int av_usleep(unsigned usec)
Sleep for a period of time.
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque, int gpu_copy)
AVClass
Describe the class of an AVClass context structure.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVRational
Rational number (pair of numerator and denominator).
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
int ff_qsv_print_iopattern(void *log_ctx, int mfx_iopattern, const char *extra_string)
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
AVFifoBuffer * async_fifo
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
static void qsv_clear_buffers(QSVDecContext *s)
mfxExtBuffer ** ext_buffers
#define PTS_TO_MFX_PTS(pts, pts_tb)
static void qsv_decode_close_qsvcontext(QSVContext *q)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
mfxSession session
If non-NULL, the session to use for encoding or decoding.
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *avpkt)
static av_cold int qsv_decode_close(AVCodecContext *avctx)
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb)
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
AVCodecContext
main external API structure.
enum AVPixelFormat orig_pix_fmt
static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
This struct is used for communicating QSV parameters between libavcodec and the caller.
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
AVQSVFramesContext
This struct is allocated as AVHWFramesContext.hwctx.
int coded_width
Bitstream width / height, may be different from width/height e.g.
int initial_pool_size
Initial size of the frame pool.
char * av_strdup(const char *s)
Duplicate a string.
AVBufferRef
A reference to a data buffer.
mfxExtDecodedFrameInfo dec_info
int ff_attach_decode_data(AVFrame *frame)
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
AVPacket
This structure stores compressed data.
static void qsv_clear_unused_frames(QSVContext *q)
int width
picture width / height.
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs, const char *load_plugins, int gpu_copy)
static void qsv_decode_flush(AVCodecContext *avctx)
static const AVCodecHWConfigInternal *const qsv_hw_configs[]