Perform QSV-accelerated H.264 decoding with output frames in GPU video surfaces, and write the decoded frames to an output file.
#include <stdio.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mem.h>

#ifndef FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
#endif

/* Pick the QSV hardware pixel format from the list offered by the decoder. */
static enum AVPixelFormat get_format(AVCodecContext *avctx,
                                     const enum AVPixelFormat *pix_fmts)
{
    while (*pix_fmts != AV_PIX_FMT_NONE) {
        if (*pix_fmts == AV_PIX_FMT_QSV)
            return AV_PIX_FMT_QSV;
        pix_fmts++;
    }
    fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
    return AV_PIX_FMT_NONE;
}

/* Send one packet to the decoder, download every decoded frame from the GPU
 * surface into sw_frame and append its raw planes to the output. */
static int decode_packet(AVCodecContext *decoder_ctx, AVFrame *frame, AVFrame *sw_frame,
                         AVPacket *pkt, AVIOContext *output_ctx)
{
    int ret = avcodec_send_packet(decoder_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (ret >= 0) {
        int i, j;

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }

        /* copy the frame data from the GPU video surface to system memory */
        ret = av_hwframe_transfer_data(sw_frame, frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error transferring the data to system memory\n");
            goto fail;
        }

        /* write the raw planes; the chroma plane has half the luma height */
        for (i = 0; i < FF_ARRAY_ELEMS(sw_frame->data) && sw_frame->data[i]; i++)
            for (j = 0; j < (sw_frame->height >> (i > 0)); j++)
                avio_write(output_ctx,
                           sw_frame->data[i] + j * sw_frame->linesize[i],
                           sw_frame->width);

fail:
        av_frame_unref(sw_frame);
        av_frame_unref(frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}

int main(int argc, char **argv)
{
    AVFormatContext *input_ctx = NULL;
    AVIOContext *output_ctx = NULL;
    AVStream *video_st = NULL;
    AVCodecContext *decoder_ctx = NULL;
    const AVCodec *decoder;
    AVBufferRef *device_ref = NULL;
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVPacket *pkt = NULL;
    int ret, i;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    /* open the input file */
    ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
        goto finish;
    }

    /* find the first H.264 video stream; all other streams are discarded */
    for (i = 0; i < input_ctx->nb_streams; i++) {
        AVStream *st = input_ctx->streams[i];
        if (st->codecpar->codec_id == AV_CODEC_ID_H264 && !video_st)
            video_st = st;
        else
            st->discard = AVDISCARD_ALL;
    }
    if (!video_st) {
        fprintf(stderr, "No H.264 video stream in the input file\n");
        ret = AVERROR(EINVAL);
        goto finish;
    }

    /* open the QSV hardware device */
    ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV, "auto", NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot open the hardware device\n");
        goto finish;
    }

    /* initialize the h264_qsv decoder */
    decoder = avcodec_find_decoder_by_name("h264_qsv");
    if (!decoder) {
        fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
        ret = AVERROR_DECODER_NOT_FOUND;
        goto finish;
    }
    decoder_ctx = avcodec_alloc_context3(decoder);
    if (!decoder_ctx) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    decoder_ctx->codec_id = AV_CODEC_ID_H264;

    /* copy the stream extradata (SPS/PPS) into the decoder context */
    if (video_st->codecpar->extradata_size) {
        decoder_ctx->extradata = av_mallocz(video_st->codecpar->extradata_size +
                                            AV_INPUT_BUFFER_PADDING_SIZE);
        if (!decoder_ctx->extradata) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }
        memcpy(decoder_ctx->extradata, video_st->codecpar->extradata,
               video_st->codecpar->extradata_size);
        decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
    }

    /* hand the hardware device to the decoder and install the format callback */
    decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
    decoder_ctx->get_format    = get_format;

    ret = avcodec_open2(decoder_ctx, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error opening the decoder: ");
        goto finish;
    }

    /* open the raw output file */
    ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Error opening the output context: ");
        goto finish;
    }

    frame    = av_frame_alloc();
    sw_frame = av_frame_alloc();
    pkt      = av_packet_alloc();
    if (!frame || !sw_frame || !pkt) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }

    /* read packets from the input and decode those of the video stream */
    while (ret >= 0) {
        ret = av_read_frame(input_ctx, pkt);
        if (ret < 0)
            break;
        if (pkt->stream_index == video_st->index)
            ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
        av_packet_unref(pkt);
    }

    /* flush the decoder with a NULL packet */
    ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);

finish:
    if (ret < 0) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "%s\n", buf);
    }

    avformat_close_input(&input_ctx);
    av_frame_free(&frame);
    av_frame_free(&sw_frame);
    av_packet_free(&pkt);
    avcodec_free_context(&decoder_ctx);
    av_buffer_unref(&device_ref);
    avio_close(output_ctx);

    return ret;
}
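After a successful run the output file contains the raw decoded planes exactly as written by decode_packet(). For 8-bit H.264 the h264_qsv decoder normally downloads to NV12, so the dump can typically be inspected with a raw-video player (for example ffplay with -f rawvideo, -pixel_format nv12 and the stream's -video_size), although the exact software format is decoder-dependent and should be checked via sw_frame->format.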
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
AVPixelFormat
Pixel format.
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
#define AVERROR_EOF
End of file.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
static enum AVPixelFormat get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extradata.
AVFrame
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
int avio_open(AVIOContext **s, const char *filename, int flags)
Create and initialize an AVIOContext for accessing the resource indicated by url.
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
void av_packet_free(AVPacket **pkt)
Free the packet; if the packet is reference counted, it will be unreferenced first.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define FF_ARRAY_ELEMS(a)
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
#define AVIO_FLAG_WRITE
write-only
@ AVDISCARD_ALL
discard all
AVCodecParameters * codecpar
Codec parameters associated with this stream.
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
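Because data[3] carries the Media SDK surface, a decoded AV_PIX_FMT_QSV frame can also be handed to native Media SDK / oneVPL code without downloading it first. A minimal sketch, assuming the Media SDK headers are available through libavutil/hwcontext_qsv.h; the helper name qsv_surface_from_frame is hypothetical:
#include <libavutil/frame.h>
#include <libavutil/hwcontext_qsv.h>   /* pulls in the Media SDK types, e.g. mfxFrameSurface1 */
#include <libavutil/pixfmt.h>

/* Hypothetical helper: return the Media SDK surface behind a decoded QSV frame,
 * or NULL if the frame is not an AV_PIX_FMT_QSV hardware frame. */
static mfxFrameSurface1 *qsv_surface_from_frame(const AVFrame *frame)
{
    if (frame->format != AV_PIX_FMT_QSV)
        return NULL;
    return (mfxFrameSurface1 *)frame->data[3];
}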
int extradata_size
Size of the extradata content in bytes.
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
int main(int argc, char **argv)
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
static int decode_packet(AVCodecContext *decoder_ctx, AVFrame *frame, AVFrame *sw_frame, AVPacket *pkt, AVIOContext *output_ctx)
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
#define AV_INPUT_BUFFER_PADDING_SIZE
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
AVCodecContext
main external API structure.
int index
stream index in AVFormatContext
AVBufferRef
A reference to a data buffer.
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
AVPacket
This structure stores compressed data.
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...