Go to the documentation of this file.
32 #define xavs2_opt_set2(name, format, ...) do{ \
33 char opt_str[16] = {0}; \
35 av_strlcatf(opt_str, sizeof(opt_str), format, __VA_ARGS__); \
36 err = cae->api->opt_set2(cae->param, name, opt_str); \
38 av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s\n", name, opt_str);\
59 const xavs2_api_t *
api;
136 for (plane = 0; plane < 3; plane++) {
137 p_plane = (uint16_t *)pic->img.img_planes[plane];
139 for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
140 memset(p_plane, 0, pic->img.i_stride[plane]);
141 for (wIdx = 0; wIdx < pic->img.i_width[plane]; wIdx++) {
142 p_plane[wIdx] = p_buffer[wIdx] << shift_in;
144 p_plane += pic->img.i_stride[plane];
158 for (plane = 0; plane < 3; plane++) {
159 p_plane = pic->img.img_planes[plane];
161 stride = pic->img.i_width[plane] * pic->img.in_sample_size;
162 for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
163 memcpy(p_plane, p_buffer,
stride);
164 p_plane += pic->img.i_stride[plane];
179 if (cae->
api->encoder_get_buffer(cae->
encoder, &pic) < 0) {
186 if (pic.img.in_sample_size == pic.img.enc_sample_size) {
189 const int shift_in = atoi(cae->
api->opt_get(cae->
param,
"SampleShift"));
194 if (pic.img.in_sample_size == pic.img.enc_sample_size) {
206 pic.i_type = XAVS2_TYPE_AUTO;
219 if ((cae->
packet.len) && (cae->
packet.state != XAVS2_STATE_FLUSH_END)) {
228 if (cae->
packet.type == XAVS2_TYPE_IDR ||
229 cae->
packet.type == XAVS2_TYPE_I ||
230 cae->
packet.type == XAVS2_TYPE_KEYFRAME) {
260 #define OFFSET(x) offsetof(XAVS2EContext, x)
261 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
264 {
"lcu_row_threads" ,
"number of parallel threads for rows" ,
OFFSET(lcu_row_threads) ,
AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX,
VE },
265 {
"initial_qp" ,
"Quantization initial parameter" ,
OFFSET(initial_qp) ,
AV_OPT_TYPE_INT, {.i64 = 34 }, 1, 63,
VE },
269 {
"speed_level" ,
"Speed level, higher is better but slower",
OFFSET(preset_level) ,
AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 9,
VE },
270 {
"log_level" ,
"log level: -1: none, 0: error, 1: warning, 2: info, 3: debug",
OFFSET(log_level) ,
AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3,
VE },
271 {
"xavs2-params" ,
"set the xavs2 configuration using a :-separated list of key=value parameters",
OFFSET(xavs2_opts),
AV_OPT_TYPE_DICT, { 0 }, 0, 0,
VE },
290 .
p.
name =
"libxavs2",
306 .p.wrapper_name =
"libxavs2",
static void xavs2_copy_frame(xavs2_picture_t *pic, const AVFrame *frame)
AVPixelFormat
Pixel format.
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Each input and each output declares the list of formats it supports: for video that means pixel formats, for audio that means channel layouts and sample formats. These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static void xavs2_copy_frame_with_shift(xavs2_picture_t *pic, const AVFrame *frame, const int shift_in)
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
AVDictionary * xavs2_opts
#define AV_PIX_FMT_YUV420P10
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e.
static const AVClass libxavs2
int qmax
maximum quantizer
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
void ff_mpeg12_find_best_frame_rate(AVRational frame_rate, int *code, int *ext_n, int *ext_d, int nonstandard)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
AVCodec p
The public AVCodec.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
int flags
AV_CODEC_FLAG_*.
#define FF_CODEC_ENCODE_CB(func)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
static av_cold int xavs2_init(AVCodecContext *avctx)
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
const FFCodec ff_libxavs2_encoder
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
#define xavs2_opt_set2(name, format,...)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Test the status of the outputs and forward it to the corresponding inputs; if none of these tests succeeds, return FFERROR_NOT_READY. If the filter stores one or a few frames internally for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_CODEC_FLAG_CLOSED_GOP
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
int qmin
minimum quantizer
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
static const FFCodecDefault xavs2_defaults[]
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
static const AVOption options[]
static av_cold int xavs2_close(AVCodecContext *avctx)