Go to the documentation of this file.
22 #include <dav1d/dav1d.h>
38 #define FF_DAV1D_VERSION_AT_LEAST(x,y) \
39 (DAV1D_API_VERSION_MAJOR > (x) || DAV1D_API_VERSION_MAJOR == (x) && DAV1D_API_VERSION_MINOR >= (y))
79 uint8_t *aligned_ptr, *
data[4];
104 aligned_ptr = (uint8_t *)
FFALIGN((uintptr_t)buf->
data, DAV1D_PICTURE_ALIGNMENT);
106 DAV1D_PICTURE_ALIGNMENT);
112 p->data[0] =
data[0];
113 p->data[1] =
data[1];
114 p->data[2] =
data[2];
115 p->stride[0] = linesize[0];
116 p->stride[1] = linesize[1];
117 p->allocator_data = buf;
131 c->profile = seq->profile;
132 c->level = ((seq->operating_points[0].major_level - 2) << 2)
133 | seq->operating_points[0].minor_level;
136 case DAV1D_CHR_VERTICAL:
139 case DAV1D_CHR_COLOCATED:
148 if (seq->layout == DAV1D_PIXEL_LAYOUT_I444 &&
149 seq->mtrx == DAV1D_MC_IDENTITY &&
150 seq->pri == DAV1D_COLOR_PRI_BT709 &&
151 seq->trc == DAV1D_TRC_SRGB)
154 c->pix_fmt =
pix_fmt[seq->layout][seq->hbd];
156 if (seq->num_units_in_tick && seq->time_scale) {
158 seq->num_units_in_tick, seq->time_scale, INT_MAX);
159 if (seq->equal_picture_interval)
160 c->ticks_per_frame = seq->num_ticks_per_picture;
163 if (seq->film_grain_present)
171 Dav1dSequenceHeader seq;
175 if (!
c->extradata ||
c->extradata_size <= 0)
178 if (
c->extradata[0] & 0x80) {
179 int version =
c->extradata[0] & 0x7F;
181 if (
version != 1 ||
c->extradata_size < 4) {
184 "Error decoding extradata\n");
189 if (
c->extradata_size == 4)
195 res = dav1d_parse_sequence_header(&seq,
c->extradata +
offset,
212 #if FF_DAV1D_VERSION_AT_LEAST(6,0)
213 int threads =
c->thread_count;
215 int threads = (
c->thread_count ?
c->thread_count :
av_cpu_count()) * 3 / 2;
221 dav1d_default_settings(&
s);
224 s.allocator.cookie = dav1d;
227 s.frame_size_limit =
c->max_pixels;
236 #if FF_DAV1D_VERSION_AT_LEAST(6,2)
237 s.strict_std_compliance =
c->strict_std_compliance > 0;
240 #if FF_DAV1D_VERSION_AT_LEAST(6,0)
244 s.n_threads =
FFMIN(threads, DAV1D_MAX_THREADS);
249 s.n_threads,
s.max_frame_delay);
260 s.n_frame_threads,
s.n_tile_threads);
263 #if FF_DAV1D_VERSION_AT_LEAST(6,8)
265 s.decode_frame_type = DAV1D_DECODEFRAMETYPE_KEY;
267 s.decode_frame_type = DAV1D_DECODEFRAMETYPE_INTRA;
269 s.decode_frame_type = DAV1D_DECODEFRAMETYPE_REFERENCE;
276 res = dav1d_open(&dav1d->
c, &
s);
287 dav1d_data_unref(&dav1d->
data);
288 dav1d_flush(dav1d->
c);
293 #if FF_API_REORDERED_OPAQUE
294 int64_t reordered_opaque;
315 Dav1dPicture pic = { 0 }, *p = &pic;
318 #if FF_DAV1D_VERSION_AT_LEAST(5,1)
319 enum Dav1dEventFlags event_flags = 0;
354 dav1d_data_unref(
data);
358 #if FF_API_REORDERED_OPAQUE
359 od->reordered_opaque =
c->reordered_opaque;
365 res = dav1d_data_wrap_user_data(
data, (
const uint8_t *)
pkt,
370 dav1d_data_unref(
data);
381 res = dav1d_send_data(dav1d->
c,
data);
386 dav1d_data_unref(
data);
391 res = dav1d_get_picture(dav1d->
c, p);
395 else if (res ==
AVERROR(EAGAIN) &&
c->internal->draining)
405 if (!
frame->buf[0]) {
406 dav1d_picture_unref(p);
410 frame->data[0] = p->data[0];
411 frame->data[1] = p->data[1];
412 frame->data[2] = p->data[2];
413 frame->linesize[0] = p->stride[0];
414 frame->linesize[1] = p->stride[1];
415 frame->linesize[2] = p->stride[1];
417 #if FF_DAV1D_VERSION_AT_LEAST(5,1)
418 dav1d_get_event_flags(dav1d->
c, &event_flags);
420 event_flags & DAV1D_EVENT_FLAG_NEW_SEQUENCE)
427 frame->width = p->p.w;
428 frame->height = p->p.h;
429 if (
c->width != p->p.w ||
c->height != p->p.h) {
436 &
frame->sample_aspect_ratio.den,
437 frame->height * (int64_t)p->frame_hdr->render_width,
438 frame->width * (int64_t)p->frame_hdr->render_height,
447 frame->reordered_opaque = od->reordered_opaque;
465 frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
467 switch (p->frame_hdr->frame_type) {
468 case DAV1D_FRAME_TYPE_KEY:
469 case DAV1D_FRAME_TYPE_INTRA:
472 case DAV1D_FRAME_TYPE_INTER:
475 case DAV1D_FRAME_TYPE_SWITCH:
483 if (p->mastering_display) {
490 for (
int i = 0;
i < 3;
i++) {
503 if (p->content_light) {
509 light->
MaxCLL = p->content_light->max_content_light_level;
510 light->
MaxFALL = p->content_light->max_frame_average_light_level;
514 unsigned int user_identifier;
519 user_identifier = bytestream2_get_be32(&gb);
520 switch (user_identifier) {
521 case MKBETAG(
'G',
'A',
'9',
'4'): {
540 if (p->frame_hdr->film_grain.present && (!dav1d->
apply_grain ||
549 fgp->
seed = p->frame_hdr->film_grain.data.seed;
571 memcpy(&fgp->
codec.
aom.
uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
581 dav1d_picture_unref(p);
592 dav1d_data_unref(&dav1d->
data);
593 dav1d_close(&dav1d->
c);
598 #ifndef DAV1D_MAX_FRAME_THREADS
599 #define DAV1D_MAX_FRAME_THREADS DAV1D_MAX_THREADS
601 #ifndef DAV1D_MAX_TILE_THREADS
602 #define DAV1D_MAX_TILE_THREADS DAV1D_MAX_THREADS
604 #ifndef DAV1D_MAX_FRAME_DELAY
605 #define DAV1D_MAX_FRAME_DELAY DAV1D_MAX_FRAME_THREADS
608 #define OFFSET(x) offsetof(Libdav1dContext, x)
609 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
615 {
"oppoint",
"Select an operating point of the scalable bitstream",
OFFSET(operating_point),
AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31,
VD },
628 .
p.
name =
"libdav1d",
641 .p.wrapper_name =
"libdav1d",
void av_vlog(void *avcl, int level, const char *fmt, va_list vl)
Send the specified message to the log if the level is less than or equal to the current av_log_level.
#define FF_ENABLE_DEPRECATION_WARNINGS
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
#define AV_EF_EXPLODE
abort decoding on minor error detection
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVColorTransferCharacteristic
Color Transfer Characteristic.
static const AVClass libdav1d_class
#define FF_API_REORDERED_OPAQUE
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
uint8_t uv_points[2][10][2]
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P10
static void libdav1d_user_data_free(const uint8_t *data, void *opaque)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
#define AV_CODEC_FLAG_COPY_OPAQUE
AVCodec p
The public AVCodec.
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
#define AV_PIX_FMT_GBRP10
static void libdav1d_init_params(AVCodecContext *c, const Dav1dSequenceHeader *seq)
static av_cold int libdav1d_init(AVCodecContext *c)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx, AVFrame *frame, const AVPacket *pkt)
Set various frame properties from the provided packet.
int limit_output_range
Signals to clip to limited color levels after film grain application.
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for the luma component.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define AV_PIX_FMT_YUV444P10
#define DAV1D_MAX_FRAME_THREADS
static __device__ float ceil(float a)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
static __device__ float floor(float a)
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
int(* init)(AVBSFContext *ctx)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void * opaque
for some private data of the user
#define CODEC_LONG_NAME(str)
#define FF_CODEC_PROPERTY_FILM_GRAIN
#define AV_PIX_FMT_GRAY10
int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
Parse a data array for ATSC A53 Part 4 Closed Captions and store them in an AVBufferRef.
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
#define DAV1D_MAX_FRAME_DELAY
@ AVDISCARD_NONKEY
discard all frames except keyframes
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array.
static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
static enum AVPixelFormat pix_fmt_rgb[3]
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
#define FF_CODEC_CAP_SETS_FRAME_PROPS
Codec handles output frame properties internally instead of letting the internal logic derive them from the codec context / packet.
#define AV_PIX_FMT_YUV422P12
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define MKBETAG(a, b, c, d)
#define AV_PIX_FMT_YUV444P12
This structure describes how to handle film grain synthesis in video for specific codecs.
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
#define AV_LOG_INFO
Standard information.
const FFCodec ff_libdav1d_decoder
@ AVDISCARD_NONINTRA
discard all non intra frames
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_PIX_FMT_GBRP12
AVColorSpace
YUV colorspace type.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
int scaling_shift
Specifies the shift applied to the chroma components.
const char * name
Name of the codec implementation.
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define AV_PIX_FMT_YUV420P12
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
main external API structure.
int ar_coeff_lag
Specifies the auto-regression lag.
#define FF_CODEC_RECEIVE_FRAME_CB(func)
int uv_offset[2]
Offset used for component scaling function.
static enum AVPixelFormat pix_fmt[][3]
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
int overlap_flag
Signals whether to overlap film grain blocks.
static const AVOption libdav1d_options[]
@ AV_PICTURE_TYPE_P
Predicted.
static void libdav1d_flush(AVCodecContext *c)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
void(* flush)(AVBSFContext *ctx)
A reference to a data buffer.
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
static void libdav1d_data_free(const uint8_t *data, void *opaque)
unsigned MaxFALL
Max average light level per frame (cd/m^2).
This structure stores compressed data.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_GRAY12
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
@ AVDISCARD_NONREF
discard all non reference
union AVFilmGrainParams::@319 codec
Additional fields may be added both here and in any structure included.
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
#define DAV1D_MAX_TILE_THREADS
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
static av_cold int libdav1d_parse_extradata(AVCodecContext *c)
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
static av_cold int libdav1d_close(AVCodecContext *c)
#define AV_OPT_FLAG_DEPRECATED
set if option is deprecated, users should refer to AVOption.help text for more information
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.