23 #include <stdatomic.h>
31 #define VVC_FRAME_FLAG_OUTPUT (1 << 0)
32 #define VVC_FRAME_FLAG_SHORT_REF (1 << 1)
33 #define VVC_FRAME_FLAG_LONG_REF (1 << 2)
34 #define VVC_FRAME_FLAG_BUMPING (1 << 3)
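These four flags share one bitmask per frame, so a frame can simultaneously be held for output and kept as a reference. A minimal, self-contained sketch of the mark/unmark pattern used with such flags (the DemoFrame type and helper names are illustrative; only the flag values come from this file):

#include <stdio.h>

#define VVC_FRAME_FLAG_OUTPUT    (1 << 0)
#define VVC_FRAME_FLAG_SHORT_REF (1 << 1)
#define VVC_FRAME_FLAG_LONG_REF  (1 << 2)
#define VVC_FRAME_FLAG_BUMPING   (1 << 3)

typedef struct DemoFrame { int flags; } DemoFrame;

static void mark(DemoFrame *f, int flag)   { f->flags |= flag;  }
static void unmark(DemoFrame *f, int flag) { f->flags &= ~flag; }

int main(void)
{
    DemoFrame f = { 0 };
    mark(&f, VVC_FRAME_FLAG_OUTPUT | VVC_FRAME_FLAG_SHORT_REF);
    unmark(&f, VVC_FRAME_FLAG_SHORT_REF); /* frame no longer a short-term ref */
    printf("still needed for output: %d\n", !!(f.flags & VVC_FRAME_FLAG_OUTPUT));
    return 0;
}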
59 frame->nb_rpl_elems = 0;
68 const int x_cb = x0 >> fc->ps.sps->ctb_log2_size_y;
69 const int y_cb = y0 >> fc->ps.sps->ctb_log2_size_y;
70 const int pic_width_cb = fc->ps.pps->ctb_width;
71 const int ctb_addr_rs = y_cb * pic_width_cb + x_cb;
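Lines 68-71 convert a luma sample position into a raster-scan CTB address: shift the coordinates down by the CTB size, then index row-major. A worked example under assumed parameters (128x128 CTBs, i.e. ctb_log2_size_y = 7, and a picture 15 CTBs wide):

#include <stdio.h>

int main(void)
{
    const int ctb_log2_size_y = 7;  /* assumed: 128x128 CTBs */
    const int pic_width_cb    = 15; /* assumed: e.g. ceil(1920 / 128) */
    const int x0 = 300, y0 = 200;   /* arbitrary luma sample position */

    const int x_cb = x0 >> ctb_log2_size_y;             /* 300 / 128 = 2 */
    const int y_cb = y0 >> ctb_log2_size_y;             /* 200 / 128 = 1 */
    const int ctb_addr_rs = y_cb * pic_width_cb + x_cb; /* 1 * 15 + 2 = 17 */

    printf("ctb_addr_rs = %d\n", ctb_addr_rs);
    return 0;
}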
128 frame->nb_rpl_elems = s->current_frame.nb_units;
131 if (!frame->tab_dmvr_mvf)
137 frame->ctb_count = pps->ctb_width * pps->ctb_height;
138 for (int j = 0; j < frame->ctb_count; j++)
142 if (!frame->progress)
157 const int poc = ph->poc;
180 else if (ph->r->ph_pic_output_flag)
183 if (!ph->r->ph_non_ref_pic_flag)
187 ref->sequence = s->seq_decode;
188 ref->frame->crop_left = fc->ps.pps->r->pps_conf_win_left_offset << fc->ps.sps->hshift[CHROMA];
189 ref->frame->crop_right = fc->ps.pps->r->pps_conf_win_right_offset << fc->ps.sps->hshift[CHROMA];
190 ref->frame->crop_top = fc->ps.pps->r->pps_conf_win_top_offset << fc->ps.sps->vshift[CHROMA];
191 ref->frame->crop_bottom = fc->ps.pps->r->pps_conf_win_bottom_offset << fc->ps.sps->vshift[CHROMA];
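The conformance-window offsets at lines 188-191 are signalled in chroma sample units, so they are scaled back to luma samples with the chroma subsampling shift. A tiny worked sketch for 4:2:0 content, where hshift[CHROMA] = vshift[CHROMA] = 1 (variable names here are illustrative):

#include <stdio.h>

int main(void)
{
    const int hshift = 1, vshift = 1;    /* assumed: 4:2:0 subsampling */
    const int win_left = 4, win_top = 2; /* example offsets, in chroma units */

    const int crop_left = win_left << hshift; /* 8 luma samples */
    const int crop_top  = win_top  << vshift; /* 4 luma samples */

    printf("crop_left=%d crop_top=%d\n", crop_left, crop_top);
    return 0;
}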
201 int min_poc = INT_MAX;
204 if (no_output_of_prior_pics_flag) {
208 frame->sequence == s->seq_output) {
217 frame->sequence == s->seq_output) {
219 if (frame->poc < min_poc || nb_output == 1) {
220 min_poc = frame->poc;
227 if (!flush && s->seq_output == s->seq_decode && sps &&
228 nb_output <= sps->r->sps_dpb_params.dpb_max_dec_pic_buffering_minus1[sps->r->sps_max_sublayers_minus1] + 1)
243 "Output frame with POC %d.\n",
frame->poc);
247 if (
s->seq_output !=
s->seq_decode)
248 s->seq_output = (
s->seq_output + 1) & 0xff;
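The decode/output sequence counters are kept modulo 256, which is why line 248 masks with 0xff after incrementing; the counter wraps instead of growing without bound:

#include <stdio.h>

int main(void)
{
    unsigned seq_output = 255;
    seq_output = (seq_output + 1) & 0xff; /* wraps to 0, not 256 */
    printf("seq_output = %u\n", seq_output);
    return 0;
}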
258 const int poc = fc->ps.ph.poc;
260 int min_poc = INT_MAX;
265 frame->sequence == s->seq_output &&
271 if (sps && dpb >= sps->r->sps_dpb_params.dpb_max_dec_pic_buffering_minus1[sps->r->sps_max_sublayers_minus1] + 1) {
275 frame->sequence == s->seq_output &&
278 min_poc = frame->poc;
286 frame->sequence == s->seq_output &&
287 frame->poc <= min_poc) {
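Bumping (lines 258-287) makes room in the DPB in two passes: first find the smallest POC still held for the current output sequence, then flag every held frame at or below that POC for output. A minimal sketch of that selection, assuming a plain array of (poc, flags) entries in place of the real DPB:

#include <limits.h>
#include <stdio.h>

#define FLAG_BUMPING (1 << 3)

typedef struct Entry { int poc; int flags; } Entry;

static void bump(Entry *dpb, int n)
{
    int min_poc = INT_MAX;

    for (int i = 0; i < n; i++)   /* pass 1: smallest POC still in the DPB */
        if (dpb[i].poc < min_poc)
            min_poc = dpb[i].poc;

    for (int i = 0; i < n; i++)   /* pass 2: flag candidates for output */
        if (dpb[i].poc <= min_poc)
            dpb[i].flags |= FLAG_BUMPING;
}

int main(void)
{
    Entry dpb[] = { { 8, 0 }, { 4, 0 }, { 6, 0 } };
    bump(dpb, 3);
    printf("poc 4 flagged: %d\n", !!(dpb[1].flags & FLAG_BUMPING));
    return 0;
}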
298 const unsigned mask = use_msb ? ~0 : fc->ps.sps->max_pic_order_cnt_lsb - 1;
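When use_msb is zero, only the POC LSBs participate in reference matching, so the mask at line 298 is max_pic_order_cnt_lsb - 1 (valid because max_pic_order_cnt_lsb is a power of two). A sketch of the masked comparison, assuming max_pic_order_cnt_lsb = 16:

#include <stdio.h>

int main(void)
{
    const unsigned max_pic_order_cnt_lsb = 16; /* assumed; always a power of 2 */
    const int use_msb = 0;
    const unsigned mask = use_msb ? ~0u : max_pic_order_cnt_lsb - 1;

    const int ref_poc = 35, wanted = 3;        /* 35 & 15 == 3 & 15 == 3 */
    printf("match: %d\n", (ref_poc & mask) == (wanted & mask));
    return 0;
}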
302 if (ref->frame->buf[0] && ref->sequence == s->seq_decode) {
326 if (!s->avctx->hwaccel) {
327 if (!sps->pixel_shift) {
333 for (int y = 0; y < (pps->height >> sps->vshift[i]); y++) {
342 frame->sequence = s->seq_decode;
352 int poc, int ref_flag, uint8_t use_msb)
396 if (!((sps->r->sps_weighted_pred_flag ||
397 sps->r->sps_weighted_bipred_flag) && i != 0))
403 const int lx, const int j, const int max_poc_lsb)
410 lt_poc += poc - delta * max_poc_lsb - (poc & (max_poc_lsb - 1));
411 *prev_delta_poc_msb = delta;
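Lines 410-411 rebuild a long-term reference POC from its signalled LSBs: strip the current picture's LSBs to get its MSB-aligned base, step back delta MSB cycles, and add the signalled LSBs. A worked instance with assumed values (max_poc_lsb = 16, current poc = 35 so its LSBs are 3, signalled LSBs 10, delta = 1):

#include <stdio.h>

int main(void)
{
    const int max_poc_lsb = 16; /* assumed: 2^sps_log2_max_pic_order_cnt_lsb */
    const int poc   = 35;       /* current picture; 35 & 15 = 3 */
    const int delta = 1;        /* one MSB cycle back */
    int lt_poc = 10;            /* signalled long-term POC LSBs */

    lt_poc += poc - delta * max_poc_lsb - (poc & (max_poc_lsb - 1));
    printf("long-term ref poc = %d\n", lt_poc); /* 10 + 35 - 16 - 3 = 26 */
    return 0;
}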
422 const int max_poc_lsb = sps->max_pic_order_cnt_lsb;
431 for (int lx = L0; lx <= L1; lx++) {
434 int poc_base = ph->poc;
435 int prev_delta_poc_msb = 0;
449 poc = poc_lt(&prev_delta_poc_msb, ph->poc, ref_lists, lx, j, max_poc_lsb);
528 prev = &(*prev)->next;
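Line 528's prev = &(*prev)->next is the pointer-to-pointer idiom the listener list functions (add_listener/remove_listener) rely on: by walking the address of each next pointer, append and removal need no special case for the head. A standalone sketch with an illustrative node type:

#include <stdio.h>

typedef struct Node { int val; struct Node *next; } Node;

/* Walk the pointer-to-pointer until it addresses the NULL tail, then link. */
static void append(Node **prev, Node *n)
{
    while (*prev)
        prev = &(*prev)->next;
    n->next = NULL;
    *prev = n;
}

/* Redirect whichever pointer currently addresses the node being removed. */
static void remove_node(Node **prev, Node *n)
{
    while (*prev) {
        if (*prev == n) {
            *prev = n->next;
            return;
        }
        prev = &(*prev)->next;
    }
}

int main(void)
{
    Node a = { 1, NULL }, b = { 2, NULL };
    Node *head = NULL;
    append(&head, &a);
    append(&head, &b);
    remove_node(&head, &a);
    printf("head->val = %d\n", head->val); /* 2 */
    return 0;
}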
static int add_candidate_ref(VVCContext *s, VVCFrameContext *fc, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
uint16_t poc_lsb_lt[2][VVC_MAX_REF_ENTRIES]
uint32_t num_ctus_in_curr_slice
NumCtusInCurrSlice.
static int ff_mutex_init(AVMutex *mutex, const void *attr)
void ff_vvc_report_frame_finished(VVCFrame *frame)
uint8_t * data
The data buffer.
static void * ff_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(FFRefStructOpaque opaque, void *obj))
A wrapper around ff_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
static int FUNC() ph(CodedBitstreamContext *ctx, RWContext *rw, H266RawPH *current)
This structure describes decoded (raw) audio or video data.
static int is_progress_done(const FrameProgress *p, const VVCProgressListener *l)
const H266RawSliceHeader * r
RefStruct reference.
uint8_t st_ref_pic_flag[VVC_MAX_REF_ENTRIES]
int ff_vvc_slice_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
#define fc(width, name, range_min, range_max)
uint8_t ltrp_in_header_flag
RefStruct is an API for creating reference-counted objects with minimal overhead; a usage sketch follows this reference list.
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
static void free_progress(FFRefStructOpaque unused, void *obj)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int ff_mutex_unlock(AVMutex *mutex)
uint8_t inter_layer_ref_pic_flag[VVC_MAX_REF_ENTRIES]
H266RefPicListStruct rpl_ref_list[2]
VVCProgressListener * listener[VVC_PROGRESS_LAST]
uint8_t delta_poc_msb_cycle_present_flag[2][VVC_MAX_REF_ENTRIES]
void ff_vvc_report_progress(VVCFrame *frame, const VVCProgress vp, const int y)
void ff_vvc_unref_frame(VVCFrameContext *fc, VVCFrame *frame, int flags)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define VVC_FRAME_FLAG_LONG_REF
static const uint16_t mask[17]
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static VVCFrame * generate_missing_ref(VVCContext *s, VVCFrameContext *fc, int poc)
static int delta_poc_st(const H266RefPicListStruct *rpls, const int lx, const int i, const VVCSPS *sps)
struct VVCFrame * ref[VVC_MAX_REF_ENTRIES]
void ff_vvc_clear_refs(VVCFrameContext *fc)
static void add_listener(VVCProgressListener **prev, VVCProgressListener *l)
uint8_t abs_delta_poc_st[VVC_MAX_REF_ENTRIES]
void ff_vvc_flush_dpb(VVCFrameContext *fc)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
uint8_t strp_entry_sign_flag[VVC_MAX_REF_ENTRIES]
void(* flush)(AVBSFContext *ctx)
static int init_slice_rpl(const VVCFrameContext *fc, SliceContext *sc)
static void * ff_refstruct_allocz(size_t size)
Equivalent to ff_refstruct_alloc_ext(size, 0, NULL, NULL)
uint8_t rpls_poc_lsb_lt[VVC_MAX_REF_ENTRIES]
static VVCFrame * find_ref_idx(VVCContext *s, VVCFrameContext *fc, int poc, uint8_t use_msb)
int ff_vvc_output_frame(VVCContext *s, VVCFrameContext *fc, AVFrame *out, const int no_output_of_prior_pics_flag, int flush)
static int ff_mutex_destroy(AVMutex *mutex)
static int poc_lt(int *prev_delta_poc_msb, const int poc, const H266RefPicLists *ref_lists, const int lx, const int j, const int max_poc_lsb)
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H266RawPPS *current)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static void mark_ref(VVCFrame *frame, int flag)
static FrameProgress * alloc_progress(void)
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
const uint32_t * ctb_addr_in_curr_slice
CtbAddrInCurrSlice.
static int ff_mutex_lock(AVMutex *mutex)
size_t size
Size of data in bytes.
#define i(width, name, range_min, range_max)
atomic_int progress[VVC_PROGRESS_LAST]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
#define VVC_FRAME_FLAG_OUTPUT
#define GDR_IS_RECOVERED(s)
VVCProgressListener * next
uint16_t delta_poc_msb_cycle_lt[2][VVC_MAX_REF_ENTRIES]
static VVCProgressListener * get_done_listener(FrameProgress *p, const VVCProgress vp)
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H266RawSPS *current)
static VVCProgressListener * remove_listener(VVCProgressListener **prev, VVCProgressListener *l)
static int ref[MAX_W * MAX_W]
static int ff_cond_signal(AVCond *cond)
int ff_vvc_frame_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
#define VVC_FRAME_FLAG_BUMPING
static VVCFrame * alloc_frame(VVCContext *s, VVCFrameContext *fc)
#define VVC_FRAME_FLAG_SHORT_REF
const RefPicList * ff_vvc_get_ref_list(const VVCFrameContext *fc, const VVCFrame *ref, int x0, int y0)
static int ff_cond_destroy(AVCond *cond)
progress_done_fn progress_done
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, typically indicating the size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int ff_cond_init(AVCond *cond, const void *attr)
int ff_vvc_set_new_ref(VVCContext *s, VVCFrameContext *fc, AVFrame **frame)
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
void ff_vvc_add_progress_listener(VVCFrame *frame, VVCProgressListener *l)
void ff_vvc_bump_frame(VVCContext *s, VVCFrameContext *fc)
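As noted at the RefStruct entry above, here is a minimal usage sketch of the two RefStruct calls this file relies on: allocate a zeroed, reference-counted object, then drop the reference (the object is freed once the count hits zero). This is a compile-only sketch; refstruct.h is an FFmpeg-internal header, so the prototypes are restated by hand, and the DemoProgress payload is illustrative:

#include <stddef.h>

/* Restated from libavcodec/refstruct.h (internal header). */
void *ff_refstruct_allocz(size_t size);
void  ff_refstruct_unref(void *objp);

/* Illustrative payload; the real FrameProgress holds atomics and listeners. */
typedef struct DemoProgress { int rows_done; } DemoProgress;

static DemoProgress *demo_alloc(void)
{
    /* Zero-initialized allocation; reference count starts at one. */
    return ff_refstruct_allocz(sizeof(DemoProgress));
}

static void demo_release(DemoProgress **pp)
{
    /* Takes the address of the pointer, unrefs, and NULLs *pp. */
    ff_refstruct_unref(pp);
}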