#ifndef AVCODEC_HEVCDEC_H
#define AVCODEC_HEVCDEC_H

#include <stdatomic.h>

#define SHIFT_CTB_WPP 2
#define MAX_TB_SIZE 32
#define DEFAULT_INTRA_TC_OFFSET 2
#define HEVC_CONTEXTS 199
#define HEVC_STAT_COEFFS 4
#define MRG_MAX_NUM_CANDS 5
#define EPEL_EXTRA_BEFORE 1
#define EPEL_EXTRA_AFTER 2
#define QPEL_EXTRA_BEFORE 3
#define QPEL_EXTRA_AFTER 4
#define EDGE_EMU_BUFFER_STRIDE 80
#define SAMPLE(tab, x, y) ((tab)[(y) * s->sps->width + (x)])
#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)])
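Both macros flatten a 2-D lookup into a row-major 1-D table: SAMPLE indexes a table stored at full sample resolution (stride s->sps->width), SAMPLE_CTB a table stored at minimum-coding-block granularity (stride min_cb_width, which must be in scope). A minimal sketch, assuming the usual HEVCSPS fields min_cb_width and log2_min_cb_size and the per-CB intra-prediction-mode table tab_ipm:

    int min_cb_width = s->sps->min_cb_width;            /* SAMPLE_CTB expects this local */
    int x_cb = x0 >> s->sps->log2_min_cb_size;          /* sample coords -> min-CB coords */
    int y_cb = y0 >> s->sps->log2_min_cb_size;
    uint8_t mode = SAMPLE_CTB(s->tab_ipm, x_cb, y_cb);  /* tab_ipm[y_cb * min_cb_width + x_cb] */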
#define IS_IDR(s) ((s)->nal_unit_type == HEVC_NAL_IDR_W_RADL || (s)->nal_unit_type == HEVC_NAL_IDR_N_LP)
#define IS_BLA(s) ((s)->nal_unit_type == HEVC_NAL_BLA_W_RADL || (s)->nal_unit_type == HEVC_NAL_BLA_W_LP || \
                   (s)->nal_unit_type == HEVC_NAL_BLA_N_LP)
#define IS_IRAP(s) ((s)->nal_unit_type >= 16 && (s)->nal_unit_type <= 23)
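IS_IRAP covers NAL unit types 16 through 23, i.e. the whole intra-random-access-point range of Table 7-1: BLA (16..18), IDR (19..20), CRA (21) and the reserved IRAP types (22..23), so every picture matching IS_IDR or IS_BLA also matches IS_IRAP. A minimal sketch of how the predicates nest, assuming an HEVCContext *s with nal_unit_type already set:

    if (IS_IRAP(s)) {
        int is_idr = IS_IDR(s);  /* IDR_W_RADL or IDR_N_LP */
        int is_bla = IS_BLA(s);  /* BLA_W_LP, BLA_W_RADL or BLA_N_LP */
        av_log(s->avctx, AV_LOG_DEBUG, "IRAP picture: idr=%d bla=%d\n", is_idr, is_bla);
    }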
#define HEVC_FRAME_FLAG_OUTPUT    (1 << 0)
#define HEVC_FRAME_FLAG_SHORT_REF (1 << 1)
#define HEVC_FRAME_FLAG_LONG_REF  (1 << 2)
#define HEVC_FRAME_FLAG_BUMPING   (1 << 3)
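These are single-bit flags combined in the flags field of a HEVCFrame (see the uint8_t flags entry below). A hedged sketch of the usual set/test/clear pattern, assuming a HEVCFrame *frame:

    frame->flags |= HEVC_FRAME_FLAG_OUTPUT;      /* frame still has to be output */
    if (frame->flags & (HEVC_FRAME_FLAG_SHORT_REF | HEVC_FRAME_FLAG_LONG_REF)) {
        /* frame is still needed for reference, keep it in the DPB */
    }
    frame->flags &= ~HEVC_FRAME_FLAG_OUTPUT;     /* clear once the frame has been output */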
#define BOUNDARY_LEFT_SLICE  (1 << 0)
#define BOUNDARY_LEFT_TILE   (1 << 1)
#define BOUNDARY_UPPER_SLICE (1 << 2)
#define BOUNDARY_UPPER_TILE  (1 << 3)
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv);
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv, int mvp_lx_flag,
                              int LX);
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size);
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
                                 int log2_trafo_size, enum ScanType scan_idx,
                                 int c_idx);
uint16_t seq_decode
Sequence counters for decoded and output frames, so that old frames are output first after a POC reset.
int temporal_id
temporal_id_plus1 - 1
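The value is nuh_temporal_id_plus1 minus one; nuh_temporal_id_plus1 occupies the low three bits of the second byte of the two-byte HEVC NAL unit header. A small illustrative sketch of extracting both fields from a raw header, independent of the decoder's own parser:

    /* nal[0], nal[1]: the two-byte NAL unit header */
    int nal_unit_type = (nal[0] >> 1) & 0x3f;  /* bits 1..6 of the first byte */
    int temporal_id   = (nal[1] & 0x07) - 1;   /* nuh_temporal_id_plus1 - 1 */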
uint8_t * filter_slice_edges
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
AVBufferRef * hwaccel_priv_buf
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
void ff_hevc_bump_frame(HEVCContext *s)
uint8_t slice_initialized
1 if the independent slice segment header was successfully parsed
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
AVFrame
This structure describes decoded (raw) audio or video data.
void * hwaccel_picture_private
const uint8_t ff_hevc_qpel_extra_before[4]
@ LAST_SIGNIFICANT_COEFF_X_PREFIX
int16_t y
vertical component of motion vector
@ CU_CHROMA_QP_OFFSET_FLAG
uint8_t * sao_pixel_buffer_h[3]
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
Decoded Picture Buffer (DPB).
uint8_t * checksum_buf
used on big-endian systems to byteswap the lines for checksumming
int ff_hevc_output_frame(HEVCContext *s, AVFrame *frame, int flush)
Find next frame in output order and put a reference to it in frame.
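A hedged usage sketch, assuming the common convention that the function returns 1 when it has written a reference into frame, 0 when no frame is ready, and a negative error code on failure; passing a nonzero flush is what lets the decoder drain the remaining DPB frames at end of stream (locals out, ret and got_frame are assumed):

    AVFrame *out = av_frame_alloc();
    if (!out)
        return AVERROR(ENOMEM);
    ret = ff_hevc_output_frame(s, out, 1);  /* flush = 1: bump frames even below the reorder delay */
    if (ret < 0) {
        av_frame_free(&out);
        return ret;
    }
    if (ret > 0)
        got_frame = 1;       /* out now holds the next frame in output order */
    else
        av_frame_free(&out); /* nothing to output */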
@ COEFF_ABS_LEVEL_GREATER2_FLAG
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
int ff_hevc_sao_band_position_decode(HEVCContext *s)
uint8_t ctb_up_right_flag
uint8_t intra_pred_mode_c[4]
@ COEFF_ABS_LEVEL_REMAINING
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
H274FilmGrainDatabase h274db
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
struct HEVCContext ** sList
int ff_hevc_mpm_idx_decode(HEVCContext *s)
int last_eos
last packet contains an EOS/EOB NAL
@ PREV_INTRA_LUMA_PRED_FLAG
int slice_idx
number of the slice currently being decoded
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
const uint8_t ff_hevc_qpel_extra[4]
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
struct HEVCFrame * ref[HEVC_MAX_REFS]
uint8_t max_trafo_depth
MaxTrafoDepth.
enum HEVCNALUnitType nal_unit_type
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
int rem_intra_luma_pred_mode
int enable_parallel_tiles
AVClass
Describe the class of an AVClass context structure.
int16_t tmp[MAX_PB_SIZE * MAX_PB_SIZE]
int ff_hevc_merge_flag_decode(HEVCContext *s)
uint8_t cu_transquant_bypass_flag
@ COEFF_ABS_LEVEL_GREATER1_FLAG
uint8_t poc_msb_present[32]
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
int is_nalff
this flag is nonzero if the bitstream is encapsulated in the format defined in ISO/IEC 14496-15 (length-prefixed NAL units rather than Annex B start codes)
int16_t x
horizontal component of motion vector
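HEVC luma motion vectors are stored in quarter-sample units, so the integer-pel displacement and the fractional phase that selects the interpolation filter are recovered by shifting and masking. A small sketch, assuming an Mv-like struct with the int16_t x and y components listed here:

    int x_int  = mv->x >> 2;  /* integer-pel part of the horizontal component */
    int x_frac = mv->x & 3;   /* quarter-pel phase (0..3) */
    int y_int  = mv->y >> 2;
    int y_frac = mv->y & 3;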
AVBufferPool * rpl_tab_pool
candidate references for the current frame
uint8_t edge_emu_buffer[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2]
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
RefPicList * ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *frame, int x0, int y0)
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
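When is_nalff is set, every NAL unit in a packet is preceded by a big-endian length field of nal_length_size bytes instead of an Annex B start code. A minimal sketch of walking such a packet, assuming buf and buf_end pointers into the packet data:

    while (buf_end - buf >= s->nal_length_size) {
        int i, nal_size = 0;
        for (i = 0; i < s->nal_length_size; i++)  /* read the big-endian length prefix */
            nal_size = (nal_size << 8) | buf[i];
        buf += s->nal_length_size;
        if (nal_size <= 0 || nal_size > buf_end - buf)
            break;                                /* truncated or corrupt packet */
        /* buf now points at one complete NAL unit of nal_size bytes */
        buf += nal_size;
    }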
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
struct HEVCFrame * collocated_ref
uint8_t cabac_state[HEVC_CONTEXTS]
int32_t * tab_slice_address
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
int ff_hevc_pcm_flag_decode(HEVCContext *s)
DOVIContext dovi_ctx
Dolby Vision decoding context.
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
@ SIGNIFICANT_COEFF_GROUP_FLAG
AVBufferRef * rpl_tab_buf
@ CU_TRANSQUANT_BYPASS_FLAG
uint8_t context_initialized
@ LAST_SIGNIFICANT_COEFF_Y_SUFFIX
#define DECLARE_ALIGNED(n, t, v)
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts, int thread)
@ REM_INTRA_LUMA_PRED_MODE
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
#define EDGE_EMU_BUFFER_STRIDE
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
int ff_hevc_pred_mode_decode(HEVCContext *s)
const uint8_t ff_hevc_qpel_extra_after[4]
HEVCNALUnitType
Table 7-1 – NAL unit type codes and NAL unit type classes in T-REC-H.265-201802.
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
uint8_t stat_coeff[HEVC_STAT_COEFFS]
HEVCLocalContext ** HEVClcList
enum HEVCNALUnitType first_nal_type
uint8_t edge_emu_buffer2[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2]
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
HEVCLocalContext * HEVClc
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
AVCodecContext
main external API structure.
AVBufferPool * tab_mvf_pool
enum PredMode pred_mode
PredMode.
AVBufferRef * rpu_buf
0 or 1 Dolby Vision RPUs.
AVBufferRef * tab_mvf_buf
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
AVBufferRef
A reference to a data buffer.
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
int eos
current packet contains an EOS/EOB NAL
enum PartMode part_mode
PartMode.
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
uint8_t * sao_pixel_buffer_v[3]
int ff_hevc_merge_idx_decode(HEVCContext *s)
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
@ LAST_SIGNIFICANT_COEFF_Y_PREFIX
@ CU_CHROMA_QP_OFFSET_IDX
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
uint8_t intra_pred_mode[4]
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
int isLongTerm[HEVC_MAX_REFS]
uint8_t intra_split_flag
IntraSplitFlag.
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
@ LAST_SIGNIFICANT_COEFF_X_SUFFIX
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
@ EXPLICIT_RDPCM_DIR_FLAG