Excerpts from av1_parser_parse() in libavcodec/av1_parser.c:

    static int av1_parser_parse(AVCodecParserContext *ctx, AVCodecContext *avctx,
                                const uint8_t **out_data, int *out_size,
                                const uint8_t *data, int size)
    {
        /* ... */
        s->cbc->log_ctx = avctx;
        /* ... */
        s->parsed_extradata = 1;
        /* ... */
        for (int i = 0; i < td->nb_units; i++) {
            /* ... */
            if (!frame->show_frame && !frame->show_existing_frame)
                /* ... */
            ctx->width  = frame->frame_width_minus_1  + 1;
            ctx->height = frame->frame_height_minus_1 + 1;
            /* ... */
            switch (frame->frame_type) {
            /* ... */
            }
            /* ... */
            if (!color->subsampling_x && !color->subsampling_y &&
                /* ... */
        }
        /* ... */
        s->cbc->log_ctx = NULL;
    }
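The switch on frame->frame_type above is what feeds the parser's picture-type and key-frame fields. A minimal sketch of that mapping, assuming the AV1_FRAME_* values from libavcodec/av1.h (none of which appear in the symbol list below); the exact body in the file may differ:

    switch (frame->frame_type) {
    case AV1_FRAME_KEY:
    case AV1_FRAME_INTRA_ONLY:
        ctx->pict_type = AV_PICTURE_TYPE_I;   /* intra-coded, decodable without references */
        break;
    case AV1_FRAME_INTER:
        ctx->pict_type = AV_PICTURE_TYPE_P;   /* predicted from previously decoded frames */
        break;
    case AV1_FRAME_SWITCH:
        ctx->pict_type = AV_PICTURE_TYPE_SP;  /* switch frame, reported as switching-predicted */
        break;
    }
    ctx->key_frame = frame->frame_type == AV1_FRAME_KEY;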
uint32_t num_units_in_display_tick
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static enum AVPixelFormat pix_fmts_rgb[3]
enum AVColorSpace colorspace
YUV colorspace type.
AVColorTransferCharacteristic
Color Transfer Characteristic.
av_cold void ff_cbs_fragment_free(CodedBitstreamFragment *frag)
Free the units array of a fragment in addition to what ff_cbs_fragment_reset does.
@ AV_PICTURE_STRUCTURE_UNKNOWN
unknown
static enum AVPixelFormat pix_fmts_8bit[2][2]
void * content
Pointer to the decomposed form of this unit.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
void ff_cbs_fragment_reset(CodedBitstreamFragment *frag)
Free the units contained in a fragment as well as the fragment's own data buffer, but not the units array itself.
CodedBitstreamContext
Context structure for coded bitstream operations.
#define AV_PIX_FMT_YUV420P10
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
CodedBitstreamUnitType type
Codec-specific type of this unit.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
@ AV1_OBU_TEMPORAL_DELIMITER
static av_cold int av1_parser_init(AVCodecParserContext *ctx)
CodedBitstreamUnit
Coded bitstream unit structure.
av_cold void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
#define AV_PIX_FMT_GBRP10
int ff_cbs_read(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const uint8_t *data, size_t size)
Read a bitstream from a memory region into a fragment, then split into units and decompose.
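A rough sketch of how the CBS calls listed on this page fit together for an AV1 buffer; the ff_cbs_* functions and the unit fields are taken from this page, while the AV1RawOBU content type is assumed from cbs_av1.h and error handling is abbreviated:

    CodedBitstreamContext *cbc = NULL;
    CodedBitstreamFragment frag = { 0 };
    int err = ff_cbs_init(&cbc, AV_CODEC_ID_AV1, avctx);   /* avctx serves only as the log context */
    if (err < 0)
        return err;
    err = ff_cbs_read(cbc, &frag, data, size);             /* split into OBUs and decompose them */
    if (err >= 0) {
        for (int i = 0; i < frag.nb_units; i++) {
            if (frag.units[i].type == AV1_OBU_SEQUENCE_HEADER) {
                AV1RawOBU *obu = frag.units[i].content;    /* decomposed form of this unit */
                /* inspect obu->obu.sequence_header here */
            }
        }
    }
    ff_cbs_fragment_reset(&frag);   /* drop unit contents and data, keep the units array for reuse */
    ff_cbs_fragment_free(&frag);    /* release the units array itself */
    ff_cbs_close(&cbc);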
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
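One plausible use of av_reduce() in this parser is folding the sequence header's timing information (time_scale over num_units_in_display_tick, the latter listed above) into a reduced frame rate; a sketch with the struct layout assumed rather than quoted from the file:

    AVRational fr;
    av_reduce(&fr.num, &fr.den,
              seq->timing_info.time_scale,                /* e.g. 24000 */
              seq->timing_info.num_units_in_display_tick, /* e.g. 1001  */
              INT_MAX);
    /* fr is now 24000/1001, i.e. roughly 23.976 frames per second */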
#define AV_PIX_FMT_YUV444P10
@ AV_PICTURE_STRUCTURE_FRAME
coded as frame
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
CodedBitstreamFragment
Coded bitstream fragment structure, combining one or more units.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAY10
enum AVColorRange color_range
MPEG vs JPEG YUV range.
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
static int av1_parser_parse(AVCodecParserContext *ctx, AVCodecContext *avctx, const uint8_t **out_data, int *out_size, const uint8_t *data, int size)
@ AV_PICTURE_TYPE_I
Intra.
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
static void av1_parser_close(AVCodecParserContext *ctx)
#define AV_PIX_FMT_YUV422P12
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
#define AV_PIX_FMT_YUV444P12
@ AV1_OBU_SEQUENCE_HEADER
@ AV_PICTURE_TYPE_NONE
Undefined.
const AVCodecParser ff_av1_parser
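The static init/parse/close callbacks listed elsewhere on this page are exported through this table; roughly as below, with the private context type name being an assumption rather than taken from the listing:

    const AVCodecParser ff_av1_parser = {
        .codec_ids      = { AV_CODEC_ID_AV1 },
        .priv_data_size = sizeof(AV1ParseContext),  /* parser private state; name assumed */
        .parser_init    = av1_parser_init,
        .parser_close   = av1_parser_close,
        .parser_parse   = av1_parser_parse,
    };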
AV1RawFrameHeader frame_header
int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const AVCodecContext *avctx)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static const CodedBitstreamUnitType decompose_unit_types[]
#define i(width, name, range_min, range_max)
static enum AVPixelFormat pix_fmts_12bit[2][2]
CodedBitstreamContext * cbc
#define AV_PIX_FMT_GBRP12
AVColorSpace
YUV colorspace type.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define AV_PIX_FMT_YUV420P12
AVCodecContext
main external API structure.
AV1RawSequenceHeader * sequence_header
uint32_t CodedBitstreamUnitType
The codec-specific type of a bitstream unit.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static enum AVPixelFormat pix_fmts_10bit[2][2]
av_cold int ff_cbs_init(CodedBitstreamContext **ctx_ptr, enum AVCodecID codec_id, void *log_ctx)
Create and initialise a new context for the given codec.
#define AV_PIX_FMT_GRAY12
CodedBitstreamFragment temporal_unit
void * priv_data
Format private data.