Go to the documentation of this file.
64 static const int extradata_obu_types[] = {
69 int extradata_size = 0, filtered_size = 0;
71 int i, has_seq = 0,
ret = 0;
77 for (
i = 0;
i <
s->av1_pkt.nb_obus;
i++) {
83 }
else if (
s->remove) {
88 if (extradata_size && has_seq) {
108 *
size = extradata_size;
114 for (
i = 0;
i <
s->av1_pkt.nb_obus;
i++) {
116 if (
val_in_array(extradata_obu_types, nb_extradata_obu_types,
119 }
else if (
s->remove) {
138 static const int extradata_nal_types_hevc[] = {
141 static const int extradata_nal_types_h264[] = {
147 int extradata_size = 0, filtered_size = 0;
148 const int *extradata_nal_types;
149 int nb_extradata_nal_types;
150 int i, has_sps = 0, has_vps = 0,
ret = 0;
153 extradata_nal_types = extradata_nal_types_hevc;
154 nb_extradata_nal_types =
FF_ARRAY_ELEMS(extradata_nal_types_hevc);
156 extradata_nal_types = extradata_nal_types_h264;
157 nb_extradata_nal_types =
FF_ARRAY_ELEMS(extradata_nal_types_h264);
161 ctx, 0, 0,
ctx->par_in->codec_id, 1, 0);
165 for (
i = 0;
i <
s->h2645_pkt.nb_nals;
i++) {
168 extradata_size += nal->
raw_size + 3;
175 }
else if (
s->remove) {
180 if (extradata_size &&
202 *
size = extradata_size;
208 for (
i = 0;
i <
s->h2645_pkt.nb_nals;
i++) {
210 if (
val_in_array(extradata_nal_types, nb_extradata_nal_types,
212 bytestream2_put_be24u(&pb_extradata, 1);
214 }
else if (
s->remove) {
215 bytestream2_put_be24u(&pb_filtered_data, 1);
236 uint32_t
state = UINT32_MAX;
237 int has_extradata = 0, extradata_size = 0;
244 extradata_size = ptr - 4 -
pkt->
data;
249 if (extradata_size) {
255 *
size = extradata_size;
270 uint32_t
state = UINT32_MAX;
277 else if (found &&
state != 0x1B5 && state < 0x200 && state >= 0x100) {
300 uint32_t
state = UINT32_MAX;
324 static const struct {
361 uint8_t *extradata =
NULL;
369 ret =
s->extract(
ctx,
pkt, &extradata, &extradata_size);
376 extradata, extradata_size);
411 #define OFFSET(x) offsetof(ExtractExtradataContext, x)
412 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_BSF_PARAM)
415 { .i64 = 0 }, 0, 1,
FLAGS },
427 .
name =
"extract_extradata",
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Filter the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. They are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats in query_formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uint8_t * data
The data buffer.
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
filter_frame: For filters that do not use the activate callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, then the filter needs to output only at least one frame per link. The additional frames can be left buffered in the filter.
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
The bitstream filter state.
int ff_av1_packet_split(AV1Packet *pkt, const uint8_t *buf, int length, void *logctx)
Split an input packet into OBUs.
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
An input packet split into OBUs.
static double val(void *priv, double ch)
#define FF_ARRAY_ELEMS(a)
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
const char * av_default_item_name(void *ptr)
Return the context name.
AVCodecID
Identify the syntax and semantics of the bitstream.
@ AV1_OBU_SEQUENCE_HEADER
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
int raw_size
Size of entire OBU, including header.
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
#define i(width, name, range_min, range_max)
void ff_av1_packet_uninit(AV1Packet *pkt)
Free all the allocated memory in the packet.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define AV_INPUT_BUFFER_PADDING_SIZE
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
A reference to a data buffer.
static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, const uint8_t *src, unsigned int size)
This structure stores compressed data.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
Called by bitstream filters to get packet for filtering.
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
void * priv_data
Format private data.