136 *data = matrix_encoding;
150 w_align = 1 << desc->log2_chroma_w;
151 h_align = 1 << desc->log2_chroma_h;
154 switch (s->pix_fmt) {
320 w_align = FFMAX(w_align, 8);
344 for (i = 0; i < 4; i++)
351 int chroma_shift = desc->log2_chroma_w;
356 align = FFMAX(linesize_align[0], linesize_align[3]);
357 linesize_align[1] <<= chroma_shift;
358 linesize_align[2] <<= chroma_shift;
362 #if FF_API_AVCODEC_CHROMA_POS
376 int buf_size, int align)
381 frame->nb_samples, sample_fmt,
383 if (buf_size < needed_size)
395 (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples,
396 sample_fmt, align)) < 0) {
416 for (p = 0; p < desc->nb_components; p++) {
417 uint8_t *dst = frame->data[p];
418 int is_chroma = p == 1 || p == 2;
421 if (desc->comp[0].depth >= 9) {
422 ((uint16_t*)dst)[0] = c[p];
424 dst += frame->linesize[p];
425 for (y = 1; y < height; y++) {
426 memcpy(dst, frame->data[p], 2*bytes);
427 dst += frame->linesize[p];
430 for (y = 0; y < height; y++) {
431 memset(dst, c[p], bytes);
432 dst += frame->linesize[p];
459 return "unknown_codec";
578 if (be < 0 || be > 1)
604 uint32_t tag, int bits_per_coded_sample, int64_t bitrate,
605 uint8_t *extradata, int frame_size, int frame_bytes)
608 int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1;
611 if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768)
612 return (frame_bytes * 8LL) / (bps * ch);
613 bps = bits_per_coded_sample;
631 if (framecount > INT_MAX/1024)
633 return 1024 * framecount;
644 return 256 * sr / 245;
646 return 588 * sr / 44100;
650 return (480 << (sr / 22050));
654 return sr <= 24000 ? 576 : 1152;
674 if (frame_bytes > 0) {
677 return 240 * (frame_bytes / 32);
679 return 256 * (frame_bytes / 64);
681 return 160 * (frame_bytes / 20);
683 return 4 * (frame_bytes / 4);
685 return 4 * (frame_bytes / 6);
690 return frame_bytes * 8 / bps;
693 if (ch > 0 && ch < INT_MAX/16) {
697 return frame_bytes / (40 * ch) * 256;
699 return (frame_bytes - 4 * ch) / (128 * ch) * 256;
701 return frame_bytes / (9 * ch) * 16;
704 frame_bytes /= 16 * ch;
705 if (frame_bytes > INT_MAX / 28)
707 return frame_bytes * 28;
712 return (frame_bytes - 4 * ch) * 2 / ch;
714 return (frame_bytes - 4) * 2 / ch;
716 return (frame_bytes - 8) * 2;
720 return frame_bytes * 14LL / (8 * ch);
723 return (frame_bytes / 128) * 224 / ch;
725 return (frame_bytes - 6 - ch) / ch;
727 return (frame_bytes - 8) / ch;
729 return (frame_bytes - 2 * ch) / ch;
731 return 3 * frame_bytes / ch;
733 return 6 * frame_bytes / ch;
735 return 2 * (frame_bytes / (5 * ch));
738 return 4 * frame_bytes / ch;
745 return frame_bytes / ch;
747 return frame_bytes * 2 / ch;
753 int blocks = frame_bytes / ba;
757 if (bps < 2 || bps > 5)
759 tmp = blocks * (1LL + (ba - 4 * ch) / (bps * ch) * 8);
762 tmp = blocks * (((ba - 16LL) * 2 / 3 * 4) / ch);
765 tmp = blocks * (1 + (ba - 4LL * ch) * 2 / ch);
768 tmp = blocks * ((ba - 4LL * ch) * 2 / ch);
771 tmp = blocks * (2 + (ba - 7LL * ch) * 2LL / ch);
774 tmp = blocks * (ba - 16LL) * 2 / ch;
791 if (bps < 4 || frame_bytes < 3)
793 return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch));
795 if (bps < 4 || frame_bytes < 4)
797 return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8);
799 return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
811 if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) {
813 return (frame_bytes * 8LL * sr) / bitrate;
823 #if FF_API_OLD_CHANNEL_LAYOUT
841 #if FF_API_OLD_CHANNEL_LAYOUT
927 f->owner[0] = f->owner[1] = avctx;
939 f->owner[0] = f->owner[1] = NULL;
983 uint32_t *av_restrict state)
991 for (i = 0; i < 3; i++) {
994 if (tmp == 0x100 || p == end)
999 if (p[-1] > 1) p += 3;
1000 else if (p[-2]) p += 2;
1001 else if (p[-3]|(p[-1]-1)) p++;
1008 p = FFMIN(p, end) - 4;
1021 *size = sizeof(*props);
1061 unsigned low = bcd & 0xf;
1062 unsigned high = bcd >> 4;
1063 if (low > 9 || high > 9)
1065 return low + 10*high;
1069 void **data, size_t *sei_size)
1084 tc = (uint32_t*)sd->data;
1087 *sei_size = sizeof(uint32_t) * 4;
1091 sei_data = (uint8_t*)*data + prefix_len;
1096 for (int j = 1; j <= m; j++) {
1097 uint32_t tcsmpte = tc[j];
1098 unsigned hh = bcd2uint(tcsmpte & 0x3f);
1099 unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f);
1101 unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f);
1102 unsigned drop = tcsmpte & 1<<30 && !0;
1109 pc = !!(tcsmpte & 1 << 7);
1111 pc = !!(tcsmpte & 1 << 23);
1112 ff = (ff + pc) & 0x7f;
1143 if (!bits_per_coded_sample) {
1154 const int *array_valid_values, int default_value)
1159 ref_val = array_valid_values[i];
1160 if (ref_val == INT_MAX)
1168 "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value);
1169 return default_value;
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
int frame_size
Number of samples per channel in an audio frame.
it's the only field you need to keep assuming you have a context. There is some magic you don't need to care about around this field, just let it be (in the first position) for now. Options: then comes the options array. This is what will define the user accessible options. For example
#define FF_ENABLE_DEPRECATION_WARNINGS
@ AV_SAMPLE_FMT_FLTP
float, planar
@ AV_PIX_FMT_YUV420P9LE
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
#define AV_LOG_WARNING
Something somehow does not look correct.
int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
Converts AVChromaLocation to swscale x/y chroma position.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
@ AV_CODEC_ID_ADPCM_IMA_QT
@ FF_CODEC_CB_TYPE_RECEIVE_PACKET
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
int sample_rate
samples per second
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
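These two calls are what the matrix-encoding update at line 136 of the listing is built on. A minimal sketch, assuming an existing frame; the helper name and the reduced error handling are mine, not part of this file:

    #include <libavutil/frame.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/error.h>

    /* Attach (or update) matrix-encoding side data on an existing AVFrame,
     * mirroring ff_side_data_update_matrix_encoding() in the listing above. */
    static int set_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding enc)
    {
        AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING);
        if (!sd)
            sd = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING,
                                        sizeof(enum AVMatrixEncoding));
        if (!sd)
            return AVERROR(ENOMEM);
        *(enum AVMatrixEncoding *)sd->data = enc;  /* same idea as line 136: *data = matrix_encoding */
        return 0;
    }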
This struct describes the properties of an encoded stream.
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
const char * name
short name for the profile
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_YUV422P14LE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
const char * name
Name of the codec described by this descriptor.
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
int ff_thread_can_start_frame(AVCodecContext *avctx)
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, const uint8_t *buf, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Fill plane data pointers and linesize for samples with sample format sample_fmt.
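A sketch of typical use: validate the buffer size first, then let the helper compute the plane pointers without copying. The stereo/1024-sample/S16 values are example choices, not taken from this file.

    #include <libavutil/samplefmt.h>
    #include <libavutil/error.h>

    /* Wrap an existing packed S16 stereo buffer so data[0]/linesize describe it. */
    static int wrap_s16_buffer(const uint8_t *buf, int buf_size,
                               uint8_t *data[8], int *linesize)
    {
        int needed = av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 0);
        if (needed < 0 || buf_size < needed)
            return AVERROR(EINVAL);
        return av_samples_fill_arrays(data, linesize, buf, 2, 1024,
                                      AV_SAMPLE_FMT_S16, 0);
    }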
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
@ AV_CODEC_ID_PCM_S32LE_PLANAR
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
@ AV_PIX_FMT_YUVA444P10BE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV440P12BE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_CODEC_ID_PCM_S16BE_PLANAR
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
@ AV_PIX_FMT_YUV420P14BE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
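A small sketch combining it with av_pix_fmt_desc_get() listed above; the example format is my choice.

    #include <libavutil/pixdesc.h>

    static int bpp_of(enum AVPixelFormat fmt)
    {
        const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(fmt);
        return d ? av_get_bits_per_pixel(d) : 0;   /* e.g. AV_PIX_FMT_YUV420P -> 12 */
    }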
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
@ FF_CODEC_CB_TYPE_ENCODE_SUB
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
@ AV_PIX_FMT_YUVA444P9BE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
@ AV_PIX_FMT_YUV422P9BE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
const char * avcodec_profile_name(enum AVCodecID codec_id, int profile)
Return a name for the specified profile, if available.
int nb_channels
Number of channels in this layout.
@ AV_CODEC_ID_PCM_S16LE_PLANAR
@ AV_CODEC_ID_ADPCM_THP_LE
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
@ AV_CODEC_ID_DSD_MSBF_PLANAR
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
Converts swscale x/y chroma position to AVChromaLocation.
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_FRAME_DATA_MATRIXENCODING
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
AVChannelLayout ch_layout
Audio channel layout.
const uint8_t * avpriv_find_start_code(const uint8_t *av_restrict p, const uint8_t *end, uint32_t *av_restrict state)
AVCodecContext * owner[2]
static double val(void *priv, double ch)
@ AV_SAMPLE_FMT_S64P
signed 64 bits, planar
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
#define ss(width, name, subs,...)
@ AV_PIX_FMT_YUVA444P16BE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P10BE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ FF_CODEC_CB_TYPE_DECODE
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
@ AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_PIX_FMT_YUV422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV444P14LE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define FF_ARRAY_ELEMS(a)
int frame_size
Audio only.
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
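Unlike memcpy(), the source may overlap the destination, which allows LZ77-style pattern replication. A sketch with made-up values:

    #include <stdint.h>
    #include <libavutil/mem.h>

    /* Replicate a 2-byte pattern: back (2) is smaller than cnt (6), so the
     * copy reads bytes it has just written. */
    static void fill_pattern(void)
    {
        uint8_t buf[8] = { 0xAB, 0xCD };
        av_memcpy_backptr(buf + 2, 2, 6);   /* buf: AB CD AB CD AB CD AB CD */
    }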
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
@ AV_CODEC_ID_ADPCM_IMA_ACORN
This struct describes the properties of a single codec described by an AVCodecID.
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
int av_chroma_location_enum_to_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
Converts AVChromaLocation to swscale x/y chroma position.
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_CEIL_RSHIFT(a, b)
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
@ AV_CODEC_ID_ADPCM_IMA_APC
@ AV_PIX_FMT_YUVA420P16BE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding)
Add or update AV_FRAME_DATA_MATRIXENCODING side data.
@ AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_BINKAUDIO_DCT
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align)
Fill AVFrame audio data and linesize pointers.
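A hedged sketch of the usual call sequence: set nb_samples (and the format) on the frame, then let the helper point data/linesize at a caller-owned buffer. Channel count, sample count and sample format are illustrative values, not taken from this file.

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    /* The caller keeps ownership of buf for the lifetime of the frame. */
    static int wrap_audio(AVFrame *frame, const uint8_t *buf, int buf_size)
    {
        frame->nb_samples = 1024;              /* must be set before the call */
        frame->format     = AV_SAMPLE_FMT_FLT;
        return avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLT,
                                        buf, buf_size, 0);
    }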
@ FF_CODEC_CB_TYPE_ENCODE
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
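A sketch of the intended use when allocating picture buffers by hand; the 1920x1080 input is an example and avctx is assumed to be an opened decoder context.

    #include <libavcodec/avcodec.h>

    static void padded_dims(AVCodecContext *avctx, int *w, int *h)
    {
        int linesize_align[AV_NUM_DATA_POINTERS];
        *w = 1920;
        *h = 1080;
        avcodec_align_dimensions2(avctx, w, h, linesize_align);
        /* *w and *h are now rounded up to what the codec and SIMD code accept */
    }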
it's the only field you need to keep assuming you have a context. There is some magic you don't need to care about around this field.
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
void ff_thread_await_progress(const ThreadFrame *f, int progress, int field)
Wait for earlier decoding threads to finish reference pictures.
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
This structure describes the bitrate properties of an encoded bitstream.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
@ AV_PIX_FMT_YUV444P9BE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10BE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_EA_XAS
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
@ AV_CODEC_ID_INTERPLAY_VIDEO
@ AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_IMA_WS
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Rational number (pair of numerator and denominator).
@ AV_CODEC_ID_INTERPLAY_DPCM
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
@ FF_CODEC_CB_TYPE_DECODE_SUB
@ AV_CODEC_ID_ADPCM_IMA_DK4
int64_t bit_rate
the average bitrate
enum AVPacketSideDataType type
@ AV_PIX_FMT_YUVA422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
int ff_slice_thread_init_progress(AVCodecContext *avctx)
@ AV_PIX_FMT_YUVA444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
const AVProfile * profiles
array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
@ AV_PIX_FMT_YUVA444P9LE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_PIX_FMT_YUVA420P16LE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
AVChannelLayout ch_layout
Audio only.
@ AV_PIX_FMT_YUVA420P9LE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
enum AVChromaLocation av_chroma_location_pos_to_enum(int xpos, int ypos)
Converts swscale x/y chroma position to AVChromaLocation.
void ff_color_frame(AVFrame *frame, const int c[4])
int sample_rate
Audio only.
@ AV_PIX_FMT_YUV420P14LE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_PCM_S24LE_PLANAR
@ AV_CODEC_ID_GSM
as in Berlin toast format
AVCodecID
Identify the syntax and semantics of the bitstream.
@ AV_PIX_FMT_YUV444P14BE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
static const uint8_t tab[16]
@ AV_PIX_FMT_YUV420P9BE
The following 12 formats have the disadvantage of needing 1 format for each bit depth.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int av_codec_is_decoder(const AVCodec *avcodec)
int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
static int shift(int a, int b)
@ AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_PIX_FMT_YUV420P12BE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_PIX_FMT_YUV422P14BE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec)
#define AV_NUM_DATA_POINTERS
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
This function is the same as av_get_audio_frame_duration(), except it works with AVCodecParameters in...
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static const av_always_inline FFCodec * ffcodec(const AVCodec *codec)
@ AV_PIX_FMT_YUV420P10BE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
int ff_int_from_list_or_default(void *ctx, const char *val_name, int val, const int *array_valid_values, int default_value)
Check if a value is in the list.
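This is a libavcodec-internal helper; the list must be terminated with INT_MAX, as the check at line 1160 of the listing shows. A sketch with illustrative rates, assuming the declaration comes from libavcodec's internal header:

    #include <limits.h>
    #include "avcodec.h"
    #include "internal.h"   /* assumed to declare ff_int_from_list_or_default() */

    /* Clamp a requested sample rate to a short INT_MAX-terminated list;
     * the rates and the default are example values. */
    static int pick_rate(AVCodecContext *avctx, int requested)
    {
        static const int valid_rates[] = { 32000, 44100, 48000, INT_MAX };
        return ff_int_from_list_or_default(avctx, "sample rate", requested,
                                           valid_rates, 48000);
    }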
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
AVChromaLocation
Location of chroma samples.
@ AV_PIX_FMT_YUVA422P10BE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
int av_codec_is_encoder(const AVCodec *avcodec)
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
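It never returns NULL; unknown IDs come back as the string seen at line 459 of the listing. A one-call sketch:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    int main(void)
    {
        printf("%s\n", avcodec_get_name(AV_CODEC_ID_MP3));   /* prints "mp3" */
        return 0;
    }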
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
@ AV_PIX_FMT_YUVA444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
@ AV_PIX_FMT_YUVA422P9BE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
unsigned caps_internal
Internal codec capabilities FF_CODEC_CAP_*.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
#define i(width, name, range_min, range_max)
const char * av_get_profile_name(const AVCodec *codec, int profile)
Return a name for the specified profile, if available.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
int block_align
Audio only.
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
@ AV_CODEC_ID_DSD_LSBF_PLANAR
AVSampleFormat
Audio sample formats.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
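A sketch of the usual grow-once pattern for a decoder scratch buffer; the wrapper and its names are mine, not part of this file.

    #include <stdint.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/error.h>

    static uint8_t *scratch;
    static unsigned scratch_size;

    /* Reuses the existing buffer when it is already big enough; on failure
     * the pointer is left NULL. */
    static int ensure_scratch(size_t min_size)
    {
        av_fast_padded_malloc(&scratch, &scratch_size, min_size);
        return scratch ? 0 : AVERROR(ENOMEM);
    }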
#define AV_PIX_FMT_RGB555
unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
Encode extradata length to a buffer.
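Xiph lacing stores a length as a run of 255-valued bytes plus a remainder byte. A sketch with an example value:

    #include <libavcodec/avcodec.h>

    /* 600 = 2*255 + 90, so three bytes are written and 3 is returned.
     * The destination must have room for v/255 + 1 bytes. */
    static unsigned write_lacing(unsigned char *dst)
    {
        return av_xiphlacing(dst, 600);   /* dst: 255, 255, 90 */
    }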
@ AV_CODEC_ID_ADPCM_IMA_APM
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
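A sketch of typical use on the demuxing side; avctx is assumed to be an opened audio decoder context and pkt a packet belonging to that stream.

    #include <libavcodec/avcodec.h>

    /* Returns the number of samples one packet decodes to, or 0 if the
     * duration cannot be determined for this codec and its parameters. */
    static int packet_duration_samples(AVCodecContext *avctx, const AVPacket *pkt)
    {
        return av_get_audio_frame_duration(avctx, pkt->size);
    }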
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
@ AV_CODEC_ID_ADPCM_IMA_DAT4
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
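A sketch pairing the size query with an allocation; the format, channel count and sample count are example values.

    #include <stdint.h>
    #include <libavutil/samplefmt.h>
    #include <libavutil/mem.h>

    /* Allocate a zeroed packed S16 stereo buffer for 1024 samples. */
    static uint8_t *alloc_audio(int *out_size)
    {
        int linesize;
        int size = av_samples_get_buffer_size(&linesize, 2, 1024,
                                              AV_SAMPLE_FMT_S16, 0);
        if (size < 0)
            return NULL;
        *out_size = size;
        return av_mallocz(size);
    }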
@ AV_PIX_FMT_YUV444P16BE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
Get an estimated video bitrate based on frame size, frame rate and coded bits per pixel.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
int block_align
Number of bytes per packet, if constant and known, or 0. Used by some WAV based audio codecs.
static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, uint32_t tag, int bits_per_coded_sample, int64_t bitrate, uint8_t *extradata, int frame_size, int frame_bytes)
#define AV_INPUT_BUFFER_PADDING_SIZE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
@ AV_PIX_FMT_YUV444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
main external API structure.
int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
@ AV_CODEC_ID_ADPCM_G726LE
@ AV_PIX_FMT_YUV444P9LE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call.
@ AV_SAMPLE_FMT_DBLP
double, planar
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
@ FF_CODEC_CB_TYPE_RECEIVE_FRAME
unsigned cb_type
This field determines the type of the codec (decoder/encoder) and also the exact callback cb implemen...
@ AV_PIX_FMT_YUVA420P10BE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *avcodec, int index)
Retrieve supported hardware configurations for a codec.
@ AV_PIX_FMT_YUV420P16BE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
Return the PCM codec associated with a sample format.
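A sketch; per the check at line 578 of the listing, any be value other than 0 or 1 is treated as native endianness.

    #include <libavcodec/avcodec.h>

    static enum AVCodecID pcm_id_for_s16le(void)
    {
        /* be: 0 = little-endian, 1 = big-endian */
        return av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0);   /* AV_CODEC_ID_PCM_S16LE */
    }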
@ AV_PIX_FMT_YUV422P16BE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
int bits_per_coded_sample
The number of bits per sample in the codedwords.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static unsigned bcd2uint(uint8_t bcd)
@ AV_CODEC_ID_ADPCM_SBPRO_4
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Structure to hold side data for an AVFrame.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
@ AV_CODEC_ID_PCM_S24DAUD
@ AV_CODEC_ID_ADPCM_IMA_SSI
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
const VDPAUPixFmtMap * map
int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len, void **data, size_t *sei_size)
Check AVFrame for S12M timecode side data and allocate and fill TC SEI message with timecode info.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
@ AV_CODEC_ID_ADPCM_IMA_WAV
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_YUV440P10BE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_PIX_FMT_YUVA422P16BE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV422P9LE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P16LE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
@ AV_CODEC_ID_PCM_S8_PLANAR
int width
picture width / height.
#define flags(name, subs,...)
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
@ AV_SAMPLE_FMT_DBL
double
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
@ AV_PIX_FMT_YUVA422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
Notify later decoding threads when part of their reference picture is ready.
@ AV_SAMPLE_FMT_S32
signed 32 bits
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
int ff_thread_init(AVCodecContext *s)
@ AV_PIX_FMT_YUVA420P9BE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
@ AV_PIX_FMT_UYYVYY411
packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
@ AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_PIX_FMT_YUVA422P9LE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian