Go to the documentation of this file.
23 #include <AudioToolbox/AudioToolbox.h>
25 #define FF_BUFQUEUE_SIZE 256
65 return kAudioFormatMPEG4AAC;
67 return kAudioFormatMPEG4AAC_HE;
69 return kAudioFormatMPEG4AAC_HE_V2;
71 return kAudioFormatMPEG4AAC_LD;
73 return kAudioFormatMPEG4AAC_ELD;
76 return kAudioFormatAppleIMA4;
78 return kAudioFormatAppleLossless;
80 return kAudioFormatiLBC;
82 return kAudioFormatALaw;
84 return kAudioFormatULaw;
94 UInt32
size =
sizeof(unsigned);
95 AudioConverterPrimeInfo prime_info;
96 AudioStreamBasicDescription out_format;
99 kAudioConverterPropertyMaximumOutputPacketSize,
105 size =
sizeof(prime_info);
107 if (!AudioConverterGetProperty(at->
converter,
108 kAudioConverterPrimeInfo,
109 &
size, &prime_info)) {
113 size =
sizeof(out_format);
114 if (!AudioConverterGetProperty(at->
converter,
115 kAudioConverterCurrentOutputStreamDescription,
116 &
size, &out_format)) {
117 if (out_format.mFramesPerPacket)
118 avctx->
frame_size = out_format.mFramesPerPacket;
135 *
tag = bytestream2_get_byte(gb);
137 int c = bytestream2_get_byte(gb);
152 return avctx->
bit_rate <= 14000 ? 30 : 20;
177 return kAudioChannelLabel_LFE2;
185 layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
191 if (c < 0 || c >= 64)
194 layout->mChannelDescriptions[
i].mChannelLabel = label;
204 static const struct {
236 AudioStreamBasicDescription in_format = {
238 .mFormatID = kAudioFormatLinearPCM,
242 : kAudioFormatFlagIsSignedInteger)
243 | kAudioFormatFlagIsPacked,
245 .mFramesPerPacket = 1,
250 AudioStreamBasicDescription out_format = {
253 .mChannelsPerFrame = in_format.mChannelsPerFrame,
255 UInt32 layout_size =
sizeof(AudioChannelLayout) +
257 AudioChannelLayout *channel_layout =
av_malloc(layout_size);
264 out_format.mFramesPerPacket = 8000 *
mode / 1000;
265 out_format.mBytesPerPacket = (
mode == 20 ? 38 : 50);
285 if (AudioConverterSetProperty(at->
converter, kAudioConverterInputChannelLayout,
286 layout_size, channel_layout)) {
294 channel_layout->mChannelLayoutTag =
tag;
295 channel_layout->mNumberChannelDescriptions = 0;
298 if (AudioConverterSetProperty(at->
converter, kAudioConverterOutputChannelLayout,
299 layout_size, channel_layout)) {
308 kAudioConverterPropertyBitDepthHint,
312 #if !TARGET_OS_IPHONE
315 kAudioCodecBitRateControlMode_Variable :
316 kAudioCodecBitRateControlMode_Constant;
318 AudioConverterSetProperty(at->
converter, kAudioCodecPropertyBitRateControlMode,
321 if (at->
mode == kAudioCodecBitRateControlMode_Variable) {
323 if (q < 0 || q > 14) {
325 "VBR quality %d out of range, should be 0-14\n", q);
329 AudioConverterSetProperty(at->
converter, kAudioCodecPropertySoundQualityForVBR,
337 kAudioConverterApplicableEncodeBitRates,
340 UInt32 new_rate = rate;
347 kAudioConverterApplicableEncodeBitRates,
349 count =
size /
sizeof(AudioValueRange);
350 for (
i = 0;
i < count;
i++) {
351 AudioValueRange *range = &ranges[
i];
352 if (rate >= range->mMinimum && rate <= range->mMaximum) {
355 }
else if (rate > range->mMaximum) {
356 new_rate = range->mMaximum;
358 new_rate = range->mMinimum;
362 if (new_rate != rate) {
364 "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
369 AudioConverterSetProperty(at->
converter, kAudioConverterEncodeBitRate,
370 sizeof(rate), &rate);
374 AudioConverterSetProperty(at->
converter, kAudioConverterCodecQuality,
377 if (!AudioConverterGetPropertyInfo(at->
converter, kAudioConverterCompressionMagicCookie,
394 kAudioConverterCompressionMagicCookie,
395 &extradata_size, extradata);
417 flags = bytestream2_get_byte(&gb);
433 #if !TARGET_OS_IPHONE && defined(__MAC_10_9)
434 if (at->
mode == kAudioCodecBitRateControlMode_Variable && avctx->
rc_max_rate) {
437 AudioConverterSetProperty(at->
converter, kAudioCodecPropertyPacketSizeLimitForVBR,
438 sizeof(max_size), &max_size);
452 AudioBufferList *
data,
453 AudioStreamPacketDescription **packets,
473 data->mNumberBuffers = 1;
475 data->mBuffers[0].mDataByteSize =
frame->nb_samples *
478 data->mBuffers[0].mData =
frame->data[0];
479 if (*nb_packets >
frame->nb_samples)
480 *nb_packets =
frame->nb_samples;
500 AudioBufferList out_buffers = {
509 AudioStreamPacketDescription out_pkt_desc = {0};
540 out_buffers.mBuffers[0].mData = avpkt->
data;
545 got_packet_ptr, &out_buffers,
550 if ((!
ret ||
ret == 1) && *got_packet_ptr) {
551 avpkt->
size = out_buffers.mBuffers[0].mDataByteSize;
553 out_pkt_desc.mVariableFramesInPacket :
557 }
else if (
ret &&
ret != 1) {
593 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
595 #if !TARGET_OS_IPHONE
596 {
"aac_at_mode",
"ratecontrol mode", offsetof(
ATDecodeContext,
mode),
AV_OPT_TYPE_INT, {.i64 = -1}, -1, kAudioCodecBitRateControlMode_Variable,
AE,
"mode"},
597 {
"auto",
"VBR if global quality is given; CBR otherwise", 0,
AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX,
AE,
"mode"},
598 {
"cbr",
"constant bitrate", 0,
AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Constant}, INT_MIN, INT_MAX,
AE,
"mode"},
599 {
"abr",
"long-term average bitrate", 0,
AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_LongTermAverage}, INT_MIN, INT_MAX,
AE,
"mode"},
600 {
"cvbr",
"constrained variable bitrate", 0,
AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_VariableConstrained}, INT_MIN, INT_MAX,
AE,
"mode"},
601 {
"vbr" ,
"variable bitrate", 0,
AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Variable}, INT_MIN, INT_MAX,
AE,
"mode"},
607 #define FFAT_ENC_CLASS(NAME) \
608 static const AVClass ffat_##NAME##_enc_class = { \
609 .class_name = "at_" #NAME "_enc", \
610 .item_name = av_default_item_name, \
612 .version = LIBAVUTIL_VERSION_INT, \
615 #define FFAT_ENC(NAME, ID, PROFILES, CAPS, CHANNEL_LAYOUTS, CH_LAYOUTS) \
616 FFAT_ENC_CLASS(NAME) \
617 const FFCodec ff_##NAME##_at_encoder = { \
618 .p.name = #NAME "_at", \
619 CODEC_LONG_NAME(#NAME " (AudioToolbox)"), \
620 .p.type = AVMEDIA_TYPE_AUDIO, \
622 .priv_data_size = sizeof(ATDecodeContext), \
623 .init = ffat_init_encoder, \
624 .close = ffat_close_encoder, \
625 FF_CODEC_ENCODE_CB(ffat_encode), \
626 .flush = ffat_encode_flush, \
627 .p.priv_class = &ffat_##NAME##_enc_class, \
628 .p.capabilities = AV_CODEC_CAP_DELAY | \
629 AV_CODEC_CAP_ENCODER_FLUSH CAPS, \
630 CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(CHANNEL_LAYOUTS) \
631 .p.ch_layouts = CH_LAYOUTS, \
632 .p.sample_fmts = (const enum AVSampleFormat[]) { \
634 AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NONE \
636 .p.profiles = PROFILES, \
637 .p.wrapper_name = "at", \
656 #if FF_API_OLD_CHANNEL_LAYOUT
657 static const uint64_t aac_at_channel_layouts[] = {
#define AV_CH_LAYOUT_7POINT0
int frame_size
Number of samples per channel in an audio frame.
#define AV_CH_LAYOUT_6POINT1
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_CH_LAYOUT_7POINT1_WIDE_BACK
@ AV_CODEC_ID_ADPCM_IMA_QT
They must not be accessed directly. The fifo field contains the frames that are queued in the input for processing by the filter. The status_in and status_out fields contain the queued status (EOF or error) of the link.
#define MP4DecConfigDescrTag
#define AV_CHANNEL_LAYOUT_OCTAGONAL
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts, int64_t *duration)
Remove frame(s) from the queue.
#define AV_CHANNEL_LAYOUT_STEREO
int sample_rate
samples per second
#define AV_CH_LOW_FREQUENCY_2
void ff_af_queue_close(AudioFrameQueue *afq)
Close AudioFrameQueue.
#define AV_CH_LAYOUT_MONO
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
Initialize AudioFrameQueue.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
This structure describes decoded (raw) audio or video data.
enum AVChannel av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx)
Get the channel with the given index in a channel layout.
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
enum AVChannelOrder order
Channel order used in this layout.
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
int nb_channels
Number of channels in this layout.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
static AVFrame * ff_bufqueue_get(struct FFBufQueue *queue)
Get the first buffer from the queue and remove it.
#define AV_CH_SURROUND_DIRECT_RIGHT
#define AV_CH_LAYOUT_6POINT0
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
struct FFBufQueue used_frame_queue
#define MP4DecSpecificDescrTag
AVChannelLayout ch_layout
Audio channel layout.
int initial_padding
Audio only.
int flags
AV_CODEC_FLAG_*.
int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f)
Add a frame to the queue.
#define AV_CH_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_SURROUND
#define AV_CH_LAYOUT_QUAD
static int ff_bufqueue_is_full(struct FFBufQueue *queue)
Test if a buffer queue is full.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define FF_PROFILE_AAC_HE_V2
#define AV_CH_LOW_FREQUENCY
#define AV_CHANNEL_LAYOUT_4POINT0
#define AV_CHANNEL_LAYOUT_7POINT1
int global_quality
Global quality for codecs which cannot change it per frame.
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t rc_max_rate
maximum bitrate
#define AV_CH_STEREO_RIGHT
#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK
Describe the class of an AVClass context structure.
int64_t bit_rate
the average bitrate
#define FF_PROFILE_AAC_LD
static void ff_bufqueue_discard_all(struct FFBufQueue *queue)
Unref and remove all buffers from the queue.
#define AV_CH_LAYOUT_5POINT1
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
#define FF_PROFILE_AAC_ELD
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
AVCodecID
Identify the syntax and semantics of the bitstream.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
An AVChannelLayout holds information about the channel layout of audio data.
#define FF_PROFILE_AAC_LOW
enum AVSampleFormat sample_fmt
audio sample format
#define MKBETAG(a, b, c, d)
#define AV_CHANNEL_LAYOUT_6POINT0
#define AVERROR_EXTERNAL
Generic error in an external library.
static void ff_bufqueue_add(void *log, struct FFBufQueue *queue, AVFrame *buf)
Add a buffer to the queue.
#define AV_CH_TOP_BACK_RIGHT
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e. the same channels are present on the same positions in both.
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
#define AV_CH_LAYOUT_OCTAGONAL
#define AV_CH_LAYOUT_5POINT0
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Structure holding the queue.
#define AV_CHANNEL_LAYOUT_QUAD
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
unsigned short available
number of available buffers
#define AV_CH_BACK_CENTER
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const uint8_t * buffer_end
#define FF_PROFILE_AAC_HE
int block_align
Number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define AV_CH_LAYOUT_SURROUND
#define AV_CHANNEL_LAYOUT_7POINT0
#define AV_INPUT_BUFFER_PADDING_SIZE
AudioConverterRef converter
main external API structure.
const VDPAUPixFmtMap * map
#define AV_CHANNEL_LAYOUT_MONO
This structure stores compressed data.
#define AV_CH_LAYOUT_4POINT0
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
#define AV_CHANNEL_LAYOUT_6POINT1
#define AV_CHANNEL_LAYOUT_5POINT0
@ AV_SAMPLE_FMT_DBL
double
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
#define AV_CHANNEL_LAYOUT_5POINT1
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
struct FFBufQueue frame_queue