#include "config_components.h"
static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
                                   int32_t samples[4],
                                   int32_t subband_samples[4])
{
    int32_t intermediate_samples[4];
    int i;

    /* outer stage: split the 4 input samples into 2 intermediate subbands,
       downsampled to 2 samples each (leading call arguments missing from this listing) */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->outer_filter_signal, aptx_qmf_outer_coeffs, /* ... */
                                    &intermediate_samples[0+i],
                                    &intermediate_samples[2+i]);

    /* inner stage: split each intermediate subband again, giving the 4 final subband samples */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i], aptx_qmf_inner_coeffs, /* ... */
                                    &intermediate_samples[2*i],
                                    &subband_samples[2*i+0],
                                    &subband_samples[2*i+1]);
}
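The two loops above form a two-stage analysis tree: the outer stage splits each group of 4 input samples into a low and a high intermediate band, and the inner stage splits each of those again, so every 4 PCM samples yield one sample in each of the 4 subbands. As a rough sketch of that data flow only (my illustration, not the real polyphase FIR banks, which use FILTER_TAPS-tap filters and fixed-point clipping):

#include <stdint.h>

/* Toy illustration: two-stage sum/difference split mirroring the tree shape
 * above. The real code runs each stage through aptx_qmf_polyphase_analysis()
 * with proper QMF coefficients; this sketch only shows the structure. */
static void toy_qmf_tree(const int32_t samples[4], int32_t subband[4])
{
    int32_t low[2], high[2];

    for (int i = 0; i < 2; i++) {            /* outer stage: 4 samples -> 2 + 2 */
        low[i]  = (samples[2*i] + samples[2*i+1]) >> 1;
        high[i] = (samples[2*i] - samples[2*i+1]) >> 1;
    }

    subband[0] = (low[0]  + low[1])  >> 1;   /* inner stage: LL */
    subband[1] = (low[0]  - low[1])  >> 1;   /* LH */
    subband[2] = (high[0] + high[1]) >> 1;   /* HL */
    subband[3] = (high[0] - high[1]) >> 1;   /* HH */
}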
    for (i = nb_intervals >> 1; i > 0; i >>= 1)
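This loop is the stride-halving search inside aptx_bin_search(): the stride starts at half the (power-of-two) table size and is halved each iteration, advancing the index whenever the probed entry is still not above the target. A self-contained sketch of the same idiom, under the assumption that the table is sorted ascending and its first entry does not exceed the value:

#include <stdint.h>

/* Sketch only: returns the largest idx with table[idx] <= value, assuming
 * table[] is sorted ascending, nb_entries is a power of two and
 * table[0] <= value. Same loop shape as the quantizer's interval search. */
static int stride_search(const int32_t *table, int nb_entries, int32_t value)
{
    int idx = 0;

    for (int i = nb_entries >> 1; i > 0; i >>= 1)
        if (table[idx + i] <= value)
            idx += i;

    return idx;   /* e.g. table = {0, 10, 20, 30}, value = 25 -> idx = 2 */
}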
static void aptx_quantize_difference(Quantize *quantize, int32_t sample_difference,
                                     int32_t dither, int32_t quantization_factor,
                                     ConstTables *tables)
{
    const int32_t *intervals = tables->quantize_intervals;
    int32_t quantized_sample, dithered_sample, parity_change;
    int32_t d, mean, interval, inv, sample_difference_abs;
    int64_t error;

    sample_difference_abs = FFABS(sample_difference);
    sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);

    quantized_sample = aptx_bin_search(/* ... */ quantization_factor,
                                       intervals, tables->tables_size);

    /* (listing gap: d is first derived from the dither value) */
    d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);

    intervals += quantized_sample;
    mean = (intervals[1] + intervals[0]) / 2;
    interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);

    /* (listing gap: dithered_sample is computed from dither, interval, mean and d) */
    error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);

    parity_change = quantized_sample;
    /* (listing gap: quantize->error is set and quantized_sample / parity_change
       are adjusted depending on the sign of error) */

    inv = -(sample_difference < 0);
    quantize->quantized_sample               = quantized_sample ^ inv;
    quantize->quantized_sample_parity_change = parity_change    ^ inv;
}
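The last three lines rely on two small two's-complement idioms: `-(x < 0)` is 0 for non-negative x and all-ones (-1) otherwise, so XOR-ing with it leaves a value unchanged or takes its one's complement, and OR-ing it with 1 yields +1 or -1. A minimal, self-contained demonstration (my example, not part of the file):

#include <assert.h>
#include <stdint.h>

/* Demonstrates the sign-folding idioms used in aptx_quantize_difference(). */
static int32_t fold_sign(int32_t quantized, int32_t sample_difference)
{
    int32_t inv = -(sample_difference < 0);  /* 0x00000000 or 0xFFFFFFFF */
    return quantized ^ inv;                  /* q for >= 0, ~q for < 0 */
}

int main(void)
{
    assert(fold_sign(5,  100) ==  5);        /* unchanged for a positive difference */
    assert(fold_sign(5, -100) == ~5);        /* one's complement for a negative one */
    assert((-(( 1) < 0) | 1) ==  1);         /* sign factor as used for 'interval'  */
    assert((-((-1) < 0) | 1) == -1);
    return 0;
}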
    for (subband = 0; subband < NB_SUBBANDS; subband++) {
        aptx_quantize_difference(&channel->quantize[subband], /* ... */
                                 channel->dither[subband],
                                 channel->invert_quantize[subband].quantization_factor,
                                 &ff_aptx_quant_tables[hd][subband]);
    }
        static const int map[] = { 1, 2, 0, 3 };
        /* (listing gap: min points at one Quantize and the loops below scan every
           channel and subband, in map[] order, for the smallest quantization error) */
                if (c->quantize[map[i]].error < min->error)
                    min = &c->quantize[map[i]];

        /* flip the least damaging quantized sample to its parity-change variant */
        min->quantized_sample = min->quantized_sample_parity_change;
static uint16_t aptx_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
         | (((channel->quantize[2].quantized_sample & 0x03)         ) << 11)
         | (((channel->quantize[1].quantized_sample & 0x0F)         ) <<  7)
         | (((channel->quantize[0].quantized_sample & 0x7F)         ) <<  0);
}
static uint32_t aptxhd_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
         | (((channel->quantize[2].quantized_sample & 0x00F)         ) << 15)
         | (((channel->quantize[1].quantized_sample & 0x03F)         ) <<  9)
         | (((channel->quantize[0].quantized_sample & 0x1FF)         ) <<  0);
}
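The two packers place the quantized subband codes into fixed bit fields: 7, 4 and 2 bits for the three lower subbands plus a 3-bit top field whose low bit is the parity bit (16-bit aptX codeword), and 9, 6 and 4 bits plus a 5-bit top field for the 24-bit aptX HD codeword. A hypothetical inverse for the 16-bit layout, extracting the raw fields again (a sketch mirroring the masks above, not the library's decoder):

#include <stdint.h>

/* Sketch: pull the raw bit fields back out of a 16-bit aptX codeword, using
 * the same masks and shifts as aptx_pack_codeword() above. Subband 3 only
 * stores its two upper bits; bit 13 carries the parity bit instead. */
static void unpack_codeword_sketch(uint16_t codeword, int32_t fields[4], int *parity)
{
    fields[0] = (codeword >>  0) & 0x7F;  /* 7-bit field                      */
    fields[1] = (codeword >>  7) & 0x0F;  /* 4-bit field                      */
    fields[2] = (codeword >> 11) & 0x03;  /* 2-bit field                      */
    fields[3] = (codeword >> 13) & 0x06;  /* upper 2 bits of the 3-bit field  */
    *parity   = (codeword >> 13) & 0x01;  /* low bit of that field is parity  */
}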
    output_size = s->block_size * frame->nb_samples / 4;
    /* (listing gap: the output packet buffer is allocated with ff_get_encode_buffer()) */

    /* one coded block of s->block_size bytes per group of 4 samples per channel */
    for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
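The packet size is therefore block_size bytes per group of 4 input samples per channel. A worked example, under the assumption that block_size is 4 bytes for standard aptX (16 bits per channel, stereo) and 6 bytes for aptX HD (24 bits per channel):

#include <stdio.h>

int main(void)
{
    const int nb_samples    = 1024;     /* samples per channel in one frame */
    const int aptx_block    = 4;        /* assumed: 2 bytes x 2 channels    */
    const int aptx_hd_block = 6;        /* assumed: 3 bytes x 2 channels    */

    printf("aptX:    %d bytes\n", aptx_block    * nb_samples / 4);  /* 1024 */
    printf("aptX HD: %d bytes\n", aptx_hd_block * nb_samples / 4);  /* 1536 */
    return 0;
}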
#if CONFIG_APTX_ENCODER
const FFCodec ff_aptx_encoder = {
    /* ... */
    .p.supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
    /* ... */
};
#endif

#if CONFIG_APTX_HD_ENCODER
const FFCodec ff_aptx_hd_encoder = {
    CODEC_LONG_NAME("aptX HD (Audio Processing Technology for Bluetooth)"),
    /* ... */
    .p.supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
    /* ... */
};
#endif
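For context, opening this encoder through the public libavcodec API looks roughly like the sketch below (my example, error handling trimmed; it assumes FFmpeg was built with the aptX encoder enabled). The parameters match what the listing advertises: planar signed 32-bit samples, stereo, and one of the supported sample rates.

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

/* Sketch: open the "aptx" (or "aptx_hd") encoder with supported parameters. */
static AVCodecContext *open_aptx_encoder(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("aptx");
    AVCodecContext *ctx;

    if (!codec || !(ctx = avcodec_alloc_context3(codec)))
        return NULL;

    ctx->sample_fmt  = AV_SAMPLE_FMT_S32P;   /* planar signed 32-bit samples   */
    ctx->sample_rate = 44100;                /* any rate from the list above   */
    av_channel_layout_copy(&ctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}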