Go to the documentation of this file.
43 #define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
45 #define SAMPLES_BUF_SIZE 4096
68 float scale_factor_inv_table[64];
120 s->bitrate_index =
i;
125 s->frame_size = ((int)
a) * 8;
129 s->frame_frac_incr = (int)((
a -
floor(
a)) * 65536.0);
138 ff_dlog(avctx,
"%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
141 for(
i=0;
i<
s->nb_channels;
i++)
142 s->samples_offset[
i] = 0;
150 s->filter_bank[
i] = v;
154 s->filter_bank[512 -
i] = v;
158 v = (int)(
exp2((3 -
i) / 3.0) * (1 << 20));
161 s->scale_factor_table[
i] = v;
163 s->scale_factor_inv_table[
i] =
exp2(-(3 -
i) / 3.0) / (
float)(1 << 20);
166 s->scale_factor_shift[
i] = 21 -
P - (
i / 3);
167 s->scale_factor_mult[
i] = (1 <<
P) *
exp2((
i % 3) / 3.0);
182 s->scale_diff_table[
i] = v;
191 s->total_quant_bits[
i] = 12 * v;
204 for(j=31;j>=3;j-=2)
tab[j] +=
tab[j - 2];
248 x1 =
MUL((t[8] - x2), xp[0]);
249 x2 =
MUL((t[8] + x2), xp[1]);
262 xr =
MUL(t[28],xp[0]);
266 xr =
MUL(t[4],xp[1]);
267 t[ 4] = (t[24] - xr);
268 t[24] = (t[24] + xr);
270 xr =
MUL(t[20],xp[2]);
274 xr =
MUL(t[12],xp[3]);
275 t[12] = (t[16] - xr);
276 t[16] = (t[16] + xr);
281 for (
i = 0;
i < 4;
i++) {
304 xr =
MUL(t1[0], *xp);
317 #define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)
327 offset =
s->samples_offset[ch];
328 out = &
s->sb_samples[ch][0][0][0];
337 p =
s->samples_buf[ch] +
offset;
341 sum = p[0*64] * q[0*64];
342 sum += p[1*64] * q[1*64];
343 sum += p[2*64] * q[2*64];
344 sum += p[3*64] * q[3*64];
345 sum += p[4*64] * q[4*64];
346 sum += p[5*64] * q[5*64];
347 sum += p[6*64] * q[6*64];
348 sum += p[7*64] * q[7*64];
365 s->samples_buf[ch], (512 - 32) * 2);
369 s->samples_offset[ch] =
offset;
373 unsigned char scale_code[
SBLIMIT],
374 unsigned char scale_factors[
SBLIMIT][3],
375 int sb_samples[3][12][
SBLIMIT],
378 int *p, vmax, v, n,
i, j, k,
code;
380 unsigned char *sf = &scale_factors[0][0];
382 for(j=0;j<sblimit;j++) {
385 p = &sb_samples[
i][0][j];
398 index = (21 - n) * 3 - 3;
400 while (vmax <= s->scale_factor_table[
index+1])
418 d1 =
s->scale_diff_table[sf[0] - sf[1] + 64];
419 d2 =
s->scale_diff_table[sf[1] - sf[2] + 64];
422 switch(d1 * 5 + d2) {
454 sf[1] = sf[2] = sf[0];
459 sf[0] = sf[1] = sf[2];
465 sf[0] = sf[2] = sf[1];
471 sf[1] = sf[2] = sf[0];
479 sf[0], sf[1], sf[2], d1, d2,
code);
480 scale_code[j] =
code;
492 for(
i=0;
i<
s->sblimit;
i++) {
498 #define SB_NOTALLOCATED 0
499 #define SB_ALLOCATED 1
510 int i, ch,
b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
514 const unsigned char *alloc;
516 memcpy(smr, smr1,
s->nb_channels *
sizeof(
short) *
SBLIMIT);
521 max_frame_size =
s->frame_size;
522 s->frame_frac +=
s->frame_frac_incr;
523 if (
s->frame_frac >= 65536) {
524 s->frame_frac -= 65536;
532 current_frame_size = 32;
533 alloc =
s->alloc_table;
534 for(
i=0;
i<
s->sblimit;
i++) {
536 current_frame_size += incr *
s->nb_channels;
544 for(ch=0;ch<
s->nb_channels;ch++) {
545 for(
i=0;
i<
s->sblimit;
i++) {
546 if (smr[ch][
i] > max_smr && subband_status[ch][
i] !=
SB_NOMORE) {
547 max_smr = smr[ch][
i];
555 ff_dlog(
NULL,
"current=%d max=%d max_sb=%d max_ch=%d alloc=%d\n",
556 current_frame_size, max_frame_size, max_sb, max_ch,
561 alloc =
s->alloc_table;
562 for(
i=0;
i<max_sb;
i++) {
563 alloc += 1 << alloc[0];
569 incr +=
s->total_quant_bits[alloc[1]];
573 incr =
s->total_quant_bits[alloc[
b + 1]] -
574 s->total_quant_bits[alloc[
b]];
577 if (current_frame_size + incr <= max_frame_size) {
580 current_frame_size += incr;
582 smr[max_ch][max_sb] = smr1[max_ch][max_sb] -
quant_snr[alloc[
b]];
584 if (
b == ((1 << alloc[0]) - 1))
585 subband_status[max_ch][max_sb] =
SB_NOMORE;
590 subband_status[max_ch][max_sb] =
SB_NOMORE;
593 *padding = max_frame_size - current_frame_size;
605 int i, j, k, l, bit_alloc_bits,
b, ch;
628 for(
i=0;
i<
s->sblimit;
i++) {
629 bit_alloc_bits =
s->alloc_table[j];
630 for(ch=0;ch<
s->nb_channels;ch++) {
633 j += 1 << bit_alloc_bits;
637 for(
i=0;
i<
s->sblimit;
i++) {
638 for(ch=0;ch<
s->nb_channels;ch++) {
645 for(
i=0;
i<
s->sblimit;
i++) {
646 for(ch=0;ch<
s->nb_channels;ch++) {
648 sf = &
s->scale_factors[ch][
i][0];
649 switch(
s->scale_code[ch][
i]) {
673 for(
i=0;
i<
s->sblimit;
i++) {
674 bit_alloc_bits =
s->alloc_table[j];
675 for(ch=0;ch<
s->nb_channels;ch++) {
680 qindex =
s->alloc_table[j+
b];
683 sample =
s->sb_samples[ch][k][l + m][
i];
688 a = (
float)
sample *
s->scale_factor_inv_table[
s->scale_factors[ch][
i][k]];
689 q[m] = (
int)((
a + 1.0) *
steps * 0.5);
694 e =
s->scale_factors[ch][
i][k];
695 shift =
s->scale_factor_shift[e];
696 mult =
s->scale_factor_mult[e];
707 q[m] = (
q1 * (unsigned)
steps) >> (
P + 1);
727 j += 1 << bit_alloc_bits;
733 for(
i=0;
i<padding;
i++)
741 const int16_t *
samples = (
const int16_t *)
frame->data[0];
746 for(
i=0;i<s->nb_channels;
i++) {
750 for(
i=0;
i<
s->nb_channels;
i++) {
752 s->sb_samples[
i],
s->sblimit);
754 for(
i=0;
i<
s->nb_channels;
i++) {
static void idct32(int *out, int *tab)
int frame_size
Number of samples per channel in an audio frame.
static const uint8_t q1[256]
int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]
int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf)
Filter the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining lists, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static int put_bytes_output(const PutBitContext *s)
int sample_rate
samples per second
static const FFCodecDefault mp2_defaults[]
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static void compute_scale_factors(MpegAudioContext *s, unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static const uint16_t table[]
const int ff_mpa_quant_bits[17]
unsigned char scale_diff_table[128]
int nb_channels
Number of channels in this layout.
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]
unsigned short scale_factor_mult[64]
unsigned short total_quant_bits[17]
AVChannelLayout ch_layout
Audio channel layout.
static const unsigned char nb_scale_factors[4]
int initial_padding
Audio only.
static const struct twinvq_data tab
static int bit_alloc(AC3EncodeContext *s, int snr_offset)
Run the bit allocation with a given SNR offset.
static int16_t mult(Float11 *f1, Float11 *f2)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const int ff_mpa_quant_steps[17]
int scale_factor_table[64]
unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]
static __device__ float floor(float a)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
static const int costab32[30]
const unsigned char *const ff_mpa_alloc_tables[5]
static av_always_inline int64_t ff_samples_to_time_base(const AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
int64_t bit_rate
the average bitrate
static const float fixed_smr[SBLIMIT]
static const int bitinv32[32]
static const unsigned short quant_snr[17]
const int32_t ff_mpa_enwindow[257]
static void encode_frame(MpegAudioContext *s, unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int padding)
const unsigned char * alloc_table
static int shift(int a, int b)
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT])
short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]
static void compute_bit_allocation(MpegAudioContext *s, short smr1[MPA_MAX_CHANNELS][SBLIMIT], unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int *padding)
int8_t scale_factor_shift[64]
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a cast, a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf offset
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
and forward it; test the status of outputs and forward it to the corresponding input, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some inputs, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code
const int ff_mpa_sblimit_table[5]
static av_cold int MPA_encode_init(AVCodecContext *avctx)
int samples_offset[MPA_MAX_CHANNELS]
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static const int16_t steps[16]
main external API structure.
const uint16_t ff_mpa_freq_tab[3]
Filter the word “frame” indicates either a video frame or a group of audio samples
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
const FF_VISIBILITY_PUSH_HIDDEN uint16_t ff_mpa_bitrate_tab[2][3][15]
This structure stores compressed data.
#define MPA_MAX_CODED_FRAME_SIZE
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.