40 #define BITSTREAM_READER_LE
51 #define QDM2_LIST_ADD(list, size, packet) \
54 list[size - 1].next = &list[size]; \
56 list[size].packet = packet; \
57 list[size].next = NULL; \
62 #define QDM2_SB_USED(sub_sampling) (((sub_sampling) >= 2) ? 30 : 8 << (sub_sampling))
64 #define FIX_NOISE_IDX(noise_idx) \
65 if ((noise_idx) >= 3840) \
66 (noise_idx) -= 3840; \
68 #define SB_DITHERING_NOISE(sb,noise_idx) (noise_table[(noise_idx)++] * sb_noise_attenuation[(sb)])
70 #define SAMPLES_NEEDED \
71 av_log (NULL,AV_LOG_INFO,"This file triggers some untested code. Please contact the developers.\n");
73 #define SAMPLES_NEEDED_2(why) \
74 av_log (NULL,AV_LOG_INFO,"This file triggers some missing code. Please contact the developers.\nPosition: %s\n",why);
76 #define QDM2_MAX_FRAME_SIZE 512
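As a quick orientation for readers of the listing, here is a minimal, self-contained sketch (not part of qdm2.c) showing how two of the helper macros above behave; the values follow directly from their definitions:

/* illustration only: evaluating QDM2_SB_USED and FIX_NOISE_IDX as defined above */
#include <stdio.h>

#define QDM2_SB_USED(sub_sampling) (((sub_sampling) >= 2) ? 30 : 8 << (sub_sampling))
#define FIX_NOISE_IDX(noise_idx) \
    if ((noise_idx) >= 3840)     \
        (noise_idx) -= 3840;

int main(void)
{
    int noise_idx = 4000;

    /* subbands in use per subsampling mode: prints "8 16 30" */
    printf("%d %d %d\n", QDM2_SB_USED(0), QDM2_SB_USED(1), QDM2_SB_USED(2));

    FIX_NOISE_IDX(noise_idx);      /* wraps the index back into the 3840-entry noise table */
    printf("%d\n", noise_idx);     /* prints 160 */
    return 0;
}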
198 0, 5, 1, 5, 5, 5, 5, 5, 2, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5, 5, 4
222 if ((value & ~3) > 0)
253 return (uint16_t)(value & 0xffff);
267 if (sub_packet->type == 0) {
268 sub_packet->size = 0;
273 if (sub_packet->type & 0x80) {
274 sub_packet->size <<= 8;
276 sub_packet->type &= 0x7f;
279 if (sub_packet->type == 0x7f)
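The fragments above (lines 267-279) show the subpacket header convention: type 0 means an empty packet, the 0x80 flag selects a 16-bit size, and type 0x7f escapes to an extended type byte. A hedged sketch of that decode logic, written against the QDM2SubPacket fields (type, size, data) documented further down this page; the exact reads in the real function may differ:

/* Sketch (assumption): decoding a subpacket header from a GetBitContext,
 * following the type/size rules visible in the fragments above. */
static void sketch_sub_packet_header(GetBitContext *gb, QDM2SubPacket *sub_packet)
{
    sub_packet->type = get_bits(gb, 8);

    if (sub_packet->type == 0) {
        sub_packet->size = 0;               /* empty packet */
        sub_packet->data = NULL;
    } else {
        sub_packet->size = get_bits(gb, 8);

        if (sub_packet->type & 0x80) {      /* 16-bit size follows */
            sub_packet->size <<= 8;
            sub_packet->size |= get_bits(gb, 8);
            sub_packet->type &= 0x7f;
        }

        if (sub_packet->type == 0x7f)       /* extended type byte */
            sub_packet->type |= get_bits(gb, 8) << 8;

        /* the real routine then points ->data at the current read position */
    }
}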
316 int i, j, n, ch, sum;
321 for (i = 0; i < n; i++) {
324 for (j = 0; j < 8; j++)
331 for (j = 0; j < 8; j++)
353 for (j = 0; j < 64; j++) {
378 for (j = 0; j < 64; ) {
379 if (coding_method[ch][sb][j] < 8)
381 if ((coding_method[ch][sb][j] - 8) > 22) {
409 for (k = 0; k < run; k++) {
411 int sbjk = sb + (j + k) / 64;
416 if (coding_method[ch][sbjk][(j + k) % 64] > coding_method[ch][sb][j]) {
420 memset(&coding_method[ch][sb][j + k], case_val,
422 memset(&coding_method[ch][sb][j + k], case_val,
443 int i, sb, ch, sb_used;
447 for (sb = 0; sb < 30; sb++)
448 for (i = 0; i < 8; i++) {
462 for (sb = 0; sb < sb_used; sb++)
464 for (i = 0; i < 64; i++) {
473 for (sb = 0; sb < sb_used; sb++) {
474 if ((sb >= 4) && (sb <= 23)) {
476 for (i = 0; i < 64; i++) {
490 for (i = 0; i < 64; i++) {
502 for (i = 0; i < 64; i++) {
534 int c, int superblocktype_2_3,
539 int add1, add2, add3, add4;
542 if (!superblocktype_2_3) {
547 for (sb = 0; sb < 30; sb++) {
548 for (j = 1; j < 63; j++) {
549 add1 = tone_level_idx[ch][sb][j] - 10;
552 add2 = add3 = add4 = 0;
568 tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1;
571 tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff;
573 tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1];
578 for (sb = 0; sb < 30; sb++)
579 for (j = 0; j < 64; j++)
580 acc += tone_level_idx_temp[ch][sb][j];
582 multres = 0x66666667LL * (acc * 10);
583 esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31);
585 for (sb = 0; sb < 30; sb++)
586 for (j = 0; j < 64; j++) {
587 comp = tone_level_idx_temp[ch][sb][j] * esp_40 * 10;
618 coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30) & 0xff;
620 for (sb = 0; sb < 30; sb++)
623 for (sb = 0; sb < 30; sb++)
624 for (j = 0; j < 64; j++)
626 if (coding_method[ch][sb][j] < 10)
627 coding_method[ch][sb][j] = 10;
630 if (coding_method[ch][sb][j] < 16)
631 coding_method[ch][sb][j] = 16;
633 if (coding_method[ch][sb][j] < 30)
634 coding_method[ch][sb][j] = 30;
639 for (sb = 0; sb < 30; sb++)
640 for (j = 0; j < 64; j++)
658 int length, int sb_min, int sb_max)
661 int joined_stereo, zero_encoding;
663 float type34_div = 0;
664 float type34_predictor;
666 int sign_bits[16] = {0};
670 for (sb=sb_min; sb < sb_max; sb++)
676 for (sb = sb_min; sb < sb_max; sb++) {
688 for (j = 0; j < 16; j++)
691 for (j = 0; j < 64; j++)
707 type34_predictor = 0.0;
710 for (j = 0; j < 128; ) {
715 for (k = 0; k < 5; k++) {
716 if ((j + 2 * k) >= 128)
727 for (k = 0; k < 5; k++)
730 for (k = 0; k < 5; k++)
733 for (k = 0; k < 10; k++)
756 for (k = 0; k < 5; k++) {
768 for (k = 0; k < 5; k++)
772 for (k = 0; k < 5; k++)
786 for (k = 0; k < 3; k++)
789 for (k = 0; k < 3; k++)
812 type34_div = (float)(1 << get_bits(gb, 2));
838 for (k = 0; k < run && j + k < 128; k++) {
842 if (sign_bits[(j + k) / 8])
851 for (k = 0; k < run; k++)
882 quantized_coeffs[0] = level;
884 for (i = 0; i < 7; ) {
896 for (k = 1; k <= run; k++)
929 for (sb = 0; sb < n; sb++)
931 for (j = 0; j < 8; j++) {
935 for (k=0; k < 8; k++) {
941 for (k=0; k < 8; k++)
948 for (sb = 0; sb < n; sb++)
956 for (j = 0; j < 8; j++)
962 for (sb = 0; sb < n; sb++)
964 for (j = 0; j < 8; j++) {
986 for (i = 1; i < n; i++)
991 for (j = 0; j < (8 - 1); ) {
998 for (k = 1; k <= run; k++)
1007 for (i = 0; i < 8; i++)
1101 if (nodes[0] && nodes[1] && nodes[2])
1107 if (nodes[0] && nodes[1] && nodes[3])
1122 int i, packet_bytes, sub_packet_size, sub_packets_D;
1123 unsigned int next_index = 0;
1164 for (i = 0; i < 6; i++)
1168 for (i = 0; packet_bytes > 0; i++) {
1185 if (next_index >= header.size)
1193 sub_packet_size = ((packet->size > 0xff) ? 1 : 0) + packet->size + 2;
1195 if (packet->type == 0)
1198 if (sub_packet_size > packet_bytes) {
1199 if (packet->type != 10 && packet->type != 11 && packet->type != 12)
1201 packet->size += packet_bytes - sub_packet_size;
1204 packet_bytes -= sub_packet_size;
1210 if (packet->type == 8) {
1213 } else if (packet->type >= 9 && packet->type <= 12) {
1216 } else if (packet->type == 13) {
1217 for (j = 0; j < 6; j++)
1219 } else if (packet->type == 14) {
1220 for (j = 0; j < 6; j++)
1222 } else if (packet->type == 15) {
1225 } else if (packet->type >= 16 && packet->type < 48 &&
1250 ((sub_packet >= 16) ? (sub_packet - 16) : sub_packet);
1262 int local_int_4, local_int_8, stereo_phase, local_int_10;
1263 int local_int_14, stereo_exp, local_int_20, local_int_28;
1277 if(local_int_4 < q->group_size)
1283 local_int_4 += local_int_10;
1284 local_int_28 += (1 << local_int_8);
1286 local_int_4 += 8 * local_int_10;
1287 local_int_28 += (8 << local_int_8);
1292 if (local_int_10 <= 2) {
1297 while (offset >= (local_int_10 - 1)) {
1298 offset += (1 - (local_int_10 - 1));
1299 local_int_4 += local_int_10;
1300 local_int_28 += (1 << local_int_8);
1307 local_int_14 = (offset >> local_int_8);
1330 if (stereo_phase < 0)
1335 int sub_packet = (local_int_20 + local_int_28);
1345 stereo_exp, stereo_phase);
1361 for (i = 0; i < 5; i++)
1384 (packet->type < 16 || packet->type >= 48 ||
1403 } else if (type == 31) {
1404 for (j = 0; j < 4; j++)
1406 } else if (type == 46) {
1407 for (j = 0; j < 6; j++)
1409 for (j = 0; j < 4; j++)
1415 for (i = 0, j = -1; i < 5; i++)
1430 const double iscale = 2.0 * M_PI / 512.0;
1452 for (i = 0; i < 2; i++) {
1458 for (i = 0; i < 4; i++) {
1474 const double iscale = 0.25 * M_PI;
1508 for (i = 0; i < 4; i++)
1521 if (offset < q->frequency_range) {
1564 int i, k, ch, sb_used, sub_sampling, dither_state = 0;
1570 for (i = 0; i < 8; i++)
1571 for (k = sb_used; k < SBLIMIT; k++)
1577 for (i = 0; i < 8; i++) {
1668 if (bytestream2_peek_be64(&gb) == (((uint64_t)MKBETAG('f', 'r', 'm', 'a') << 32) |
1669 (uint64_t)MKBETAG('Q', 'D', 'M', '2')))
1681 size = bytestream2_get_be32(&gb);
1690 if (bytestream2_get_be32(&gb) != MKBETAG('Q', 'D', 'C', 'A')) {
1697 avctx->channels = s->nb_channels = s->channels = bytestream2_get_be32(&gb);
1706 avctx->bit_rate = bytestream2_get_be32(&gb);
1707 s->group_size = bytestream2_get_be32(&gb);
1708 s->fft_size = bytestream2_get_be32(&gb);
1709 s->checksum_size = bytestream2_get_be32(&gb);
1710 if (s->checksum_size >= 1U << 28 || s->checksum_size <= 1) {
1715 s->fft_order = av_log2(s->fft_size) + 1;
1718 if ((s->fft_order < 7) || (s->fft_order > 9)) {
1724 s->group_order = av_log2(s->group_size) + 1;
1725 s->frame_size = s->group_size / 16;
1730 s->sub_sampling = s->fft_order - 7;
1731 s->frequency_range = 255 / (1 << (2 - s->sub_sampling));
1738 switch ((s->sub_sampling * 2 + s->channels - 1)) {
1739 case 0: tmp = 40; break;
1740 case 1: tmp = 48; break;
1741 case 2: tmp = 56; break;
1742 case 3: tmp = 72; break;
1743 case 4: tmp = 80; break;
1744 case 5: tmp = 100; break;
1745 default: tmp = s->sub_sampling; break;
1752 s->cm_table_select = tmp_val;
1755 s->coeff_per_sb_select = 0;
1757 s->coeff_per_sb_select = 1;
1759 s->coeff_per_sb_select = 2;
1761 if (s->fft_size != (1 << (s->fft_order - 1))) {
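To make the header math above concrete, a worked example; the values follow from the assignments at lines 1715-1731, and the 'frma'/'QDCA' extradata layout itself is not reproduced here:

/* Worked example (illustration only), for a stream advertising fft_size = 256:
 *   fft_order       = av_log2(256) + 1       = 9
 *   sub_sampling    = fft_order - 7          = 2
 *   frequency_range = 255 / (1 << (2 - 2))   = 255
 *   consistency     : fft_size == 1 << (fft_order - 1)  ->  256 == 256, OK
 * A 128-point FFT would give fft_order = 8, sub_sampling = 1, frequency_range = 127.
 */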
1846 int *got_frame_ptr, AVPacket *avpkt)
1850 int buf_size = avpkt->size;
1857 if(buf_size < s->checksum_size)
1861 frame->nb_samples = 16 * s->frame_size;
1866 for (i = 0; i < 16; i++) {
1869 out += s->channels * s->frame_size;
1874 return s->checksum_size;
#define SAMPLES_NEEDED_2(why)
static VLC fft_stereo_exp_vlc
static int qdm2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
static const int16_t fft_level_index_table[256]
MPADSPContext mpadsp
Synthesis filter.
static VLC vlc_tab_type30
static VLC vlc_tab_type34
int8_t quantized_coeffs[MPA_MAX_CHANNELS][10][8]
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
uint64_t channel_layout
Audio channel layout.
static int fix_coding_method_array(int sb, int channels, sb_int8_array coding_method)
Called while processing data from subpackets 11 and 12.
int sample_rate
samples per second
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
int synth_buf_offset[MPA_MAX_CHANNELS]
#define AV_CH_LAYOUT_MONO
static uint8_t random_dequant_index[256][5]
static int get_bits_count(const GetBitContext *s)
av_cold void ff_mpadsp_init(MPADSPContext *s)
static av_cold void qdm2_init_static_data(void)
Init static data (does not depend on specific file)
This structure describes decoded (raw) audio or video data.
int sub_packets_B
number of packets on 'B' list
static const int8_t coding_method_table[5][30]
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
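The decoder reads its Huffman-coded fields through get_vlc2() on top of the static VLC tables listed on this page; a hedged usage sketch, where 'some_vlc' and the 9/2 bit-count and table-depth parameters are placeholders rather than the real values:

/* Sketch (assumption): reading one symbol with the get_vlc2() signature shown above. */
static int sketch_read_symbol(GetBitContext *gb, const VLC *some_vlc)
{
    return get_vlc2(gb, some_vlc->table, 9, 2);
}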
int group_order
Parameters built from header parameters, do not change during playback.
static VLC vlc_tab_tone_level_idx_hi1
#define SOFTCLIP_THRESHOLD
static uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD+1]
float synth_buf[MPA_MAX_CHANNELS][512 *2]
QDM2SubPNode sub_packet_list_A[16]
list of all packets
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int qdm2_decode(QDM2Context *q, const uint8_t *in, int16_t *out)
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
static void process_subpacket_11(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 11.
int has_errors
packet has errors
int checksum_size
size of data block, used also for checksum
int frame_size
size of data frame
static void skip_bits(GetBitContext *s, int n)
static int synthfilt_build_sb_samples(QDM2Context *q, GetBitContext *gb, int length, int sb_min, int sb_max)
Called by process_subpacket_11 to process more data from subpacket 11 with sb 0-8.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
QDM2Complex complex[MPA_MAX_CHANNELS][256]
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
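All of the bitstream parsing in this file goes through the GetBitContext helpers declared on this page; a small, hedged usage sketch (buffer contents and field widths are made up for illustration, and BITSTREAM_READER_LE at the top of the file selects the little-endian reader):

/* Sketch (assumption): basic GetBitContext usage as employed throughout qdm2.c. */
static void sketch_bitreader(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    unsigned type, flag;

    init_get_bits(&gb, buf, buf_size * 8);   /* bit_size is given in bits */

    type = get_bits(&gb, 8);                 /* read an 8-bit field */
    flag = get_bits1(&gb);                   /* read a single bit   */
    skip_bits(&gb, 4);                       /* discard 4 bits      */

    if (get_bits_left(&gb) < 16)             /* bounds check before reading more */
        return;
    (void)type; (void)flag;
}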
static av_cold void init_noise_samples(void)
float output_buffer[QDM2_MAX_FRAME_SIZE *MPA_MAX_CHANNELS *2]
static void build_sb_samples_from_noise(QDM2Context *q, int sb)
Build subband samples with noise weighted by q->tone_level.
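A hedged sketch of what that weighting looks like, combining the dithering macros from the top of the file with the tone_level and sb_samples arrays documented on this page; loop bounds and the exact sample layout are assumptions:

/* Sketch (assumption): fill subband 'sb' with attenuated dither noise,
 * scaled per coefficient by q->tone_level, two samples per coefficient. */
static void sketch_noise_fill(QDM2Context *q, int sb)
{
    int ch, j;

    FIX_NOISE_IDX(q->noise_idx);             /* keep the index inside the noise table */

    for (ch = 0; ch < q->nb_channels; ch++)
        for (j = 0; j < 64; j++) {
            q->sb_samples[ch][j * 2][sb]     = SB_DITHERING_NOISE(sb, q->noise_idx) * q->tone_level[ch][sb][j];
            q->sb_samples[ch][j * 2 + 1][sb] = SB_DITHERING_NOISE(sb, q->noise_idx) * q->tone_level[ch][sb][j];
        }
}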
static const struct twinvq_data tab
static void process_synthesis_subpackets(QDM2Context *q, QDM2SubPNode *list)
Process new subpackets for synthesis filter.
av_cold void ff_rdft_end(RDFTContext *s)
QDM2SubPNode sub_packet_list_C[16]
packets with errors?
const uint8_t * data
pointer to subpacket data (points to input data buffer, it's not a private copy)
static const int switchtable[23]
FFTCoefficient fft_coefs[1000]
static void fill_coding_method_array(sb_int8_array tone_level_idx, sb_int8_array tone_level_idx_temp, sb_int8_array coding_method, int nb_channels, int c, int superblocktype_2_3, int cm_table_select)
Related to synthesis filter. Called by process_subpacket_11. c is built with data from subpacket 11. Mos...
static av_cold void rnd_table_init(void)
static void qdm2_fft_init_coefficient(QDM2Context *q, int sub_packet, int offset, int duration, int channel, int exp, int phase)
float ff_mpa_synth_window_float[]
#define AV_CH_LAYOUT_STEREO
static int process_subpacket_9(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 9, init quantized_coeffs with data from it.
unsigned int size
subpacket size
static void qdm2_decode_super_block(QDM2Context *q)
Decode superblock, fill packet lists.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void qdm2_fft_decode_tones(QDM2Context *q, int duration, GetBitContext *gb, int b)
static const float fft_tone_level_table[2][64]
const uint8_t * compressed_data
I/O data.
static const float dequant_1bit[2][3]
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
#define FIX_NOISE_IDX(noise_idx)
struct QDM2SubPNode * next
pointer to next packet in the list, NULL if leaf node
#define HARDCLIP_THRESHOLD
A node in the subpacket list.
#define QDM2_LIST_ADD(list, size, packet)
int do_synth_filter
used to perform or skip synthesis filter
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static const uint8_t coeff_per_sb_for_dequant[3][30]
static int init_quantized_coeffs_elem0(int8_t *quantized_coeffs, GetBitContext *gb)
Init the first element of a channel in quantized_coeffs with data from packet 10 (quantized_coeffs[ch...
static const float fft_tone_envelope_table[4][31]
static QDM2SubPNode * qdm2_search_subpacket_type_in_list(QDM2SubPNode *list, int type)
Return node pointer to first packet of requested type in list.
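A hedged sketch of such a lookup, using only the QDM2SubPNode fields documented on this page (packet, next) and the subpacket type field:

/* Sketch (assumption): linear search through a QDM2SubPNode list for a packet type. */
static QDM2SubPNode *sketch_search_type(QDM2SubPNode *list, int type)
{
    while (list && list->packet) {
        if (list->packet->type == type)
            return list;
        list = list->next;           /* NULL marks the end of the list (leaf node) */
    }
    return NULL;
}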
float tone_level[MPA_MAX_CHANNELS][30][64]
Mixed temporary data used in decoding.
static int qdm2_get_vlc(GetBitContext *gb, const VLC *vlc, int flag, int depth)
int8_t coding_method[MPA_MAX_CHANNELS][30][64]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_cold int qdm2_decode_close(AVCodecContext *avctx)
static uint16_t qdm2_packet_checksum(const uint8_t *data, int length, int value)
QDM2 checksum.
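Only the final return of this routine survives in the listing above (line 253); a hedged sketch of a checksum with that shape, i.e. a running value reduced over the packet bytes and truncated to 16 bits (the exact update rule is an assumption):

/* Sketch (assumption): accumulate over 'length' bytes starting from 'value',
 * then keep the low 16 bits, matching the visible return statement. */
static uint16_t sketch_packet_checksum(const uint8_t *data, int length, int value)
{
    int i;

    for (i = 0; i < length; i++)
        value -= data[i];

    return (uint16_t)(value & 0xffff);
}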
static const uint8_t last_coeff[3]
static VLC fft_stereo_phase_vlc
int64_t bit_rate
the average bitrate
static unsigned int get_bits1(GetBitContext *s)
static void qdm2_fft_tone_synthesizer(QDM2Context *q, int sub_packet)
float sb_samples[MPA_MAX_CHANNELS][128][SBLIMIT]
static void process_subpacket_12(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 12.
QDM2SubPNode sub_packet_list_D[16]
DCT packets.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int8_t tone_level_idx_base[MPA_MAX_CHANNELS][30][8]
enum AVSampleFormat sample_fmt
audio sample format
static void qdm2_calculate_fft(QDM2Context *q, int channel, int sub_packet)
#define MKBETAG(a, b, c, d)
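MKBETAG is what the extradata checks around lines 1668-1690 use to build the 'frma', 'QDM2' and 'QDCA' tags; a hedged sketch of an equivalent expression (the real macro lives in libavutil):

/* Sketch (assumption): packing four characters into a 32-bit big-endian tag,
 * so that a value like MKBETAG('Q','D','C','A') compares against bytestream2_get_be32(). */
static uint32_t sketch_mkbetag(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
    return ((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | d;
}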
static av_cold int qdm2_decode_init(AVCodecContext *avctx)
Init parameters from codec extradata.
static const uint8_t header[24]
int8_t tone_level_idx[MPA_MAX_CHANNELS][30][64]
#define QDM2_SB_USED(sub_sampling)
QDM2SubPacket sub_packets[16]
Packets and packet lists.
int fft_order
order of FFT (actually fftorder+1)
static uint8_t random_dequant_type24[128][3]
static const int vlc_stage3_values[60]
static VLC vlc_tab_tone_level_idx_mid
int channels
number of audio channels
av_cold int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans)
Set up a real FFT.
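The decoder sizes its real FFT from the fft_order computed in the header parsing above; a hedged usage sketch built on the ff_rdft_init/ff_rdft_end declarations on this page (IDFT_C2R is an assumed RDFTransformType value):

/* Sketch (assumption): initialising the inverse real FFT used for tone synthesis. */
static int sketch_rdft_setup(RDFTContext *rdft, int fft_order)
{
    int ret = ff_rdft_init(rdft, fft_order, IDFT_C2R);
    /* ...rdft->rdft_calc(rdft, samples) per channel, then ff_rdft_end(rdft)... */
    return ret;
}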
static const uint8_t fft_subpackets[32]
#define DECLARE_ALIGNED(n, t, v)
static VLC vlc_tab_tone_level_idx_hi2
int coeff_per_sb_select
selector for "num. of coeffs. per subband" tables. Can be 0, 1, 2
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int cm_table_select
selector for "coding method" tables. Can be 0, 1 (from init: 0-4)
static void qdm2_decode_fft_packets(QDM2Context *q)
@ AV_SAMPLE_FMT_S16
signed 16 bits
static void qdm2_decode_sub_packet_header(GetBitContext *gb, QDM2SubPacket *sub_packet)
Fill a QDM2SubPacket structure with packet type, size, and data pointer.
static av_cold void qdm2_init_vlc(void)
const char * name
Name of the codec implementation.
int sub_sampling
subsampling: 0=25%, 1=50%, 2=100%
int nb_channels
Parameters from codec header, do not change during playback.
static const int8_t tone_level_idx_offset_table[30][4]
static VLC fft_level_exp_alt_vlc
static void average_quantized_coeffs(QDM2Context *q)
Replace 8 elements with their average value.
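A hedged sketch of that averaging step for one group of eight quantized coefficients, mirroring the j < 8 accumulate and write-back loops visible at lines 324-331 (any rounding tweaks in the real routine are assumptions):

/* Sketch (assumption): average 8 consecutive quantized coefficients in place. */
static void sketch_average8(int8_t coeffs[8])
{
    int j, sum = 0;

    for (j = 0; j < 8; j++)
        sum += coeffs[j];
    sum /= 8;
    for (j = 0; j < 8; j++)
        coeffs[j] = sum;
}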
int8_t tone_level_idx_hi1[MPA_MAX_CHANNELS][3][8][8]
static void fill_tone_level_array(QDM2Context *q, int flag)
Related to synthesis filter. Called by process_subpacket_10.
static void init_tone_level_dequantization(QDM2Context *q, GetBitContext *gb)
Related to synthesis filter, process data from packet 10. Init part of quantized_coeffs via function i...
static const float fft_tone_sample_table[4][16][5]
static const float type34_delta[10]
static const uint8_t coeff_per_sb_for_avg[3][30]
#define FF_ARRAY_ELEMS(a)
int fft_size
size of FFT, in complex numbers
main external API structure.
static VLC fft_level_exp_vlc
static void qdm2_synthesis_filter(QDM2Context *q, int index)
int8_t sb_int8_array[2][30][64]
int noise_idx
index for dithering noise table
static float noise_samples[128]
int superblocktype_2_3
select fft tables and some algorithm based on superblock type
int channels
number of channels
QDM2SubPNode sub_packet_list_B[16]
FFT packets B are on list.
void ff_mpa_synth_filter_float(MPADSPContext *s, float *synth_buf_ptr, int *synth_buf_offset, float *window, int *dither_state, float *samples, ptrdiff_t incr, float *sb_samples)
int8_t tone_level_idx_hi2[MPA_MAX_CHANNELS][26]
static const float type30_dequant[8]
static int qdm2_get_se_vlc(const VLC *vlc, GetBitContext *gb, int depth)
static const int fft_cutoff_index_table[4][2]
static void process_subpacket_10(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 10 if not null, else.
int fft_coefs_min_index[5]
FFTTone fft_tones[1000]
FFT and tones.
int8_t tone_level_idx_mid[MPA_MAX_CHANNELS][26][8]
#define avpriv_request_sample(...)
static void qdm2_fft_generate_tone(QDM2Context *q, FFTTone *tone)
int fft_coefs_max_index[5]
#define QDM2_MAX_FRAME_SIZE
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
static VLC vlc_tab_fft_tone_offset[5]
void ff_mpa_synth_init_float(float *window)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
void(* rdft_calc)(struct RDFTContext *s, FFTSample *z)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define SB_DITHERING_NOISE(sb, noise_idx)
static const uint8_t dequant_table[64]
int8_t tone_level_idx_temp[MPA_MAX_CHANNELS][30][64]
int group_size
size of frame group (16 frames per group)
VLC_TYPE(* table)[2]
code, bits
static av_cold void softclip_table_init(void)
QDM2SubPacket * packet
packet
float samples[MPA_MAX_CHANNELS *MPA_FRAME_SIZE]