#define CASE_0(codec_id, ...)
#define CASE_1(codec_id, ...) \
#define CASE_2(enabled, codec_id, ...) \
        CASE_ ## enabled(codec_id, __VA_ARGS__)
#define CASE_3(config, codec_id, ...) \
        CASE_2(config, codec_id, __VA_ARGS__)
#define CASE(codec, ...) \
        CASE_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
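/*
 * The CASE_3/CASE_2 indirection forces CONFIG_<codec>_ENCODER to expand to its
 * 0/1 value before token pasting, so CASE(codec, ...) resolves to either
 * CASE_0 (which drops its arguments) or CASE_1 (which emits the per-codec
 * switch-case body). For example, CASE(ADPCM_IMA_WAV, ...) is compiled in only
 * when CONFIG_ADPCM_IMA_WAV_ENCODER is 1.
 */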
#define FREEZE_INTERVAL 128
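/*
 * During the trellis search further below, the best path found so far is
 * committed to the output roughly every FREEZE_INTERVAL samples, which bounds
 * the amount of path history that has to be kept alive.
 */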
        (s->block_size & (s->block_size - 1))) {
        int frontier, max_paths;
        if ((unsigned)avctx->trellis > 16U) {
        frontier = 1 << avctx->trellis;
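        /* 2^trellis candidate decoder states are kept per input sample; the
           trellis option was range-checked above to be at most 16. */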
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7);
        for (int i = 0; i < 7; i++) {
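            /* Each iteration is expected to write one MS ADPCM predictor
               coefficient pair (AdaptCoeff1/AdaptCoeff2) into the extradata. */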
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
            return AVERROR(EINVAL);
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 22050\n");
            return AVERROR(EINVAL);
            av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n");
            return AVERROR(EINVAL);
    const int sign = (delta < 0) * 8;
    nibble = sign | nibble;
    c->prev_sample += diff;
    int nibble = 8*(delta < 0);
    c->prev_sample -= diff;
    c->prev_sample += diff;
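/*
 * MS ADPCM quantisation (fragments below): the next sample is predicted from
 * the two previous decoded samples and the per-block coefficient pair, the
 * residual is divided by the adaptive step c->idelta with a half-step rounding
 * bias, and the decoded sample and updated delta are fed back into the channel
 * state.
 */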
                ((c->sample2) * (c->coeff2))) / 64;
        bias =  c->idelta / 2;
        bias = -c->idelta / 2;
    nibble = (nibble + bias) / c->idelta;
    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
    c->sample2 = c->sample1;
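/*
 * adpcm_compress_trellis() below performs a Viterbi-style search: for each
 * input sample it keeps up to "frontier" candidate decoder states, scored by
 * the accumulated squared error against the source, deduplicated by decoded
 * sample value through s->trellis_hash, and periodically frozen so the best
 * path can be written out.
 */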
                                   const int16_t *samples, uint8_t *dst,
    const int frontier = 1 << avctx->trellis;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));
    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0] = node_buf + frontier;
        nodes[0]->step = c->step_index;
        nodes[0]->step = c->idelta;
            nodes[0]->step = 127;
            nodes[0]->step = c->step;
    for (i = 0; i < n; i++) {
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step = nodes[j]->step;
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
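/*
 * STORE_NODE decodes one candidate nibble, accumulates its squared error
 * (ssd), skips candidates whose decoded sample value was already produced in
 * the current generation (tracked through the hash table), and inserts the
 * survivor into nodes_next, which is maintained as a partial binary heap of
 * size frontier.
 */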
#define STORE_NODE(NAME, STEP_INDEX)\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*(unsigned)d;\
                    if (ssd < nodes[j]->ssd)\
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                    if (heap_pos < frontier) {\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                    u = nodes_next[pos];\
                        av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        nodes_next[pos] = u;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev = nodes[j]->path;\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
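/*
 * LOOP_NODES is the shared candidate loop for the step-table codecs (the IMA
 * and Yamaha variants): quotients around (sample - predictor) / step are
 * mapped to sign-magnitude nibbles, decoded via ff_adpcm_yamaha_difflookup,
 * and handed to STORE_NODE.
 */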
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
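/*
 * Housekeeping in the main loop: the hash generation counter wraps at 255, so
 * the table is cleared when it does; all SSD scores are renormalised against
 * the best node before they can overflow; and once FREEZE_INTERVAL samples
 * have accumulated, the best path so far is written to dst and the frontier is
 * collapsed back to that single node.
 */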
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
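/*
 * Argonaut Games (ARGO) ADPCM: each nibble appears to come from a small linear
 * predictor (the flag selecting a first- or second-order form), and
 * adpcm_argo_compress_block() returns the accumulated error of a given
 * (shift, flag) pair so the frame encoder can brute-force the best combination
 * per channel before writing the block.
 */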
#if CONFIG_ADPCM_ARGO_ENCODER
    return (nibble >> shift) & 0x0F;
                                         const int16_t *samples, int nsamples,
    for (int n = 0; n < nsamples; n++) {
    int st, pkt_size, ret;
    samples_p = (int16_t **)frame->extended_data;
        int blocks = (frame->nb_samples - 1) / 8;
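        /* Per-channel IMA WAV block header: the first input sample seeds the
           predictor and is stored as a little-endian 16-bit value, followed by
           the channel's step index. */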
            status->prev_sample = samples_p[ch][0];
            bytestream_put_le16(&dst, status->prev_sample);
            *dst++ = status->step_index;
            for (int ch = 0; ch < avctx->channels; ch++) {
                                       buf + ch * blocks * 8, &c->status[ch],
            for (int i = 0; i < blocks; i++) {
                for (int ch = 0; ch < avctx->channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (int j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
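                    /* Two 4-bit codes per byte, low nibble first: the
                       per-channel nibble buffers produced by the trellis
                       search are interleaved four bytes (eight samples) per
                       channel at a time. */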
            for (int i = 0; i < blocks; i++) {
                for (int ch = 0; ch < avctx->channels; ch++) {
                    const int16_t *smp = &samples_p[ch][1 + i * 8];
                    for (int j = 0; j < 8; j += 2) {
        for (int ch = 0; ch < avctx->channels; ch++) {
                for (int i = 0; i < 64; i++)
                for (int i = 0; i < 64; i += 2) {
        for (int i = 0; i < frame->nb_samples; i++) {
            for (int ch = 0; ch < avctx->channels; ch++) {
        for (int n = frame->nb_samples / 2; n > 0; n--) {
            for (int ch = 0; ch < avctx->channels; ch++) {
        const int n = frame->nb_samples - 1;
                                   buf + n, &c->status[1], n,
            for (int i = 0; i < n; i++) {
            for (int i = 1; i < frame->nb_samples; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
            bytestream_put_le16(&dst, c->status[i].sample1);
            bytestream_put_le16(&dst, c->status[i].sample2);
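            /* MS ADPCM block header fields, per channel: the adaptive delta is
               clamped to a minimum of 16 before being stored, and the two
               priming samples (sample1, sample2) are written out verbatim as
               little-endian values. */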
                for (int i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
                for (int i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            for (int i = 7 * avctx->channels; i < avctx->block_align; i++) {
            int n = frame->nb_samples / 2;
                for (int i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
                for (int i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            for (n *= avctx->channels; n > 0; n--) {
        for (int n = frame->nb_samples / 2; n > 0; n--) {
            for (int ch = 0; ch < avctx->channels; ch++) {
        c->status[0].prev_sample = *samples;
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        bytestream_put_byte(&dst, c->status[0].step_index);
        bytestream_put_byte(&dst, 0);
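        /* This appears to be the mono IMA AMV header: the initial predictor as
           a little-endian 16-bit value, the step index, and a zero pad byte. */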
            const int n = frame->nb_samples >> 1;
            for (int i = 0; i < n; i++)
                bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);
        } else for (int n = frame->nb_samples >> 1; n > 0; n--) {
            bytestream_put_byte(&dst, nibble);
            bytestream_put_byte(&dst, nibble);
        for (int ch = 0; ch < avctx->channels; ch++) {
            int64_t error = INT64_MAX, tmperr = INT64_MAX;
            int saved1 = c->status[ch].sample1;
            int saved2 = c->status[ch].sample2;
            for (int s = 2; s < 18 && tmperr != 0; s++) {
                for (int f = 0; f < 2 && tmperr != 0; f++) {
                    c->status[ch].sample1 = saved1;
                    c->status[ch].sample2 = saved2;
                    tmperr = adpcm_argo_compress_block(c->status + ch, NULL, samples_p[ch],
                    if (tmperr < error) {
            c->status[ch].sample1 = saved1;
            c->status[ch].sample2 = saved2;
            adpcm_argo_compress_block(c->status + ch, &pb, samples_p[ch],
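            /* The loops above brute-force every shift in [2, 17] and both
               predictor flags, restoring the saved channel state before each
               trial encode (pb == NULL); the combination with the lowest error
               is then re-encoded for real into the PutBitContext. */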
        for (int n = frame->nb_samples / 2; n > 0; n--) {
            for (int ch = 0; ch < avctx->channels; ch++) {
        .name        = "block_size",
        .help        = "set the block size",
        .default_val = {.i64 = 1024},
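        /* 1024 samples per block by default; adpcm_encode_init() rejects block
           sizes that are not a power of two. */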
#define ADPCM_ENCODER_0(id_, name_, sample_fmts_, capabilities_, long_name_)
#define ADPCM_ENCODER_1(id_, name_, sample_fmts_, capabilities_, long_name_) \
const AVCodec ff_ ## name_ ## _encoder = {                                   \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),                      \
    .type           = AVMEDIA_TYPE_AUDIO,                                    \
    .priv_data_size = sizeof(ADPCMEncodeContext),                            \
    .init           = adpcm_encode_init,                                     \
    .encode2        = adpcm_encode_frame,                                    \
    .close          = adpcm_encode_close,                                    \
    .sample_fmts    = sample_fmts_,                                          \
    .capabilities   = capabilities_ | AV_CODEC_CAP_DR1,                      \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
    .priv_class     = &adpcm_encoder_class,                                  \
#define ADPCM_ENCODER_2(enabled, codec_id, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_ ## enabled(codec_id, name, sample_fmts, capabilities, long_name)
#define ADPCM_ENCODER_3(config, codec_id, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_2(config, codec_id, name, sample_fmts, capabilities, long_name)
#define ADPCM_ENCODER(codec, name, sample_fmts, capabilities, long_name) \
    ADPCM_ENCODER_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, \
                    name, sample_fmts, capabilities, long_name)
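/*
 * Illustrative expansion (assuming an invocation such as
 * ADPCM_ENCODER(ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0,
 * "ADPCM IMA WAV")): the AVCodec ff_adpcm_ima_wav_encoder is defined when
 * CONFIG_ADPCM_IMA_WAV_ENCODER is 1, and the whole macro expands to nothing
 * otherwise.
 */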