#define TM2_ESCAPE 0x80000000
huff->bits[huff->num] = prefix;
if ((ret = tm2_read_tree(ctx, prefix << 1, length + 1, huff)) < 0)
if ((ret = tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff)) < 0)
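/*
 * The two recursive calls above walk a binary code tree: the 0 branch
 * appends a 0 bit (prefix << 1), the 1 branch a 1 bit ((prefix << 1) | 1),
 * and the code length grows by one per level, while a leaf stores the
 * prefix built so far.  A minimal standalone sketch of the same idea; the
 * Table type, the 256-leaf cap and the read_bit callback are illustrative
 * and not part of truemotion2.c:
 */
#include <stdint.h>

typedef struct Table {
    uint32_t bits[256];   /* accumulated code for each leaf  */
    int      lens[256];   /* code length for each leaf       */
    int      num;         /* number of leaves filled so far  */
} Table;

typedef int (*read_bit_fn)(void *opaque);   /* 0 = leaf, 1 = internal node */

static int read_tree(void *opaque, read_bit_fn read_bit,
                     uint32_t prefix, int length, Table *t)
{
    if (!read_bit(opaque)) {                 /* leaf: record the code */
        if (t->num >= 256)
            return -1;
        t->bits[t->num] = prefix;
        t->lens[t->num] = length;
        t->num++;
        return 0;
    }
    /* internal node: descend into the 0 branch, then the 1 branch */
    if (read_tree(opaque, read_bit, prefix << 1, length + 1, t) < 0)
        return -1;
    return read_tree(opaque, read_bit, (prefix << 1) | 1, length + 1, t);
}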
if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
"nodes: %i\n", huff.nodes);
huff.lens, sizeof(int), sizeof(int),
huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
for (i = 0; i < code->length; i++)
#define TM2_OLD_HEADER_MAGIC 0x00000100
#define TM2_NEW_HEADER_MAGIC 0x00000101
if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
for (i = 0; i < d; i++) {
if (v & (1 << (mb - 1)))
ctx->deltas[stream_id][i] = v - (1 << mb);
ctx->deltas[stream_id][i] = 0;
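/*
 * The branch above sign-extends an mb-bit delta: each entry is read as an
 * unsigned mb-bit value, and when its top bit is set, subtracting (1 << mb)
 * maps it into the negative range; the remaining table slots are zeroed.
 * A standalone sketch of that arithmetic (the helper name is illustrative,
 * not a function from truemotion2.c):
 */
static int sign_extend_mb(unsigned v, int mb)
{
    if (v & (1u << (mb - 1)))        /* negative in mb-bit two's complement */
        return (int)v - (1 << mb);
    return (int)v;                   /* top bit clear: value kept as-is     */
}
/* e.g. with mb = 3: sign_extend_mb(7, 3) == -1 and sign_extend_mb(3, 3) == 3 */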
len = bytestream2_get_be32(&gb);
if (len >= INT_MAX/4-1 || len < 0 || skip > buf_size) {
toks = bytestream2_get_be32(&gb);
len = bytestream2_get_be32(&gb);
len = bytestream2_get_be32(&gb);
len = bytestream2_get_be32(&gb);
if ((toks < 0) || (toks > 0xFFFFFF)) {
len = bytestream2_get_be32(&gb);
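/*
 * Every stream header field is read as a big-endian 32-bit word through the
 * GetByteContext API (bytestream2_init / bytestream2_get_be32), and each
 * length is range-checked before it is trusted.  A minimal sketch of that
 * pattern, reusing bounds similar to the check above; the wrapper name is
 * illustrative and the sketch relies on the headers this file already
 * includes:
 */
static int read_checked_len(GetByteContext *gb, unsigned buf_size)
{
    unsigned len = bytestream2_get_be32(gb);
    if (len >= INT_MAX / 4 - 1 || len > buf_size)
        return AVERROR_INVALIDDATA;
    return (int)len;
}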
for (i = 0; i < toks; i++) {
if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS || ctx->tokens[stream_id][i] < 0) {
ctx->tokens[stream_id][i], stream_id, i);
for (i = 0; i < toks; i++) {
if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
ctx->tokens[stream_id][i], stream_id, i);
#define TM2_INIT_POINTERS() \
int Ystride, Ustride, Vstride;\
Ystride = ctx->y_stride;\
Vstride = ctx->uv_stride;\
Ustride = ctx->uv_stride;\
Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
last = ctx->last + bx * 4;\
clast = ctx->clast + bx * 4;
#define TM2_INIT_POINTERS_2() \
int oYstride, oUstride, oVstride;\
TM2_INIT_POINTERS();\
Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;
#define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
CD[0] = CHR[1] - last[1];\
CD[1] = (int)CHR[stride + 1] - (int)CHR[1];\
last[0] = (int)CHR[stride + 0];\
last[1] = (int)CHR[stride + 1];}
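/*
 * TM2_RECALC_BLOCK() re-derives the per-column chroma deltas (CD) and the
 * bottom-row predictor (last) from an already decoded 2x2 chroma block, so
 * the block below it can be predicted.  A toy invocation; the chr[] values
 * are made up purely for illustration:
 */
static void recalc_block_example(void)
{
    int chr[4]  = { 10, 20,    /* top row of the 2x2 block    */
                    30, 50 };  /* bottom row (stride == 2)    */
    int last[2] = {  0,  5 };
    int CD[2];

    TM2_RECALC_BLOCK(chr, 2, last, CD);
    /* now CD[0] == 20 - 5 == 15, CD[1] == 50 - 20 == 30,
     *     last[0] == 30 and last[1] == 50                    */
}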
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; i++) {
d = deltas[i + j * 4];
Y[i] = av_clip_uint8(last[i]);
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
CD[j] += deltas[i + j * 2];
t = (CD[0] + CD[1]) >> 1;
l = (prev - CD[0] - CD[1] + clast[1]) >> 1;
CD[1] = CD[0] + CD[1] - t;
for (i = 0; i < 4; i++) {
for (i = 0; i < 16; i++)
deltas[1] = deltas[2] = deltas[3] = 0;
deltas[1] = deltas[2] = deltas[3] = 0;
for (i = 0; i < 16; i++)
deltas[1] = deltas[2] = deltas[3] = 0;
deltas[1] = deltas[2] = deltas[3] = 0;
for (i = 0; i < 16; i++)
last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3]) >> 1;
last[2] = (last[1] + last[3]) >> 1;
t1 = ctx->D[0] + ctx->D[1];
ctx->D[1] = t1 - (t1 >> 1);
t2 = ctx->D[2] + ctx->D[3];
ctx->D[3] = t2 - (t2 >> 1);
int left, right, diff;
deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
for (i = 0; i < 16; i++)
ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];
left = last[-1] - ct;
last[0] = left + (diff >> 2);
last[1] = left + (diff >> 1);
last[2] = right - (diff >> 2);
ctx->D[0] = (tp + (ct >> 2)) - left;
ctx->D[1] = (tp + (ct >> 1)) - left;
ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
ctx->D[3] = (tp + ct) - left;
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
U += Ustride; V += Vstride;
Uo += oUstride; Vo += oVstride;
ctx->D[0] = Yo[3] - last[3];
ctx->D[1] = Yo[3 + oYstride] - Yo[3];
ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
ctx->D[0] = Yo[3] - last[3];
ctx->D[1] = Yo[3 + oYstride] - Yo[3];
ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; i++) {
ctx->D[j] = last[3] - d;
mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
Yo += my * oYstride + mx;
Uo += (my >> 1) * oUstride + (mx >> 1);
Vo += (my >> 1) * oVstride + (mx >> 1);
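/*
 * The av_clip() calls above keep the motion vector inside the padded
 * previous frame, and the pointer updates move the reference block: luma by
 * full-pel (my, mx), chroma by half of that since the chroma planes are
 * subsampled 2x2.  A standalone sketch of the same indexing; the function
 * name and the log2_sub parameter are illustrative:
 */
static const int *mc_block_origin(const int *plane, int stride,
                                  int bx, int by, int mx, int my,
                                  int log2_sub)
{
    int x = (bx << (2 - log2_sub)) + (mx >> log2_sub);  /* 4x4 luma, 2x2 chroma */
    int y = (by << (2 - log2_sub)) + (my >> log2_sub);
    return plane + y * stride + x;
}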
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; i++) {
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; i++) {
ctx->D[0] = Y[3] - last[3];
ctx->D[1] = Y[3 + Ystride] - Y[3];
ctx->D[2] = Y[3 + Ystride * 2] - Y[3 + Ystride];
ctx->D[3] = Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
for (i = 0; i < 4; i++)
last[i] = Y[i + Ystride * 3];
memset(ctx->last, 0, 4 * bw * sizeof(int));
memset(ctx->clast, 0, 4 * bw * sizeof(int));
for (j = 0; j < bh; j++) {
memset(ctx->D, 0, 4 * sizeof(int));
memset(ctx->CD, 0, 4 * sizeof(int));
for (i = 0; i < bw; i++) {
Y = (ctx->cur?ctx->Y2:ctx->Y1);
U = (ctx->cur?ctx->U2:ctx->U1);
V = (ctx->cur?ctx->V2:ctx->V1);
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++) {
int y = Y[i], u = U[i >> 1], v = V[i >> 1];
dst[3*i+0] = av_clip_uint8(y + v);
dst[3*i+1] = av_clip_uint8(y);
dst[3*i+2] = av_clip_uint8(y + u);
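/*
 * The three stores above convert TM2's internal y/u/v values straight into
 * a packed BGR24 pixel (see AV_PIX_FMT_BGR24 below): B = y + v, G = y,
 * R = y + u, each clipped to 0..255, with chroma sampled at half the luma
 * resolution.  A standalone sketch of one output row; the buffer names and
 * the clip8() helper are illustrative:
 */
static unsigned char clip8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (unsigned char)v;
}

static void store_bgr24_row(unsigned char *dst, const int *Y,
                            const int *U, const int *V, int w)
{
    int i;
    for (i = 0; i < w; i++) {
        int y = Y[i], u = U[i >> 1], v = V[i >> 1];
        dst[3 * i + 0] = clip8(y + v);   /* B */
        dst[3 * i + 1] = clip8(y);       /* G */
        dst[3 * i + 2] = clip8(y + u);   /* R */
    }
}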
Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
} else if (j == h - 1) {
U[-2] = U[-1] = U[0];
V[-2] = V[-1] = V[0];
U[cw + 1] = U[cw] = U[cw - 1];
V[cw + 1] = V[cw] = V[cw - 1];
} else if (j == h - 1) {
#define TM2_HEADER_SIZE 40
void *data, int *got_frame,
int buf_size = avpkt->size & ~3;
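/*
 * Masking the packet size with ~3 rounds it down to a whole number of
 * 32-bit words, which is what the word-wise bswap_buf() pass over the input
 * buffer expects.  A quick, purely illustrative check of the arithmetic:
 */
static void size_mask_example(void)
{
    int a = 1023 & ~3;   /* == 1020: the trailing bytes are dropped     */
    int b = 1024 & ~3;   /* == 1024: already a multiple of 4, unchanged */
    (void)a;
    (void)b;
}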
if (offset >= buf_size) {
return (ret < 0) ? ret : buf_size;
.name = "truemotion2",
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
static void tm2_low_chroma(int *data, int stride, int *clast, int *CD, int *deltas, int bx)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
AVFrame
This structure describes decoded (raw) audio or video data.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
int * recode
table for converting from code indexes to values
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
AVCodec ff_truemotion2_decoder
int num
current number filled
static int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
void avpriv_request_sample(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
TM2Huff
structure for gathering Huffman codes information
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static const int tm2_stream_order[TM2_NUM_STREAMS]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int max_bits
maximum length of code
static int tm2_read_deltas(TM2Context *ctx, int stream_id)
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static void tm2_free_codes(TM2Codes *code)
int min_bits
minimum length of code
static int get_bits_count(const GetBitContext *s)
static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
VLC vlc
table for FFmpeg bitstream reader
bitstream reader API header.
static void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
static void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
static int get_bits_left(GetBitContext *gb)
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int val_bits
length of literal
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
static void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
int * tokens[TM2_NUM_STREAMS]
const char * name
Name of the codec implementation.
Libavcodec external API header.
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
int max_num
total number of codes
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
enum AVPictureType pict_type
Picture type of the frame.
static int GET_TOK(TM2Context *ctx, int type)
int width
picture width / height.
#define TM2_OLD_HEADER_MAGIC
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
static int tm2_get_token(GetBitContext *gb, TM2Codes *code)
int nodes
total number of nodes in tree
static av_cold int decode_end(AVCodecContext *avctx)
static av_always_inline int bytestream2_tell(GetByteContext *g)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define TM2_INIT_POINTERS_2()
AVCodecContext
main external API structure.
static void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
static unsigned int get_bits1(GetBitContext *s)
#define TM2_INIT_POINTERS()
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
static void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static void tm2_apply_deltas(TM2Context *ctx, int *Y, int stride, int *deltas, int *last)
void * av_calloc(size_t nmemb, size_t size)
Allocate a block of nmemb * size bytes with alignment suitable for all memory accesses (including vec...
#define TM2_RECALC_BLOCK(CHR, stride, last, CD)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
common internal api header.
static void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
int deltas[TM2_NUM_STREAMS][TM2_DELTAS]
static av_cold int decode_init(AVCodecContext *avctx)
#define TM2_NEW_HEADER_MAGIC
VLC_TYPE(* table)[2]
code, bits
int tok_lens[TM2_NUM_STREAMS]
int key_frame
1 -> keyframe, 0-> not
static void tm2_high_chroma(int *data, int stride, int *last, int *CD, int *deltas)
TM2Codes
Huffman codes for each of streams.
#define av_malloc_array(a, b)
int tok_ptrs[TM2_NUM_STREAMS]
AVPacket
This structure stores compressed data.
void ff_free_vlc(VLC *vlc)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
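/*
 * The symbols listed above (decode_init, decode_end, decode_frame,
 * CODEC_CAP_DR1, NULL_IF_CONFIG_SMALL) together with the .name = "truemotion2"
 * field shown earlier are the pieces of the exported AVCodec.  A sketch of
 * how such a definition is typically laid out in this FFmpeg era; every
 * field value not quoted in this listing (the long_name string, the codec
 * id, priv_data_size, the media type) is an assumption:
 */
AVCodec ff_truemotion2_decoder = {
    .name           = "truemotion2",
    .long_name      = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TRUEMOTION2,
    .priv_data_size = sizeof(TM2Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};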