27 #define BITSTREAM_READER_LE
39 16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19,
40 26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56,
41 14, 17, 22, 29, 51, 87, 80, 62, 18, 22, 37, 56,
42 68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92,
43 49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,
48 17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66,
49 99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99,
50 47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
51 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
52 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
106    int len = 0, skip = 0, max;
180    } else if (mode == 0) {
197                               const int *quant_matrix, int *skip, int *dc_level)
199    const uint8_t *scantable = s->scantable.permutated;
202    memset(s->wblocks, 0, s->wblocks_size);
204    for (int i = 0; i < 64; i++) {
205        int16_t *block = s->wblocks + scantable[i];
207        for (int j = 0; j < s->blocks_w;) {
211            rskip = FFMIN(*skip, s->blocks_w - j);
214            for (int k = 0; k < rskip; k++)
215                block[64 * k] = *dc_level * quant_matrix[0];
227            block[0] = (i == 0 ? *dc_level : level) * quant_matrix[i];
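/*
 * Illustrative sketch, not part of agm.c: decode_intra_blocks() lays out a whole
 * row of 8x8 blocks back to back in s->wblocks and walks it one zigzag position
 * at a time, so a run of skipped blocks at scan position 0 can be filled with
 * the predicted DC value in a single pass.  fill_dc_run() and its parameters are
 * made-up names that only restate the addressing seen above (block stride of 64,
 * DC at scantable[0], dequantization by quant_matrix[0]).
 */
#include <stdint.h>

static void fill_dc_run(int16_t *wblocks, const uint8_t *scantable,
                        const int *quant_matrix, int nb_blocks, int dc_level)
{
    int16_t *block = wblocks + scantable[0];        /* DC position of the first block */

    for (int k = 0; k < nb_blocks; k++)
        block[64 * k] = dc_level * quant_matrix[0]; /* predicted DC times quantizer */
}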
238                               const int *quant_matrix, int *skip,
241    const uint8_t *scantable = s->scantable.permutated;
244    memset(s->wblocks, 0, s->wblocks_size);
245    memset(s->map, 0, s->map_size);
247    for (int i = 0; i < 64; i++) {
248        int16_t *block = s->wblocks + scantable[i];
250        for (int j = 0; j < s->blocks_w;) {
254            rskip = FFMIN(*skip, s->blocks_w - j);
274                              const int *quant_matrix, int *skip, int *dc_level)
276    const uint8_t *scantable = s->scantable.permutated;
277    const int offset = s->plus ? 0 : 1024;
278    int16_t *block = s->block;
281    memset(block, 0, sizeof(s->block));
291    block[scantable[0]] = offset + *dc_level * quant_matrix[0];
293    for (int i = 1; i < 64;) {
297        rskip = FFMIN(*skip, 64 - i);
317    int ret, skip = 0, dc_level = 0;
318    const int offset = s->plus ? 0 : 1024;
325                          64 * s->blocks_w * sizeof(*s->wblocks));
329    for (int y = 0; y < s->blocks_h; y++) {
334        for (int x = 0; x < s->blocks_w; x++) {
336            s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
337                             frame->linesize[plane], s->wblocks + 64 * x);
341    for (int y = 0; y < s->blocks_h; y++) {
342        for (int x = 0; x < s->blocks_w; x++) {
347            s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
348                             frame->linesize[plane], s->block);
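/*
 * Illustrative sketch, not part of agm.c: AGM pictures are stored bottom-up, so
 * the block at block coordinates (bx, by) is written starting at picture row
 * (blocks_h - 1 - by) * 8.  block_dst() is a made-up helper that only restates
 * the destination address used with idct_put()/idct_add() above.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t *block_dst(uint8_t *plane_data, ptrdiff_t linesize,
                          int blocks_h, int bx, int by)
{
    return plane_data + (blocks_h - 1 - by) * 8 * linesize + bx * 8;
}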
363                              const int *quant_matrix, int *skip,
366    const uint8_t *scantable = s->scantable.permutated;
367    int16_t *block = s->block;
370    memset(block, 0, sizeof(s->block));
372    for (int i = 0; i < 64;) {
376        rskip = FFMIN(*skip, 64 - i);
403                          64 * s->blocks_w * sizeof(*s->wblocks));
408                          s->blocks_w * sizeof(*s->map));
412    for (int y = 0; y < s->blocks_h; y++) {
417        for (int x = 0; x < s->blocks_w; x++) {
418            int shift = plane == 0;
420            int orig_mv_x = s->mvectors[mvpos].x;
421            int mv_x = s->mvectors[mvpos].x / (1 + !shift);
422            int mv_y = s->mvectors[mvpos].y / (1 + !shift);
423            int h = s->avctx->coded_height >> !shift;
424            int w = s->avctx->coded_width >> !shift;
427            if (orig_mv_x >= -32) {
428                if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
429                    x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
433                            prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
436            s->idsp.idct(s->wblocks + x * 64);
437            for (int i = 0; i < 64; i++)
438                s->wblocks[i + x * 64] = (s->wblocks[i + x * 64] + 1) & 0xFFFC;
439            s->idsp.add_pixels_clamped(&s->wblocks[x*64], frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
440                                       frame->linesize[plane]);
443            s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
444                             frame->linesize[plane], s->wblocks + x * 64);
448    } else if (s->flags & 2) {
449        for (int y = 0; y < s->blocks_h; y++) {
450            for (int x = 0; x < s->blocks_w; x++) {
451                int shift = plane == 0;
453                int orig_mv_x = s->mvectors[mvpos].x;
454                int mv_x = s->mvectors[mvpos].x / (1 + !shift);
455                int mv_y = s->mvectors[mvpos].y / (1 + !shift);
456                int h = s->avctx->coded_height >> !shift;
457                int w = s->avctx->coded_width >> !shift;
464                if (orig_mv_x >= -32) {
465                    if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
466                        x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
470                                prev->data[plane] + ((s->blocks_h - 1 - y) * 8 - mv_y) * prev->linesize[plane] + (x * 8 + mv_x),
473                s->idsp.idct(s->block);
474                for (int i = 0; i < 64; i++)
475                    s->block[i] = (s->block[i] + 1) & 0xFFFC;
476                s->idsp.add_pixels_clamped(s->block, frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
477                                           frame->linesize[plane]);
480                s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
481                                 frame->linesize[plane], s->block);
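/*
 * Illustrative sketch, not part of agm.c: motion vectors are stored at luma
 * resolution and halved for the subsampled chroma planes (the "/ (1 + !shift)"
 * above); before the previous frame is referenced, the whole 8x8 source block
 * must lie inside the coded plane.  mv_block_inside() is a made-up helper name
 * that only restates that bounds check.
 */
static int mv_block_inside(int bx, int by, int mv_x, int mv_y, int w, int h)
{
    return by * 8 + mv_y >= 0 && by * 8 + mv_y + 8 <= h &&
           bx * 8 + mv_x >= 0 && bx * 8 + mv_x + 8 <= w;
}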
485    } else if (s->flags & 1) {
487                              64 * s->blocks_w * sizeof(*s->wblocks));
492                              s->blocks_w * sizeof(*s->map));
496        for (int y = 0; y < s->blocks_h; y++) {
501            for (int x = 0; x < s->blocks_w; x++) {
504                s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
505                                 frame->linesize[plane], s->wblocks + 64 * x);
509        for (int y = 0; y < s->blocks_h; y++) {
510            for (int x = 0; x < s->blocks_w; x++) {
519                s->idsp.idct_add(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
520                                 frame->linesize[plane], s->block);
537    double f = 1.0 - fabs(qscale);
539    if (!s->key_frame && (s->flags & 2)) {
541        for (int i = 0; i < 64; i++) {
546        for (int i = 0; i < 64; i++) {
547            luma[i] = FFMAX(1, 16 - qscale * 32);
553        for (int i = 0; i < 64; i++) {
558        for (int i = 0; i < 64; i++) {
565    for (int i = 0; i < 64; i++) {
568        s->luma_quant_matrix[i]   = luma[pos]   * ((pos / 8) & 1 ? -1 : 1);
569        s->chroma_quant_matrix[i] = chroma[pos] * ((pos / 8) & 1 ? -1 : 1);
576    uint8_t r = 0, g = 0, b = 0;
581    for (int y = 0; y < avctx->height; y++) {
582        for (int x = 0; x < avctx->width; x++) {
583            dst[x*3+0] = bytestream2_get_byteu(gbyte) + r;
585            dst[x*3+1] = bytestream2_get_byteu(gbyte) + g;
587            dst[x*3+2] = bytestream2_get_byteu(gbyte) + b;
590        dst -= frame->linesize[0];
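/*
 * Illustrative sketch, not part of agm.c: each raw intra RGB byte is a delta
 * against the previously decoded sample of the same component, and rows are
 * emitted bottom-up (hence "dst -= linesize" above).  decode_delta_row() and
 * its parameters are made-up names showing one row of that scheme.
 */
#include <stdint.h>

static void decode_delta_row(uint8_t *dst, const uint8_t *src, int width,
                             uint8_t *c0, uint8_t *c1, uint8_t *c2)
{
    for (int x = 0; x < width; x++) {
        dst[x*3 + 0] = *c0 = (uint8_t)(src[x*3 + 0] + *c0);  /* running predictor */
        dst[x*3 + 1] = *c1 = (uint8_t)(src[x*3 + 1] + *c1);  /* per component     */
        dst[x*3 + 2] = *c2 = (uint8_t)(src[x*3 + 2] + *c2);
    }
}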
597                                        uint8_t **u, uint8_t **v,
598                                        int ylinesize, int ulinesize, int vlinesize,
600                                        int *nx, int *ny, int *np, int w, int h)
602    uint8_t *y0dst = *y0;
603    uint8_t *y1dst = *y1;
606    int x = *nx, y = *ny, pos = *np;
609    y0dst[2*x+0] += fill[0];
610    y0dst[2*x+1] += fill[1];
611    y1dst[2*x+0] += fill[2];
612    y1dst[2*x+1] += fill[3];
614    } else if (pos == 1) {
623        y0dst -= 2*ylinesize;
624        y1dst -= 2*ylinesize;
628        y0dst[2*x+0] += fill[2];
629        y0dst[2*x+1] += fill[3];
631    } else if (pos == 2) {
632        y1dst[2*x+0] += fill[0];
633        y1dst[2*x+1] += fill[1];
642        y0dst -= 2*ylinesize;
643        y1dst -= 2*ylinesize;
664    int runlen, y = 0, x = 0;
669    code = bytestream2_peek_le32(gbyte);
670    runlen = code & 0xFFFFFF;
672    if (code >> 24 == 0x77) {
675        for (int i = 0; i < 4; i++)
676            fill[i] = bytestream2_get_byte(gbyte);
681        for (int i = 0; i < 4; i++) {
684            if (x >= frame->width * 3) {
687                dst -= frame->linesize[0];
688                if (y >= frame->height)
694        for (int i = 0; i < 4; i++)
695            fill[i] = bytestream2_get_byte(gbyte);
697        for (int i = 0; i < 4; i++) {
700            if (x >= frame->width * 3) {
703                dst -= frame->linesize[0];
704                if (y >= frame->height)
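/*
 * Illustrative sketch, not part of agm.c: the run-length stream above is read
 * as 32-bit little-endian words; a top byte of 0x77 marks a run whose length
 * sits in the low 24 bits, and four fill bytes follow.  The two helpers below
 * are made-up names that only restate that bit layout.
 */
#include <stdint.h>

static int is_run_escape(uint32_t code)
{
    return (code >> 24) == 0x77;      /* escape marker in the top byte */
}

static uint32_t run_length(uint32_t code)
{
    return code & 0xFFFFFF;           /* 24-bit run length */
}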
717    uint8_t *y1dst = y0dst - frame->linesize[0];
718    uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
719    uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
720    int runlen, y = 0, x = 0, pos = 0;
725    code = bytestream2_peek_le32(gbyte);
726    runlen = code & 0xFFFFFF;
728    if (code >> 24 == 0x77) {
731        for (int i = 0; i < 4; i++)
732            fill[i] = bytestream2_get_byte(gbyte);
747        for (int i = 0; i < 4; i++)
748            fill[i] = bytestream2_get_byte(gbyte);
767    uint8_t *y1dst = y0dst - frame->linesize[0];
768    uint8_t *udst = frame->data[1] + ((avctx->height >> 1) - 1) * frame->linesize[1];
769    uint8_t *vdst = frame->data[2] + ((avctx->height >> 1) - 1) * frame->linesize[2];
770    uint8_t ly0 = 0, ly1 = 0, ly2 = 0, ly3 = 0, lu = 0, lv = 0;
772    for (int y = 0; y < avctx->height / 2; y++) {
773        for (int x = 0; x < avctx->width / 2; x++) {
774            y0dst[x*2+0] = bytestream2_get_byte(gbyte) + ly0;
776            y0dst[x*2+1] = bytestream2_get_byte(gbyte) + ly1;
778            y1dst[x*2+0] = bytestream2_get_byte(gbyte) + ly2;
780            y1dst[x*2+1] = bytestream2_get_byte(gbyte) + ly3;
782            udst[x] = bytestream2_get_byte(gbyte) + lu;
784            vdst[x] = bytestream2_get_byte(gbyte) + lv;
788        y0dst -= 2*frame->linesize[0];
789        y1dst -= 2*frame->linesize[0];
790        udst -= frame->linesize[1];
791        vdst -= frame->linesize[2];
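/*
 * Illustrative sketch, not part of agm.c: raw intra YUV 4:2:0 data is ordered
 * per 2x2 group -- four luma bytes followed by one U and one V byte -- and each
 * of the six positions keeps its own running predictor (ly0..ly3, lu, lv
 * above).  Pred and read_yuv_group() are made-up names for a single group.
 */
#include <stdint.h>

typedef struct Pred { uint8_t ly0, ly1, ly2, ly3, lu, lv; } Pred;

static void read_yuv_group(const uint8_t *src, Pred *p,
                           uint8_t *y0row, uint8_t *y1row, uint8_t *u, uint8_t *v)
{
    y0row[0] = p->ly0 = (uint8_t)(src[0] + p->ly0);  /* first luma row of the pair  */
    y0row[1] = p->ly1 = (uint8_t)(src[1] + p->ly1);
    y1row[0] = p->ly2 = (uint8_t)(src[2] + p->ly2);  /* second luma row of the pair */
    y1row[1] = p->ly3 = (uint8_t)(src[3] + p->ly3);
    *u       = p->lu  = (uint8_t)(src[4] + p->lu);   /* shared chroma               */
    *v       = p->lv  = (uint8_t)(src[5] + p->lv);
}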
839                           nb_mvs * sizeof(*s->mvectors));
844                               (s->size[0] + s->size[1] + s->size[2]))) < 0)
847    memset(s->mvectors, 0, sizeof(*s->mvectors) * nb_mvs);
849    for (int i = 0; i < nb_mvs; i++) {
857    for (int i = 0; i < nb_mvs; i++) {
922    if (idx < 256 && idx >= 0) {
924    } else if (idx >= 0) {
925        get_tree_codes(codes, nodes, nodes[idx].child[0], pfx + (0 << bitpos), bitpos + 1);
926        get_tree_codes(codes, nodes, nodes[idx].child[1], pfx + (1U << bitpos), bitpos + 1);
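/*
 * Illustrative sketch, not part of agm.c: a self-contained version of the
 * recursive code extraction above.  Indices below 256 are leaf symbols, larger
 * indices are internal nodes, and the code word is accumulated LSB-first,
 * matching the little-endian bitstream reader selected at the top of the file.
 * The SketchNode layout is an assumption made only for this sketch.
 */
#include <stdint.h>

typedef struct SketchNode { int child[2]; } SketchNode;

static void sketch_tree_codes(uint32_t *codes, const SketchNode *nodes,
                              int idx, uint32_t pfx, int bitpos)
{
    if (idx < 256 && idx >= 0) {
        codes[idx] = pfx;                                 /* leaf: store the code */
    } else if (idx >= 0) {
        sketch_tree_codes(codes, nodes, nodes[idx].child[0], pfx,                  bitpos + 1);
        sketch_tree_codes(codes, nodes, nodes[idx].child[1], pfx | (1U << bitpos), bitpos + 1);
    }
}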
932    int zlcount = 0, curlen, idx, nindex, last, llast;
933    int blcounts[32] = { 0 };
939    for (int i = 0; i < 256; i++) {
940        int bitlen = bitlens[i];
941        int blcount = blcounts[bitlen];
943        zlcount += bitlen < 1;
944        syms[(bitlen << 8) + blcount] = i;
948    for (int i = 0; i < 512; i++) {
953    for (int i = 0; i < 256; i++) {
954        node_idx[i] = 257 + i;
962    for (curlen = 1; curlen < 32; curlen++) {
963        if (blcounts[curlen] > 0) {
964            int max_zlcount = zlcount + blcounts[curlen];
966            for (int i = 0; zlcount < 256 && zlcount < max_zlcount; zlcount++, i++) {
967                int p = node_idx[nindex - 1 + 512];
968                int ch = syms[256 * curlen + i];
973                if (nodes[p].child[0] == -1) {
974                    nodes[p].child[0] = ch;
976                    nodes[p].child[1] = ch;
988                p = node_idx[nindex - 1 + 512];
990                if (nodes[p].child[0] == -1) {
991                    nodes[p].child[0] = ch;
993                    nodes[p].child[1] = ch;
1002    for (int i = 0; i < idx; i++)
1003        node_idx[512 + i] = old_idx[i];
1017    uint32_t new_codes[256];
1019    uint8_t symbols[256];
1020    uint32_t codes[256];
1027    for (int i = 0; i < 256; i++) {
1029        bits[nb_codes] = bitlen[i];
1030        codes[nb_codes] = new_codes[i];
1031        symbols[nb_codes] = i;
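/*
 * Illustrative sketch, not part of agm.c: before the VLC table is built, the
 * per-symbol code lengths and codes are packed into parallel bits/codes/symbols
 * arrays as in build_huff() above.  compact_codes() is a made-up name, and the
 * non-zero-length test is an assumption inferred from the separate nb_codes
 * counter.
 */
#include <stdint.h>

static int compact_codes(const uint8_t *bitlen, const uint32_t *new_codes,
                         uint8_t *bits, uint32_t *codes, uint8_t *symbols)
{
    int nb_codes = 0;

    for (int i = 0; i < 256; i++) {
        if (bitlen[i]) {                     /* skip unused symbols (assumption) */
            bits[nb_codes]    = bitlen[i];
            codes[nb_codes]   = new_codes[i];
            symbols[nb_codes] = i;
            nb_codes++;
        }
    }
    return nb_codes;
}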
1057    if (s->output_size > avctx->width * avctx->height * 9LL + 10000)
1070    for (int i = 0; i < count; i++)
1073    for (int i = 0; i < 256; i++) {
1078    for (int i = 0; i < 256; i++)
1090    s->output[x++] = val;
1104    unsigned compressed_size;
1113    header = bytestream2_get_le32(gbyte);
1114    s->fflags = bytestream2_get_le32(gbyte);
1115    s->bitstream_size = s->fflags & 0x1FFFFFFF;
1118    if (avpkt->size < s->bitstream_size + 8)
1122    frame->key_frame = s->key_frame;
1125    if (!s->key_frame) {
1126        if (!s->prev_frame->data[0]) {
1141    } else if (!s->dct) {
1147    w = bytestream2_get_le32(gbyte);
1148    h = bytestream2_get_le32(gbyte);
1149    if (w == INT32_MIN || h == INT32_MIN)
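/*
 * Illustrative sketch, not part of agm.c: the per-packet header read above is
 * two 32-bit little-endian words, with the low 29 bits of the second word
 * giving the bitstream size, which must fit in the packet after the 8 header
 * bytes.  parse_agm_header() and AGMHeader are made-up names.
 */
#include <stdint.h>

typedef struct AGMHeader { uint32_t header, fflags, bitstream_size; } AGMHeader;

static int parse_agm_header(const uint8_t *buf, int buf_size, AGMHeader *h)
{
    if (buf_size < 8)
        return -1;
    h->header = buf[0] | buf[1] << 8 | buf[2] << 16 | (uint32_t)buf[3] << 24;
    h->fflags = buf[4] | buf[5] << 8 | buf[6] << 16 | (uint32_t)buf[7] << 24;
    h->bitstream_size = h->fflags & 0x1FFFFFFF;
    if ((int64_t)h->bitstream_size + 8 > buf_size)
        return -1;                       /* packet too small for the bitstream */
    return 0;
}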
1171    s->compression = bytestream2_get_le32(gbyte);
1172    if (s->compression < 0 || s->compression > 100)
1175    for (int i = 0; i < 3; i++)
1176        s->size[i] = bytestream2_get_le32(gbyte);
1178    compressed_size = s->output_size;
1181    compressed_size = avpkt->size;
1184    if (s->size[0] < 0 || s->size[1] < 0 || s->size[2] < 0 ||
1185        skip + s->size[0] + s->size[1] + s->size[2] > compressed_size) {
1193    if (frame->key_frame) {
1194        if (!s->dct && !s->rgb)
1196        else if (!s->dct && s->rgb)
1202        s->prev_frame->height != frame->height)
1205    if (!(s->flags & 2)) {
1213    } else if (!s->dct && !s->rgb) {
1247    if (!s->rgb && !s->dct) {
1277    s->mvectors_size = 0;
1279    s->wblocks_size = 0;
1281    s->padded_output_size = 0;