ClearVideo decoder source fragments (ff_clearvideo_decoder). Lines elided from this listing are marked with /* ... */.
#define CLV_VLC_BITS 9
/* decode_block(): read and dequantize one 8x8 coefficient block */
    int idx = 1, last = 0, val, skip;

    memset(blk, 0, sizeof(*blk) * 64);
    /* ... */
    while (idx < 64 && !last) {
        /* ... */
        skip = (val >> 4) & 0xFF;
        /* ... */
        val = ac_quant * (2 * aval + 1);
        /* ... */
    }
    /* ... */
    return (idx <= 64 && last) ? 0 : -1;
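
The line val = ac_quant * (2 * aval + 1) reconstructs an AC coefficient magnitude as an odd multiple of the quantizer. Worked through with illustrative numbers:

/* Example (illustrative values): ac_quant = 8, decoded magnitude aval = 3
 *   val = 8 * (2 * 3 + 1) = 8 * 7 = 56
 * The elided surrounding lines handle the coefficient's sign. */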
#define DCT_TEMPLATE(blk, step, bias, shift, dshift, OP) \
    const int t0 = OP(2841 * blk[1 * step] + 565 * blk[7 * step]); \
    const int t1 = OP( 565 * blk[1 * step] - 2841 * blk[7 * step]); \
    const int t2 = OP(1609 * blk[5 * step] + 2408 * blk[3 * step]); \
    const int t3 = OP(2408 * blk[5 * step] - 1609 * blk[3 * step]); \
    const int t4 = OP(1108 * blk[2 * step] - 2676 * blk[6 * step]); \
    const int t5 = OP(2676 * blk[2 * step] + 1108 * blk[6 * step]); \
    const int t6 = ((blk[0 * step] + blk[4 * step]) * (1 << dshift)) + bias; \
    const int t7 = ((blk[0 * step] - blk[4 * step]) * (1 << dshift)) + bias; \
    const int t8 = t0 + t2; \
    const int t9 = t0 - t2; \
    const int tA = (int)(181U * (t9 + (t1 - t3)) + 0x80) >> 8; \
    const int tB = (int)(181U * (t9 - (t1 - t3)) + 0x80) >> 8; \
    const int tC = t1 + t3; \
    blk[0 * step] = (t6 + t5 + t8) >> shift; \
    blk[1 * step] = (t7 + t4 + tA) >> shift; \
    blk[2 * step] = (t7 - t4 + tB) >> shift; \
    blk[3 * step] = (t6 - t5 + tC) >> shift; \
    blk[4 * step] = (t6 - t5 - tC) >> shift; \
    blk[5 * step] = (t7 - t4 - tB) >> shift; \
    blk[6 * step] = (t7 + t4 - tA) >> shift; \
    blk[7 * step] = (t6 + t5 - t8) >> shift;

#define COP(x) (((x) + 4) >> 3)
/* clv_dct(): two 1-D passes of the 8-point transform over the block */
    for (i = 0; i < 8; i++) {
        /* ... first pass of DCT_TEMPLATE ... */
    }
    for (i = 0; i < 8; i++) {
        /* ... second pass of DCT_TEMPLATE ... */
    }
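
The fixed-point multipliers in DCT_TEMPLATE are the familiar scaled-cosine factors of an 8-point IDCT: 2841, 2676, 2408, 1609, 1108 and 565 equal round(2048 * sqrt(2) * cos(k*pi/16)) for k = 1, 2, 3, 5, 6, 7, and 181 equals round(128 * sqrt(2)); COP(x) is then a rounded division by 8. A small standalone program (not part of the decoder) that reproduces the constants:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pi = 3.14159265358979323846;
    /* prints: 2841 2676 2408 1609 1108 565, then 181 */
    for (int k = 1; k <= 7; k++)
        if (k != 4)
            printf("%ld ", lround(2048.0 * sqrt(2.0) * cos(k * pi / 16.0)));
    printf("\n%ld\n", lround(128.0 * sqrt(2.0)));
    return 0;
}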
/* decode_mb(): decode one intra macroblock (four luma and two chroma blocks) */
    int i, has_ac[6], off;

    for (i = 0; i < 6; i++)
        /* ... */

    /* luma: four 8x8 blocks of the 16x16 macroblock */
    off = x * 16 + y * 16 * c->pic->linesize[0];
    for (i = 0; i < 4; i++) {
        /* ... */
        if (!x && !(i & 1)) {
            c->block[0] += c->top_dc[0];
            c->top_dc[0] = c->block[0];
        /* ... */
        c->block[0] += c->left_dc[(i & 2) >> 1];
        /* ... */
        c->left_dc[(i & 2) >> 1] = c->block[0];
        c->block[0] *= c->luma_dc_quant;
        /* ... */
        off += c->pic->linesize[0] * 8;
        c->idsp.put_pixels_clamped(c->block,
                                   c->pic->data[0] + off + (i & 1) * 8,
                                   c->pic->linesize[0]);
    }

    /* chroma: one 8x8 block per plane (Cb, Cr) */
    off = x * 8 + y * 8 * c->pic->linesize[1];
    for (i = 1; i < 3; i++) {
        /* ... */
        c->block[0] += c->top_dc[i];
        c->top_dc[i] = c->block[0];
        /* ... */
        c->block[0] += c->left_dc[i + 1];
        /* ... */
        c->left_dc[i + 1] = c->block[0];
        c->block[0] *= c->chroma_dc_quant;
        /* ... */
        c->idsp.put_pixels_clamped(c->block, c->pic->data[i] + off,
                                   c->pic->linesize[i]);
    }
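
The DC coefficients are delta-coded against a running predictor: each block's DC adds a previous DC (top_dc at the picture's left edge, left_dc otherwise) and is then scaled by luma_dc_quant or chroma_dc_quant. A minimal self-contained sketch of that running-predictor idea; predict_dc() and its exact edge rule are illustrative, not taken from the file:

/* Illustrative sketch only: not the decoder's actual helper. */
static int predict_dc(int delta, int *top_dc, int *left_dc, int at_left_edge)
{
    int dc = delta + (at_left_edge ? *top_dc : *left_dc);
    if (at_left_edge)
        *top_dc = dc;   /* remembered for the start of the next row */
    *left_dc = dc;      /* becomes the predictor for the next block */
    return dc;
}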
/* copy_block(): copy one size x size block from the reference frame src into dst */
static int copy_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src,
                      int plane, int x, int y, int dx, int dy, int size)
{
    int shift = plane > 0;
    /* ... */
    int sstride, dstride, soff, doff;
    uint8_t *sbuf, *dbuf;
    /* ... */
    if (x < 0 || sx < 0 || y < 0 || sy < 0 ||
        /* ... further bounds checks ... */

    sstride = src->linesize[plane];
    /* ... */
    soff = sx + sy * sstride;
    sbuf = src->data[plane];
    doff = x + y * dstride;
    dbuf = dst->data[plane];
    /* ... */
        uint8_t *dptr = &dbuf[doff];
        uint8_t *sptr = &sbuf[soff];
        /* ... */
        memcpy(dptr, sptr, size);
/* copyadd_block(): like copy_block(), but adds a bias to every copied sample */
static int copyadd_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src,
                         int plane, int x, int y, int dx, int dy, int size,
                         int bias)
{
    int shift = plane > 0;
    /* ... */
    int sstride = src->linesize[plane];
    /* ... */
    int soff = sx + sy * sstride;
    uint8_t *sbuf = src->data[plane];
    int doff = x + y * dstride;
    uint8_t *dbuf = dst->data[plane];
    /* ... */
    if (x < 0 || sx < 0 || y < 0 || sy < 0 ||
        /* ... further bounds checks ... */

    for (j = 0; j < size; j++) {
        uint8_t *dptr = &dbuf[doff];
        uint8_t *sptr = &sbuf[soff];
        /* ... */
            int val = sptr[i] + bias;
            /* ... */
    }
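
The biased copy has to clip the result back into the 8-bit range before storing it; the store itself is elided above. A minimal sketch of such an inner loop, using libavutil's av_clip_uint8() for the saturation (whether the file uses exactly this helper is not shown here):

    for (i = 0; i < size; i++) {
        int val = sptr[i] + bias;       /* biased prediction from the reference */
        dptr[i] = av_clip_uint8(val);   /* saturate to [0, 255] before the store */
    }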
/* mvi_predict(): predict a motion vector from neighbouring blocks, then clamp
 * it so the referenced area stays inside the picture */
    int left_mv, right_mv, top_mv, bot_mv;
    /* ... */
    } else if ((mb_x == 0) || (mb_x == mvi->mb_w - 1)) {
        /* ... */
        MV B = mvi->mv[ mb_x ];
        MV C = mvi->mv[ mb_x + 1];
        /* ... */

    left_mv  = -((mb_x * mvi->mb_size));
    right_mv = ((mvi->mb_w - mb_x - 1) * mvi->mb_size);
    if (res.x < left_mv) {
        /* ... */
    if (res.x > right_mv) {
        /* ... */
    top_mv = -((mb_y * mvi->mb_size));
    /* ... */
    if (res.y < top_mv) {
        /* ... */
    if (res.y > bot_mv) {
        /* ... */
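
The clamping bounds are expressed in pixels relative to the macroblock's own position, which keeps the motion-compensated source inside the picture. With illustrative numbers:

/* Example: mb_size = 16, mb_w = 10, mb_x = 2
 *   left_mv  = -(2 * 16)         = -32
 *   right_mv = (10 - 2 - 1) * 16 = 112
 * so res.x is limited to [-32, 112]; res.y is limited the same way
 * using top_mv and bot_mv. */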
/* decode_tile_info(): read the per-tile flags, motion vector and bias for one
 * level of the tile tree, then recurse into the flagged children */
    if (lc[level].flags_cb.table) {
        /* ... */
    if (lc[level].mv_cb.table) {
        /* ... */
        mv.x = (int8_t)(mv_code & 0xff);
        mv.y = (int8_t)(mv_code >> 8);
        /* ... */
    if (lc[level].bias_cb.table) {
        /* ... */
        bias = (int16_t)(bias_val);
    /* ... */
    for (i = 0; i < 4; i++) {
        if (ti->flags & (1 << i)) {
            /* ... */
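
The motion vector is packed into a single 16-bit code word: the low byte is the signed x component, the high byte the signed y component. For example:

/* mv_code = 0x02FE:
 *   mv.x = (int8_t)(0x02FE & 0xff) = (int8_t)0xFE = -2
 *   mv.y = (int8_t)(0x02FE >> 8)   = (int8_t)0x02 =  2 */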
/* tile_do_block(): apply motion compensation for one tile (its parameters
 * mirror those of copy_block() / copyadd_block()) */
static int tile_do_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src,
                         int plane, int x, int y, int dx, int dy, int size,
                         int bias)
    /* ... */

/* restore_tree(): walk the tile tree; each tile's vector is added to the root
 * vector, and subdivided tiles recurse into four half-size quadrants */
static int restore_tree(AVCodecContext *avctx, AVFrame *dst, AVFrame *src,
                        int plane, int x, int y, int size,
                        TileInfo *tile, MV root_mv)
{
    /* ... */
    mv.x = root_mv.x + tile->mv.x;
    mv.y = root_mv.y + tile->mv.y;
    /* ... */
        int i, hsize = size >> 1;
        /* ... */
        for (i = 0; i < 4; i++) {
            int xoff = (i & 2) == 0 ? 0 : hsize;
            int yoff = (i & 1) == 0 ? 0 : hsize;
            /* ... */
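
Spelled out, the quadrant offsets derived from the two low bits of i are:

/* hsize = size / 2
 *   i = 0: (xoff, yoff) = (0,     0)      top-left
 *   i = 1: (xoff, yoff) = (0,     hsize)  bottom-left
 *   i = 2: (xoff, yoff) = (hsize, 0)      top-right
 *   i = 3: (xoff, yoff) = (hsize, hsize)  bottom-right */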
/* extend_edges(): fill the tile area beyond the picture edge with the
 * neutral value 0x80 */
    int size = comp == 0 ? tile_size : tile_size >> 1;
    /* ... */
    uint8_t *framebuf = buf->data[comp];
    /* ... */
    if ((right == size) && (bottom == size)) {
        /* ... */

    for (j = 0; j < h; j++) {
        for (i = 0; i < right; i++) {
            framebuf[off + i] = 0x80;
        /* ... */

    if (bottom != size) {
        /* ... */
        for (j = 0; j < bottom; j++) {
            /* ... */
            framebuf[off + i] = 0x80;
            /* ... */
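
0x80 is the neutral 8-bit sample value (mid-gray luma, zero chroma offset), so the uncovered part of a tile is padded to gray. Each per-pixel inner loop is equivalent to a one-line memset over the run, e.g.:

    memset(framebuf + off, 0x80, right);   /* same effect as the i-loop above */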
/* clv_decode_frame(): decode one packet into a picture */
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    /* ... */
    int skip = bytestream2_get_byte(&gb);
    /* ... */
    if (buf_size < c->mb_width * c->mb_height) {
        /* ... */

    /* intra frame: fixed DC quantizers, then per-macroblock decoding */
    c->pic->key_frame = 1;
    /* ... */
    bytestream2_get_be32(&gb);
    c->ac_quant        = bytestream2_get_byte(&gb);
    c->luma_dc_quant   = 32;
    c->chroma_dc_quant = 32;
    /* ... */
    for (i = 0; i < 3; i++)
        /* ... */

    for (j = 0; j < c->mb_height; j++) {
        for (i = 0; i < c->mb_width; i++) {
            /* ... decode_mb() ... */

    /* inter frame: motion-compensate tiles from the previous picture */
    mvi_reset(&c->mvi, c->pmb_width, c->pmb_height, 1 << c->tile_shift);
    /* ... */
    for (j = 0; j < c->pmb_height; j++) {
        for (i = 0; i < c->pmb_width; i++) {
            /* ... */
            /* per-plane tile position, size and motion vector (halved for chroma) */
            for (plane = 0; plane < 3; plane++) {
                int16_t x    = plane == 0 ? i << c->tile_shift : i << (c->tile_shift - 1);
                int16_t y    = plane == 0 ? j << c->tile_shift : j << (c->tile_shift - 1);
                int16_t size = plane == 0 ? 1 << c->tile_shift : 1 << (c->tile_shift - 1);
                int16_t mx   = plane == 0 ? mv.x : mv.x / 2;
                int16_t my   = plane == 0 ? mv.y : mv.y / 2;
                /* ... */

            /* luma tile at full tile size */
            int x    = i << c->tile_shift;
            int y    = j << c->tile_shift;
            int size = 1 << c->tile_shift;
            /* ... */
            /* chroma tiles at half size; the tile's own vector is added
             * to the predicted one */
            x    = i << (c->tile_shift - 1);
            y    = j << (c->tile_shift - 1);
            size = 1 << (c->tile_shift - 1);
            cmv.x = mv.x + tile->mv.x;
            cmv.y = mv.y + tile->mv.y;
            /* ... */

    c->pic->key_frame = 0;
    /* ... */
    return mb_ret < 0 ? mb_ret : buf_size;
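
For context, packets reach clv_decode_frame() through the ordinary libavcodec API. A minimal, self-contained sketch of a caller (illustrative only; a real caller copies the demuxer's stream parameters, notably extradata and dimensions, into the context before opening the decoder):

#include <libavcodec/avcodec.h>

/* Illustrative caller: decode a single ClearVideo packet into out.
 * pkt is assumed to come from a demuxer; out must be an allocated AVFrame. */
static int decode_one(const AVPacket *pkt, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("clearvideo");
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);
    /* a real caller would fill ctx from the stream here, e.g. with
     * avcodec_parameters_to_context() */
    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, out);  /* out: a YUV420P picture */

    avcodec_free_context(&ctx);
    return ret;
}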
/* build_vlc(): build one VLC table from a 16-entry length-count table and a
 * shared symbol array */
static av_cold void build_vlc(VLC *vlc, const uint8_t counts[16],
                              const uint16_t **syms, unsigned *offset)
{
    /* ... */
    for (int i = 0; i < 16; i++) {
        unsigned count = counts[i];
        /* ... */
        for (count += num; num < count; num++)
            /* ... */
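
The inner loop for (count += num; num < count; num++) simply advances num across the next counts[i] symbols, i.e. the counts table is a run-length description of how many codes share each code length. For example:

/* Suppose num == 5 and counts[i] == 3:
 *   count += num  ->  count == 8
 *   the body runs for num = 5, 6, 7 and stops at num == 8,
 * covering the next three symbols of this length bucket. */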
/* clv_init_static(): build the static VLC tables for the levels of lev[] */
    for (unsigned i = 0, j = 0, k = 0, offset = 0;; i++) {
        if (0x36F & (1 << i)) {
            /* ... */
        if (0x1B7 & (1 << i)) {
            /* ... */
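
The two constants appear to be per-level bitmasks over the ten entries of lev[4+3+3]; written out in binary:

/* 0x36F = 0b1101101111 -> bits 0,1,2,3,5,6,8,9 set
 * 0x1B7 = 0b0110110111 -> bits 0,1,2,4,5,7,8   set
 * so each test picks the subset of levels for which the corresponding
 * table exists. */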
/* clv_decode_init(): validate the tile size and allocate per-decoder buffers */
    if (1U << c->tile_shift != c->tile_size ||
        c->tile_shift < 1 || c->tile_shift > 30) {
        av_log(avctx, AV_LOG_ERROR,
               "Tile size: %d, is not power of 2 > 1 and < 2^31\n",
               c->tile_size);
        /* ... */

    c->pmb_width  = (w + c->tile_size - 1) >> c->tile_shift;
    c->pmb_height = (h + c->tile_size - 1) >> c->tile_shift;
    /* ... */
    c->mvi.mv = av_calloc(c->pmb_width * 2, sizeof(*c->mvi.mv));
    if (!c->pic || !c->prev || !c->mvi.mv)
        /* ... */
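
The tile-grid arithmetic is a rounded-up division of the picture size by the tile size, and the power-of-two check ties tile_size to tile_shift. With illustrative numbers:

/* Example: tile_size = 16 -> tile_shift = 4 (1U << 4 == 16, so the check passes)
 * For a 320x240 picture:
 *   pmb_width  = (320 + 16 - 1) >> 4 = 335 >> 4 = 20
 *   pmb_height = (240 + 16 - 1) >> 4 = 255 >> 4 = 15 */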
/* from the AVCodec definition of ff_clearvideo_decoder */
    .name = "clearvideo",
#define AV_LOG_WARNING
Something somehow does not look correct.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static int get_bits_left(GetBitContext *gb)
static const uint8_t clv_dc_syms[NUM_DC_CODES]
static const int8_t mv[256][2]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int copyadd_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size, int bias)
AVFrame
This structure describes decoded (raw) audio or video data.
static const uint16_t clv_mv_syms[]
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
static const uint8_t clv_flags_syms[][16]
static av_cold int clv_decode_init(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void extend_edges(AVFrame *buf, int tile_size)
static int decode_mb(CLVContext *c, int x, int y)
static const uint8_t clv_dc_lens[NUM_DC_CODES]
static MV mvi_predict(MVInfo *mvi, int mb_x, int mb_y, MV diff)
int ff_init_vlc_from_lengths(VLC *vlc_arg, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold void clv_init_static(void)
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const uint16_t clv_bias_syms[]
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
struct TileInfo * child[4]
static void clv_dct(int16_t *block)
static int get_sbits(GetBitContext *s, int n)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define INIT_VLC_STATIC_FROM_LENGTHS(vlc, bits, nb_codes, lens, len_wrap, symbols, symbols_wrap, symbols_size, offset, flags, static_size)
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
static av_cold int clv_decode_end(AVCodecContext *avctx)
static void mvi_update_row(MVInfo *mvi)
static av_always_inline int bytestream2_tell(GetByteContext *g)
static int clv_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
static int restore_tree(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int size, TileInfo *tile, MV root_mv)
static int copy_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size)
#define DECLARE_ALIGNED(n, t, v)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static const uint8_t clv_ac_bits[NUM_AC_CODES]
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
const char * name
Name of the codec implementation.
static LevelCodes lev[4+3+3]
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const AVCodec ff_clearvideo_decoder
void * av_calloc(size_t nmemb, size_t size)
static const uint8_t clv_flags_bits[][16]
const uint8_t ff_zigzag_direct[64]
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
#define INIT_VLC_STATIC_OVERLONG
#define FFSWAP(type, a, b)
static TileInfo * decode_tile_info(GetBitContext *gb, const LevelCodes *lc, int level)
static VLC_TYPE vlc_buf[16716][2]
AVCodecContext
main external API structure.
static int decode_block(CLVContext *ctx, int16_t *blk, int has_ac, int ac_quant)
#define DCT_TEMPLATE(blk, step, bias, shift, dshift, OP)
int coded_width
Bitstream width / height, may be different from width/height e.g.
@ AV_PICTURE_TYPE_P
Predicted.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static void mvi_reset(MVInfo *mvi, int mb_w, int mb_h, int mb_size)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
AVPacket
This structure stores compressed data.
static const uint8_t clv_mv_len_counts[][16]
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define MKTAG(a, b, c, d)
static int tile_do_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size, int bias)
VLC_TYPE(* table)[2]
code, bits
static const uint16_t clv_ac_syms[NUM_AC_CODES]
static av_cold void build_vlc(VLC *vlc, const uint8_t counts[16], const uint16_t **syms, unsigned *offset)
static const uint8_t clv_bias_len_counts[][16]
static const uint8_t mv_syms[2][16][10]