37 #define LUMA_DC_BITS 9
38 #define CHROMA_DC_BITS 11
39 #define LUMA_AC_BITS 10
40 #define CHROMA_AC_BITS 10
94     for (int i = 0; i < 64; i++)
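The 64-iteration loop at line 94 belongs to calc_dequant(), which derives a per-coefficient dequantization table from one of the standard JPEG quantization tables listed further below (ff_mjpeg_std_luminance_quant_tbl / ff_mjpeg_std_chrominance_quant_tbl) and a quality value. The decoder's exact scaling is not visible in this excerpt; the sketch below uses the common IJG-style quality formula purely for illustration, and quality_scale_dequant is a hypothetical name.

    #include <stdint.h>

    /* Hypothetical helper: derive a per-coefficient dequant table from a
     * base JPEG quant table and an IJG-style quality value 1..100.
     * This mirrors the usual libjpeg scaling, not necessarily the exact
     * arithmetic performed by calc_dequant() in this decoder. */
    static void quality_scale_dequant(uint16_t dequant[64],
                                      const uint8_t quant_tbl[64], int quality)
    {
        int scale = quality < 50 ? 5000 / quality : 200 - 2 * quality;

        for (int i = 0; i < 64; i++) {
            int q = (quant_tbl[i] * scale + 50) / 100;
            if (q < 1)   q = 1;      /* keep every step size usable */
            if (q > 255) q = 255;
            dequant[i] = q;
        }
    }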
99                          const VLCElem *dc_table, int dc_bits, const VLCElem *ac_table, int ac_bits,
100                         int16_t *dc_pred, const uint16_t *dequant,
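decode_block() receives a per-plane DC predictor (dc_pred) and a dequant table (lines 99-100); the predictors are reset to zero at the start of each frame (line 148) and passed as dc_pred + plane later on (line 281). A standalone sketch of the usual JPEG-style DC reconstruction behind that parameter pair; the helper name and the assumption that the predicted DC is scaled by dequant[0] are illustrative, not taken from this file.

    #include <stdint.h>

    /* Illustrative DC reconstruction matching the dc_pred / dequant
     * parameters above: each plane keeps a running predictor and the
     * bitstream carries only the difference from the previous block. */
    static int reconstruct_dc(int16_t *dc_pred, int dc_diff,
                              const uint16_t dequant[64])
    {
        *dc_pred += dc_diff;             /* predictor persists across blocks */
        return *dc_pred * dequant[0];    /* coefficient 0 of the 8x8 block   */
    }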
106     s->bdsp.clear_block(block);
120     for (int i = 1; i < 64; i++) {
121         int symbol = get_vlc2(gb, ac_table, ac_bits, 2);
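Lines 120-121 step through the 63 AC positions of an 8x8 block, fetching one VLC symbol per coefficient with get_vlc2(). Since the file builds its tables from the MJPEG value arrays listed below (ff_mjpeg_val_ac_luminance / ff_mjpeg_val_ac_chrominance), the symbols are presumably JPEG-style run/level pairs placed in zigzag order and scaled by the dequant table. The standalone sketch below shows only that placement step; the names and the assumption that the dequant table is indexed by scan position are illustrative, not taken from this file.

    #include <stdint.h>

    /* Illustrative only: scatter already-decoded (run, level) pairs into
     * an 8x8 block in zigzag order and dequantize them, the way a
     * JPEG-style AC loop typically works. zigzag[] stands in for
     * ff_zigzag_direct / the permuted scantable. */
    static int place_ac_coeffs(int16_t block[64], const uint8_t zigzag[64],
                               const uint16_t dequant[64],
                               const int *run, const int16_t *level, int ncoeffs)
    {
        int pos = 0;                      /* position 0 holds the DC coefficient */

        for (int n = 0; n < ncoeffs; n++) {
            pos += run[n] + 1;            /* skip "run" zero coefficients        */
            if (pos > 63)
                return -1;                /* corrupt stream: ran past the block  */
            block[zigzag[pos]] = level[n] * dequant[pos];
        }
        return 0;
    }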
145     const uint8_t *buf = avpkt->data;
148     int16_t dc_pred[3] = {0, 0, 0};
186     if (!s->bitstream_buf)
190     for (int i = 8; i < avpkt->size; i++) {
191         int src = buf[i] ^ 0x80;
193         if (src == 0xFF && i + 1 < avpkt->size && (buf[i + 1] ^ 0x80) == 0x00)
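Lines 190-193 undo the bitstream obfuscation: every payload byte after the 8-byte packet header is XORed with 0x80, and a 0x00 that follows a 0xFF (after the XOR) looks like JPEG-style byte stuffing. The body of the stuffing branch is not part of this excerpt; the self-contained sketch below assumes the stuffed zero byte is simply dropped and that the result lands in a caller-provided buffer (the real code writes into s->bitstream_buf allocated with av_fast_padded_malloc). lead_unstuff is a hypothetical name.

    #include <stddef.h>
    #include <stdint.h>

    /* Minimal sketch of the unstuffing pass: XOR each byte with 0x80 and
     * skip the 0x00 stuffing byte that follows 0xFF. Returns the number
     * of bytes written; dst must hold at least size - 8 bytes. */
    static size_t lead_unstuff(uint8_t *dst, const uint8_t *buf, size_t size)
    {
        size_t n = 0;

        for (size_t i = 8; i < size; i++) {   /* payload starts after the header */
            int src = buf[i] ^ 0x80;

            dst[n++] = src;
            if (src == 0xFF && i + 1 < size && (buf[i + 1] ^ 0x80) == 0x00)
                i++;                          /* drop the stuffed zero byte */
        }
        return n;
    }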
202     for (int mb_y = 0; mb_y < avctx->height / 8; mb_y++)
203         for (int mb_x = 0; mb_x < avctx->width / 16; mb_x++)
204             for (int b = 0; b < 4; b++) {
210                 int plane = b < luma_block ? 0 : b - 1;
213                 if (b < luma_block) {
214                     y = 8*mb_y + 8*(b >> 1);
215                     x = 16*mb_x + 8*(b & 1);
220                 yclip = y + 8 >= avctx->height / 2;
227                     for (int yy = 0; yy < 8 && y + yy < avctx->height / 2; yy++)
228                         memcpy(frame->data[plane] + (y+yy)*frame->linesize[plane] + x, tmp + yy, 8);
232                                        frame->data[plane] + y*frame->linesize[plane] + x,
233                                        frame->linesize[plane]);
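In this path a block may overrun the bottom of the half-height plane, so yclip (line 220) apparently routes decode_block() into a temporary 8x8 buffer and only the visible rows are copied back (lines 227-228); otherwise the block is decoded straight into the frame (lines 232-233). A standalone equivalent of the clipped copy, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch of the bottom-edge handling: copy only the rows of an 8x8
     * temporary block that fall inside the plane. */
    static void copy_clipped_block(uint8_t *plane_dst, ptrdiff_t linesize,
                                   const uint8_t tmp[8][8],
                                   int x, int y, int plane_height)
    {
        for (int yy = 0; yy < 8 && y + yy < plane_height; yy++)
            memcpy(plane_dst + (ptrdiff_t)(y + yy) * linesize + x, tmp[yy], 8);
    }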
239     for (int mb_y = 0; mb_y < (avctx->height + 15) / 16; mb_y++)
240         for (int mb_x = 0; mb_x < (avctx->width + 15) / 16; mb_x++)
241             for (int b = 0; b < (yuv20p_half ? 4 : 6); b++) {
242                 int luma_block = yuv20p_half ? 2 : 4;
247                 int plane = b < luma_block ? 0 : b - (yuv20p_half ? 1 : 3);
250                 if (b < luma_block) {
251                     y = 16*mb_y + 8*(b >> 1);
252                     x = 16*mb_x + 8*(b & 1);
260                                    frame->data[plane] + y*frame->linesize[plane] + x,
261                                    (yuv20p_half && b < 2 ? 2 : 1) * frame->linesize[plane]);
265                 if (yuv20p_half && b < 2)
267                                    frame->data[plane] + y*frame->linesize[plane] + x,
268                                    2*frame->linesize[plane], 2*frame->linesize[plane], 8);
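Lines 239-252 map the block index b inside a 16x16 macroblock onto a plane and pixel position: four 8x8 luma blocks (only two in yuv20p_half mode, where each decoded luma row is apparently doubled afterwards via copy_block8, lines 265-268) followed by one Cb and one Cr block. The chroma coordinates are not part of this excerpt; the sketch below assumes the usual 4:2:0 mapping of one 8x8 chroma block per macroblock, and block_position / chroma_bias are hypothetical names (chroma_bias corresponds to the "yuv20p_half ? 1 : 3" term at line 247).

    /* Illustrative block-to-plane mapping for one macroblock. */
    static void block_position(int b, int mb_x, int mb_y, int luma_block,
                               int chroma_bias, int *plane, int *x, int *y)
    {
        if (b < luma_block) {             /* luma: 8x8 quadrants of the MB */
            *plane = 0;
            *y = 16 * mb_y + 8 * (b >> 1);
            *x = 16 * mb_x + 8 * (b & 1);
        } else {                          /* chroma: one 8x8 block per MB  */
            *plane = b - chroma_bias;     /* 1 = Cb, 2 = Cr                */
            *y = 8 * mb_y;                /* assumed 4:2:0 subsampling     */
            *x = 8 * mb_x;
        }
    }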
272     for (int j = 0; j < (avctx->height + 7) / fields / 8; j++)
273         for (int i = 0; i < (avctx->width + 7) / 8; i++)
274             for (int plane = 0; plane < 3; plane++) {
281                                    dc_pred + plane, dequant[!!plane],
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static int get_bits_left(GetBitContext *gb)
static VLCElem chroma_dc_vlc[1<< CHROMA_DC_BITS]
AVFrame: This structure describes decoded (raw) audio or video data.
static void copy_block8(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
AVCodec p
The public AVCodec.
static int dequant(AVSContext *h, int16_t *level_buf, uint8_t *run_buf, int16_t *dst, int mul, int shift, int coeff_num)
static av_cold int lead_decode_end(AVCodecContext *avctx)
static int ff_thread_once(char *control, void(*routine)(void))
#define FF_ARRAY_ELEMS(a)
static const uint8_t chroma_dc_len[]
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
#define FF_CODEC_DECODE_CB(func)
av_cold void ff_blockdsp_init(BlockDSPContext *c)
static const uint8_t luma_ac_len[]
unsigned int bitstream_buf_size
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
static VLCElem dc_vlc[1104]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
const uint8_t ff_mjpeg_val_ac_chrominance[]
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static av_cold int lead_decode_init(AVCodecContext *avctx)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
const uint8_t ff_mjpeg_val_ac_luminance[]
#define DECLARE_ALIGNED(n, t, v)
static const uint8_t luma_dc_len[]
uint8_t permutated_scantable[64]
static int decode_block(LeadContext *s, GetBitContext *gb, const VLCElem *dc_table, int dc_bits, const VLCElem *ac_table, int ac_bits, int16_t *dc_pred, const uint16_t *dequant, uint8_t *dst, int stride)
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
const FFCodec ff_lead_decoder
static VLCElem luma_ac_vlc[1160]
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
static int lead_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
static av_cold void lead_init_static_data(void)
const char * name
Name of the codec implementation.
static VLCElem luma_dc_vlc[1<< LUMA_DC_BITS]
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static void calc_dequant(uint16_t *dequant, const uint8_t *quant_tbl, int q)
const uint8_t ff_zigzag_direct[64]
const uint8_t ff_mjpeg_std_luminance_quant_tbl[64]
AVCodecContext: main external API structure.
static VLCElem chroma_ac_vlc[1160]
static VLCElem ac_vlc[554]
static const uint8_t chroma_ac_len[]
AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define avpriv_request_sample(...)
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
AVPacket: This structure stores compressed data.
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
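For reference, a minimal sketch of how the bit-reader helpers listed above combine for a JPEG-style DC read: the reader is set up elsewhere on the unstuffed buffer with init_get_bits8(), then a VLC size category is fetched with get_vlc2() and the signed difference with get_xbits(). read_dc is a hypothetical helper, and the assumption that this codec's DC symbols follow the MJPEG size-category scheme comes only from the MJPEG tables it reuses.

    #include "get_bits.h"            /* FFmpeg-internal bit reader and VLC helpers */
    #include "libavutil/error.h"

    /* Illustrative only: read one size category through the DC VLC table,
     * then the signed mantissa, and fold it into the per-plane predictor. */
    static int read_dc(GetBitContext *gb, const VLCElem *dc_table, int dc_bits,
                       int16_t *dc_pred)
    {
        int len = get_vlc2(gb, dc_table, dc_bits, 2);    /* size category        */

        if (len < 0)
            return AVERROR_INVALIDDATA;                  /* invalid bitstream    */
        *dc_pred += len ? get_xbits(gb, len) : 0;        /* signed DC difference */
        return 0;
    }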