s->frame_width, s->frame_height);
#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6
score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
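/* Illustration of the accumulation above: a plain sum of squared differences
 * between the source samples and the reconstruction levels.  This standalone
 * sketch mirrors ssd_int8_vs_int16_c (assumes <stdint.h>); the encoder itself
 * calls the DSP-dispatched version. */
static int ssd_sketch(const int8_t *pix1, const int16_t *pix2, int size)
{
    int score = 0;
    for (int i = 0; i < size; i++) {
        int d = pix1[i] - pix2[i];   /* per-sample error */
        score += d * d;              /* squared and accumulated */
    }
    return score;
}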
int threshold, int lambda, int intra)
int count, y, x, i, j, split, best_mean, best_score, best_count;
int w = 2 << (level + 2 >> 1);
int h = 2 << (level + 1 >> 1);
int16_t (*block)[256] = s->encoded_block_levels[level];
const int8_t *codebook_sum, *codebook;
const uint16_t (*mean_vlc)[2];
const uint8_t  (*multistage_vlc)[2];
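/* The two shifts above encode SVQ1's block-splitting hierarchy: each level
 * halves the block alternately in height and width (pure arithmetic from the
 * lines above):
 *   level 5: w = 2 << ((5+2)>>1) = 16, h = 2 << ((5+1)>>1) = 16  -> 16x16
 *   level 4: 16x8   level 3: 8x8   level 2: 8x4
 *   level 1: 4x4    level 0: 4x2
 */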
for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {

for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {
for (count = 1; count < 7; count++) {
    int best_vector_score = INT_MAX;
    int best_vector_sum = -999, best_vector_mean = -999;
    const int stage = count - 1;
    const int8_t *vector;
for (i = 0; i < 16; i++) {
    int sum = codebook_sum[stage * 16 + i];
if (score < best_vector_score) {
    best_vector_score = score;
    best_vector[stage] = i;
    best_vector_sum    = sum;
    best_vector_mean   = mean;
for (j = 0; j < size; j++)
    block[stage + 1][j] = block[stage][j] - vector[j];
best_vector_score += lambda *
                     (multistage_vlc[1 + count][1] +
                      mean_vlc[best_vector_mean][1]);
if (best_vector_score < best_score) {
    best_score = best_vector_score;
    best_mean  = best_vector_mean;
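/* Every comparison in this function is a rate-distortion decision: a
 * candidate replaces the current best when distortion plus lambda times its
 * bit cost is smaller.  A minimal sketch of that rule (illustrative helper,
 * not part of the encoder): */
static int rd_better(int dist_a, int bits_a, int dist_b, int bits_b, int lambda)
{
    return dist_a + lambda * bits_a < dist_b + lambda * bits_b;
}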
if (best_score > threshold && level) {
backup[i] = s->reorder_pb[i];
threshold >> 1, lambda, intra);
if (score < best_score) {
    s->reorder_pb[i] = backup[i];
av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
av_assert1(best_mean >= -256 && best_mean < 256);
av_assert1(best_count >= 0 && best_count < 7);
multistage_vlc[1 + best_count][1],
multistage_vlc[1 + best_count][0]);
mean_vlc[best_mean][0]);
for (i = 0; i < best_count; i++) {
for (y = 0; y < h; y++)
    for (x = 0; x < w; x++)
        block[best_count][x + w * y] +
s->block_index[0] = s->b8_stride * (s->mb_y * 2)         + s->mb_x * 2;
s->block_index[1] = s->b8_stride * (s->mb_y * 2)     + 1 + s->mb_x * 2;
s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1)     + s->mb_x * 2;
s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) + 1 + s->mb_x * 2;
s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x;
s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x;
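/* Worked example of the indexing above, assuming a 64x48 plane
 * (mb_width = 4, mb_height = 3, mb_stride = 5, b8_stride = 9) and the
 * macroblock at mb_x = 1, mb_y = 2:
 *   block_index[0] = 9*4 + 2       = 38    block_index[1] = 39
 *   block_index[2] = 9*5 + 2       = 47    block_index[3] = 48
 *   block_index[4] = 5*3 + 9*6 + 1 = 70
 *   block_index[5] = 5*7 + 9*6 + 1 = 90
 * i.e. the four luma 8x8 blocks sit in a b8_stride-wide grid and the two
 * chroma blocks follow after the luma area. */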
unsigned char *src_plane,
unsigned char *ref_plane,
unsigned char *decoded_plane,
int block_width, block_height;
const int lambda = (s->quality * s->quality) >>
block_width  = (width  + 15) / 16;
block_height = (height + 15) / 16;
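/* (width + 15) / 16 is ceiling division into 16-pixel macroblocks, e.g. a
 * 176x144 plane yields 11x9 blocks and a 100x80 plane yields
 * ceil(100/16) x ceil(80/16) = 7x5. */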
s->m.avctx               = s->avctx;
s->m.current_picture_ptr = &s->m.current_picture;
s->m.last_picture_ptr    = &s->m.last_picture;
s->m.last_picture.f->data[0]        = ref_plane;
s->m.last_picture.f->linesize[0]    =
s->m.new_picture->linesize[0]       =
s->m.current_picture.f->linesize[0] = stride;
s->m.mb_width  = block_width;
s->m.mb_height = block_height;
s->m.mb_stride = s->m.mb_width + 1;
s->m.b8_stride = 2 * s->m.mb_width + 1;
s->m.pict_type  = s->pict_type;
s->m.motion_est = s->motion_est;
s->m.me.scene_change_score = 0;
s->m.lambda  = s->quality;
s->m.qscale  = s->m.lambda * 139 +
s->m.lambda2 = s->m.lambda * s->m.lambda +
if (!s->motion_val8[plane]) {
    block_height * 2 + 2) * 2 * sizeof(int16_t));
    (block_height + 2) + 1) * 2 * sizeof(int16_t));
    if (!s->motion_val8[plane] || !s->motion_val16[plane])
s->m.mb_type = s->mb_type;
s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
s->m.current_picture.mb_type   = s->dummy;
s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table                    = s->motion_val16[plane] +
s->m.me.dia_size = s->avctx->dia_size;
s->m.first_slice_line = 1;
for (y = 0; y < block_height; y++) {
    s->m.new_picture->data[0] = src - y * 16 * stride;
    for (i = 0; i < 16 && i + 16 * y < height; i++) {
        memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
    for (x = width; x < 16 * block_width; x++)
    for (; i < 16 && i + 16 * y < 16 * block_height; i++)
    for (x = 0; x < block_width; x++) {
    s->m.first_slice_line = 0;
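/* Sketch of the copy-and-pad step performed by the loops above: each 16-row
 * strip of the source plane is copied into a buffer that is 16*block_width
 * wide, the right edge is extended by repeating the last pixel, and missing
 * bottom rows repeat the last valid row.  Illustrative helper only (assumes
 * <stdint.h>/<string.h>); names are not the encoder's. */
static void pad_strip_sketch(uint8_t *dst, const uint8_t *src_plane,
                             int width, int height, int block_width,
                             int src_stride, int stride, int y)
{
    int i, x;
    for (i = 0; i < 16 && i + 16 * y < height; i++) {
        memcpy(&dst[i * stride], &src_plane[(i + 16 * y) * src_stride], width);
        for (x = width; x < 16 * block_width; x++)      /* extend right edge */
            dst[i * stride + x] = dst[i * stride + width - 1];
    }
    for (; i < 16; i++)                                  /* extend bottom edge */
        memcpy(&dst[i * stride], &dst[(i - 1) * stride], 16 * block_width);
}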
s->m.first_slice_line = 1;
for (y = 0; y < block_height; y++) {
    for (i = 0; i < 16 && i + 16 * y < height; i++) {
        memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
    for (x = width; x < 16 * block_width; x++)
    for (; i < 16 && i + 16 * y < 16 * block_height; i++)
    for (x = 0; x < block_width; x++) {
        uint8_t reorder_buffer[2][6][7 * 32];
        uint8_t *decoded = decoded_plane + offset;
        int score[4] = { 0, 0, 0, 0 }, best;
        uint8_t *temp = s->scratchbuf;
(s->m.mb_type[x + y * s->m.mb_stride] &
for (i = 0; i < 6; i++)
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
score[0] = vlc[1] * lambda;
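/* A minimal sketch of the trial-bitstream pattern used here: every candidate
 * mode is written into its own PutBitContext over a scratch buffer
 * (reorder_buffer above), costed as distortion + lambda * bits, and only the
 * winner is copied into the real bitstream.  The helper is illustrative; only
 * the libavcodec bitwriter calls (init_put_bits, put_bits, put_bits_count,
 * flush_put_bits, ff_copy_bits) are real. */
static void copy_if_cheaper(PutBitContext *out, int lambda, int distortion,
                            int best_cost)
{
    PutBitContext trial;
    uint8_t trial_buf[7 * 32];
    init_put_bits(&trial, trial_buf, sizeof(trial_buf));
    put_bits(&trial, 2, 3);                    /* ... candidate symbols ... */
    int bits = put_bits_count(&trial);         /* exact bit count before padding */
    int cost = distortion + lambda * bits;
    flush_put_bits(&trial);                    /* make the bits visible in trial_buf */
    if (cost < best_cost)
        ff_copy_bits(out, trial_buf, bits);    /* append the winning bits verbatim */
}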
for (i = 0; i < 6; i++) {
int mx, my, pred_x, pred_y, dxy;
if (s->m.mb_type[x + y * s->m.mb_stride] &
for (i = 0; i < 6; i++)
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
s->m.pb = s->reorder_pb[5];
s->reorder_pb[5] = s->m.pb;
dxy = (mx & 1) + 2 * (my & 1);
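/* dxy packs the half-pel fraction of the motion vector into an index 0..3
 * for the hpel copy/average functions: 0 = integer, 1 = horizontal half-pel,
 * 2 = vertical half-pel, 3 = both.  E.g. mx = 5, my = -3 gives
 * dxy = (5 & 1) + 2 * (-3 & 1) = 1 + 2 = 3. */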
decoded, stride, 5, 64, lambda, 0);
best = score[1] <= score[0];
score[2] += vlc[1] * lambda;
if (score[2] < score[best] && mx == 0 && my == 0) {
    s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
for (i = 0; i < 6; i++) {
motion_ptr[0 + 2 * s->m.b8_stride] =
motion_ptr[1 + 2 * s->m.b8_stride] =
motion_ptr[2 + 2 * s->m.b8_stride] =
motion_ptr[3 + 2 * s->m.b8_stride] = 0;
s->rd_total += score[best];
for (i = 5; i >= 0; i--)
s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
s->m.first_slice_line = 0;
for (i = 0; i < 3; i++) {
if (avctx->width >= 4096 || avctx->height >= 4096) {
if (!s->current_picture || !s->last_picture) {
s->frame_width  = avctx->width;
s->frame_height = avctx->height;
s->y_block_width  = (s->frame_width  + 15) / 16;
s->y_block_height = (s->frame_height + 15) / 16;
s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
s->c_block_height = (s->frame_height / 4 + 15) / 16;
2 * 16 * 2 * sizeof(uint8_t));
s->y_block_height * sizeof(int16_t));
s->y_block_height * sizeof(int32_t));
if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
    !s->m.me.score_map || !s->mb_type || !s->dummy) {
const AVFrame *pict, int *got_packet)
if (!s->current_picture->data[0]) {
if (!s->last_picture->data[0]) {
if (!s->scratchbuf) {
for (i = 0; i < 3; i++) {
    s->last_picture->data[i],
    s->current_picture->data[i],
    s->frame_width  / (i ? 4 : 1),
    s->frame_height / (i ? 4 : 1),
    s->current_picture->linesize[i]);
for (j = 0; j < i; j++) {
#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
AVPixelFormat
Pixel format.
#define THRESHOLD_MULTIPLIER
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static int svq1_encode_plane(SVQ1EncContext *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride)
static int put_bytes_output(const PutBitContext *s)
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static int block_sum(const uint8_t *block, int w, int h, int linesize)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
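The bitwriter helpers referenced on this page (init_put_bits, put_bits, flush_put_bits, put_bytes_output) follow the usual libavcodec pattern; a minimal sketch, assuming it is built inside the FFmpeg tree where put_bits.h is available:
#include "put_bits.h"

static int write_example(uint8_t *buf, int buf_size)
{
    PutBitContext pb;
    init_put_bits(&pb, buf, buf_size);  /* attach the writer to buf */
    put_bits(&pb, 4, 0x9);              /* a 4-bit code */
    put_bits(&pb, 12, 0x123);           /* a 12-bit code */
    flush_put_bits(&pb);                /* pad the final byte with zeros */
    return put_bytes_output(&pb);       /* bytes now present in buf */
}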
const FFCodec ff_svq1_encoder
const int8_t *const ff_svq1_intra_codebooks[6]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit value val n times.
#define CANDIDATE_MB_TYPE_INTER
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const int8_t svq1_inter_codebook_sum[4][16 *6]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t ff_svq1_intra_multistage_vlc[6][8][2]
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
const uint16_t ff_svq1_inter_mean_vlc[512][2]
AVCodec p
The public AVCodec.
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
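ff_match_2uint16() is a linear search over a table of value pairs; in this file it is apparently used to look up the frame dimensions in ff_svq1_frame_size_table. A sketch of the equivalent loop (the no-match return value here is an illustrative choice):
static int match_2uint16_sketch(const uint16_t (*tab)[2], int size, int a, int b)
{
    for (int i = 0; i < size; i++)
        if (tab[i][0] == a && tab[i][1] == b)
            return i;          /* index of the matching {a, b} pair */
    return size;               /* illustrative: no pair matched */
}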
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
static void init_block_index(MpegEncContext *s)
#define FF_CODEC_ENCODE_CB(func)
static int put_bytes_left(const PutBitContext *s, int round_up)
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2, intptr_t size)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold int svq1_encode_init(AVCodecContext *avctx)
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_INPUT_BUFFER_MIN_SIZE
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra)
static double sqr(double x)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
static av_cold int svq1_encode_end(AVCodecContext *avctx)
const int8_t *const ff_svq1_inter_codebooks[6]
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
static char * split(char *message, char delim)
int flags
A combination of AV_PKT_FLAG values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static void svq1_write_header(SVQ1EncContext *s, int frame_type)
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
void ff_svq1enc_init_x86(SVQ1EncContext *c)
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void ff_h263_encode_init(MpegEncContext *s)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
int ff_init_me(MpegEncContext *s)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static const int8_t svq1_intra_codebook_sum[4][16 *6]
const uint8_t ff_svq1_inter_multistage_vlc[6][8][2]
#define FFSWAP(type, a, b)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static const AVOption options[]
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
main external API structure.
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
const uint8_t ff_svq1_block_type_vlc[4][2]
const uint16_t ff_svq1_intra_mean_vlc[256][2]
#define CANDIDATE_MB_TYPE_INTRA
static int ref[MAX_W *MAX_W]
static float mean(const float *input, int size)
@ AV_PICTURE_TYPE_P
Predicted.
int frame_number
Frame counter, set by libavcodec.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
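For example, with AV_PIX_FMT_YUV410P a 176x144 frame carries 44x36 chroma planes (176/4 x 144/4), which is why svq1_encode_init derives c_block_width and c_block_height from frame_width / 4 and frame_height / 4.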
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
av_cold void ff_svq1enc_init_ppc(SVQ1EncContext *c)
static const AVClass svq1enc_class
void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code)
const uint16_t ff_svq1_frame_size_table[7][2]
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
#define QUALITY_THRESHOLD
static const unsigned codebook[256][2]