65                              s->frame_width, s->frame_height);
78 #define QUALITY_THRESHOLD 100
79 #define THRESHOLD_MULTIPLIER 0.6
87         score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
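The score accumulated here is a plain sum of squared differences between an int8_t codebook vector and the int16_t residual. A minimal standalone sketch of the same computation, for illustration only (the function name is made up, not part of this file):

#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch only: SSD between an int8_t codebook vector and an
 * int16_t residual block, mirroring the accumulation shown above. */
static int ssd_int8_vs_int16_sketch(const int8_t *pix1, const int16_t *pix2,
                                    intptr_t size)
{
    int score = 0;
    for (intptr_t i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}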
93                         int threshold, int lambda, int intra)
95     int count, y, x, i, j, split, best_mean, best_score, best_count;
98     int w = 2 << (level + 2 >> 1);
99     int h = 2 << (level + 1 >> 1);
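These two expressions give the vector size for each split level: level 5 is 16x16 and each step down halves one dimension, ending at 4x2 for level 0. A tiny standalone check of that mapping (illustration only):

#include <stdio.h>

/* Illustrative sketch only: evaluate the same width/height expressions for
 * every level (5 -> 16x16, 4 -> 16x8, 3 -> 8x8, 2 -> 8x4, 1 -> 4x4, 0 -> 4x2). */
int main(void)
{
    for (int level = 5; level >= 0; level--) {
        int w = 2 << (level + 2 >> 1);
        int h = 2 << (level + 1 >> 1);
        printf("level %d: %dx%d\n", level, w, h);
    }
    return 0;
}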
101     int16_t (*block)[256] = s->encoded_block_levels[level];
102     const int8_t *codebook_sum, *codebook;
103 const uint16_t(*mean_vlc)[2];
104 const uint8_t(*multistage_vlc)[2];
115         for (y = 0; y < h; y++) {
116             for (x = 0; x < w; x++) {
129         for (y = 0; y < h; y++) {
130             for (x = 0; x < w; x++) {
144 for (count = 1; count < 7; count++) {
145 int best_vector_score = INT_MAX;
146 int best_vector_sum = -999, best_vector_mean = -999;
147 const int stage = count - 1;
148 const int8_t *vector;
150         for (i = 0; i < 16; i++) {
151             int sum = codebook_sum[stage * 16 + i];
158 if (score < best_vector_score) {
162 best_vector_score = score;
163                 best_vector[stage] = i;
164                 best_vector_sum    = sum;
165                 best_vector_mean   = mean;
170             for (j = 0; j < size; j++)
171                 block[stage + 1][j] = block[stage][j] - vector[j];
173 best_vector_score += lambda *
175 multistage_vlc[1 + count][1]
176 + mean_vlc[best_vector_mean][1]);
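So each candidate stage count is judged by a rate-distortion sum: the SSD that remains after subtracting the chosen vectors, plus lambda times the bits needed to signal the stage VLC and the mean. A hedged sketch of that criterion (the name is illustrative; 'bits' stands for the VLC lengths looked up from the real tables):

/* Illustrative sketch only: the selection criterion is
 * distortion + lambda * rate. */
static int rd_cost_sketch(int ssd, int lambda, int bits)
{
    return ssd + lambda * bits;
}

A candidate beats the current best exactly when this value is lower, which is the comparison made against best_score a few lines below.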
178 if (best_vector_score < best_score) {
179 best_score = best_vector_score;
181 best_mean = best_vector_mean;
187     if (best_score > threshold && level) {
193             backup[i] = s->reorder_pb[i];
195 threshold >> 1, lambda, intra);
200 if (score < best_score) {
205                 s->reorder_pb[i] = backup[i];
212 av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
213 av_assert1(best_mean >= -256 && best_mean < 256);
214 av_assert1(best_count >= 0 && best_count < 7);
219 multistage_vlc[1 + best_count][1],
220 multistage_vlc[1 + best_count][0]);
222 mean_vlc[best_mean][0]);
224         for (i = 0; i < best_count; i++) {
229         for (y = 0; y < h; y++)
230             for (x = 0; x < w; x++)
232                                           block[best_count][x + w * y] +
240     s->block_index[0] = s->b8_stride * (s->mb_y * 2)     +     s->mb_x * 2;
241     s->block_index[1] = s->b8_stride * (s->mb_y * 2)     + 1 + s->mb_x * 2;
242     s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) +     s->mb_x * 2;
243     s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) + 1 + s->mb_x * 2;
244     s->block_index[4] = s->mb_stride * (s->mb_y + 1) + s->b8_stride * s->mb_height * 2 + s->mb_x;
245     s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x;
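The first four indices address the macroblock's four luma 8x8 blocks inside a grid with b8_stride entries per row (two per macroblock horizontally); the last two point past the luma grid (the s->b8_stride * s->mb_height * 2 offset) to the chroma entries. A small standalone example with made-up geometry:

#include <stdio.h>

/* Illustrative sketch only: compute the four luma block indices for an
 * assumed macroblock position; mb_width/b8_stride values are examples. */
int main(void)
{
    int mb_x = 3, mb_y = 2;
    int mb_width  = 5;
    int b8_stride = 2 * mb_width + 1;   /* same formula as in this file */

    int idx[4];
    idx[0] = b8_stride * (mb_y * 2)     +     mb_x * 2;
    idx[1] = b8_stride * (mb_y * 2)     + 1 + mb_x * 2;
    idx[2] = b8_stride * (mb_y * 2 + 1) +     mb_x * 2;
    idx[3] = b8_stride * (mb_y * 2 + 1) + 1 + mb_x * 2;

    for (int i = 0; i < 4; i++)
        printf("block_index[%d] = %d\n", i, idx[i]);
    return 0;
}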
249 unsigned char *src_plane,
250 unsigned char *ref_plane,
251 unsigned char *decoded_plane,
256 int block_width, block_height;
260     const int lambda = (s->quality * s->quality) >>
268     block_width  = (width  + 15) / 16;
269     block_height = (height + 15) / 16;
272     s->m.avctx = s->avctx;
273     s->m.current_picture_ptr = &s->m.current_picture;
274     s->m.last_picture_ptr    = &s->m.last_picture;
275     s->m.last_picture.f->data[0] = ref_plane;
277     s->m.last_picture.f->linesize[0]    =
278     s->m.new_picture.f->linesize[0]     =
279     s->m.current_picture.f->linesize[0] = stride;
282     s->m.mb_width  = block_width;
283     s->m.mb_height = block_height;
284     s->m.mb_stride = s->m.mb_width + 1;
285     s->m.b8_stride = 2 * s->m.mb_width + 1;
287     s->m.pict_type  = s->pict_type;
288     s->m.motion_est = s->motion_est;
289     s->m.me.scene_change_score = 0;
292     s->m.lambda = s->quality;
293     s->m.qscale = s->m.lambda * 139 +
296     s->m.lambda2 = s->m.lambda * s->m.lambda +
300         if (!s->motion_val8[plane]) {
302                                                  block_height * 2 + 2) *
303                                                 2 * sizeof(int16_t));
305                                                  (block_height + 2) + 1) *
306                                                 2 * sizeof(int16_t));
307             if (!s->motion_val8[plane] || !s->motion_val16[plane])
311     s->m.mb_type = s->mb_type;
314     s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
315     s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
316     s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
317     s->m.current_picture.mb_type   = s->dummy;
319     s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
320     s->m.p_mv_table = s->motion_val16[plane] +
325     s->m.me.dia_size = s->avctx->dia_size;
326 s->m.first_slice_line = 1;
327 for (y = 0; y < block_height; y++) {
328         s->m.new_picture.f->data[0] = src - y * 16 * stride;
331             for (i = 0; i < 16 && i + 16 * y < height; i++) {
332                 memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
334                 for (x = width; x < 16 * block_width; x++)
337             for (; i < 16 && i + 16 * y < 16 * block_height; i++)
341 for (x = 0; x < block_width; x++) {
347 s->m.first_slice_line = 0;
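The strip copy above brings 16 source rows at a time into a 16-aligned scratch buffer; the loops that run x up to 16 * block_width and i up to 16 * block_height presumably pad the right and bottom edges so the motion search never reads past the real picture. A hedged sketch of one padded row (buffer layout and the replication rule are assumptions, not visible in this excerpt):

#include <string.h>
#include <stdint.h>

/* Illustrative sketch only: copy 'width' real pixels of one row into a
 * destination that is 16 * block_width wide and fill the padding by
 * repeating the last real pixel (assumed padding rule). */
static void pad_row_sketch(uint8_t *dst, const uint8_t *src_row,
                           int width, int block_width)
{
    memcpy(dst, src_row, width);
    for (int x = width; x < 16 * block_width; x++)
        dst[x] = dst[x - 1];
}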
355 s->m.first_slice_line = 1;
356 for (y = 0; y < block_height; y++) {
357             for (i = 0; i < 16 && i + 16 * y < height; i++) {
358                 memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
360                 for (x = width; x < 16 * block_width; x++)
363             for (; i < 16 && i + 16 * y < 16 * block_height; i++)
367 for (x = 0; x < block_width; x++) {
368 uint8_t reorder_buffer[2][6][7 * 32];
371             uint8_t *decoded = decoded_plane + offset;
373             int score[4] = { 0, 0, 0, 0 }, best;
374             uint8_t *temp = s->scratchbuf;
385                 (s->m.mb_type[x + y * s->m.mb_stride] &
387                 for (i = 0; i < 6; i++)
392                 put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
393 score[0] = vlc[1] * lambda;
397             for (i = 0; i < 6; i++) {
408 int mx, my, pred_x, pred_y, dxy;
412             if (s->m.mb_type[x + y * s->m.mb_stride] &
414                 for (i = 0; i < 6; i++)
418                 put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
420                 s->m.pb = s->reorder_pb[5];
429                 s->reorder_pb[5] = s->m.pb;
432 dxy = (mx & 1) + 2 * (my & 1);
440                                          decoded, stride, 5, 64, lambda, 0);
441 best = score[1] <= score[0];
446 score[2] += vlc[1] * lambda;
447 if (score[2] < score[best] && mx == 0 && my == 0) {
449                 s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
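At this point a macroblock has up to three candidate costs: score[0] for intra, score[1] for inter with the estimated motion vector, and score[2] for a skip that simply copies the reference when the vector is zero; the cheapest one is kept. A compact sketch of that selection (enum and function names are illustrative):

/* Illustrative sketch only: choose the cheapest macroblock mode, following
 * the score[0..2] comparisons above (skip is only allowed for a zero MV). */
enum mb_mode_sketch { MODE_INTRA = 0, MODE_INTER = 1, MODE_SKIP = 2 };

static enum mb_mode_sketch pick_mode_sketch(const int score[3], int mv_is_zero)
{
    enum mb_mode_sketch best = (score[MODE_INTER] <= score[MODE_INTRA])
                                   ? MODE_INTER : MODE_INTRA;
    if (mv_is_zero && score[MODE_SKIP] < score[best])
        best = MODE_SKIP;
    return best;
}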
455             for (i = 0; i < 6; i++) {
464                 motion_ptr[0 + 2 * s->m.b8_stride] =
465                 motion_ptr[1 + 2 * s->m.b8_stride] =
466                 motion_ptr[2 + 2 * s->m.b8_stride] =
467                 motion_ptr[3 + 2 * s->m.b8_stride] = 0;
471 s->rd_total += score[best];
474             for (i = 5; i >= 0; i--)
478                 s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
480 s->m.first_slice_line = 0;
505     for (i = 0; i < 3; i++) {
521     if (avctx->width >= 4096 || avctx->height >= 4096) {
532     if (!s->current_picture || !s->last_picture) {
536     s->frame_width  = avctx->width;
537     s->frame_height = avctx->height;
539     s->y_block_width  = (s->frame_width  + 15) / 16;
540     s->y_block_height = (s->frame_height + 15) / 16;
542     s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
543     s->c_block_height = (s->frame_height / 4 + 15) / 16;
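The encoder works on YUV 4:1:0 input (AV_PIX_FMT_YUV410P), so each chroma plane is a quarter of the luma size in both directions, and the chroma block grid uses the same round-up to whole 16x16 blocks. A quick worked example with an assumed frame size:

#include <stdio.h>

/* Illustrative sketch only: for an assumed 352x288 frame, the luma grid is
 * 22x18 macroblocks and each 88x72 chroma plane gives a 6x5 grid. */
int main(void)
{
    int w = 352, h = 288;
    printf("luma:   %d x %d blocks\n", (w + 15) / 16,     (h + 15) / 16);
    printf("chroma: %d x %d blocks\n", (w / 4 + 15) / 16, (h / 4 + 15) / 16);
    return 0;
}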
555                                     2 * 16 * 2 * sizeof(uint8_t));
559                                    s->y_block_height * sizeof(int16_t));
561                                    s->y_block_height * sizeof(int32_t));
564     if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
565         !s->m.me.score_map || !s->mb_type || !s->dummy) {
580                              const AVFrame *pict, int *got_packet)
595     if (!s->current_picture->data[0]) {
600     if (!s->last_picture->data[0]) {
605     if (!s->scratchbuf) {
624     for (i = 0; i < 3; i++) {
627                                     s->last_picture->data[i],
628                                     s->current_picture->data[i],
629                                     s->frame_width  / (i ? 4 : 1),
630                                     s->frame_height / (i ? 4 : 1),
632                                     s->current_picture->linesize[i]);
636         for (j = 0; j < i; j++) {
659 #define OFFSET(x) offsetof(struct SVQ1EncContext, x)
660 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM