Go to the documentation of this file.
52 #if AV_GCC_VERSION_AT_LEAST(10, 0) && AV_GCC_VERSION_AT_MOST(12, 0) \
53 && !defined(__clang__) && !defined(__INTEL_COMPILER)
54 #pragma GCC optimize ("no-ipa-cp-clone")
124 s->frame_width,
s->frame_height);
137 #define QUALITY_THRESHOLD 100
138 #define THRESHOLD_MULTIPLIER 0.6
142 int threshold,
int lambda,
int intra)
144 int count, y, x,
i, j,
split, best_mean, best_score, best_count;
147 int w = 2 << (
level + 2 >> 1);
148 int h = 2 << (
level + 1 >> 1);
150 int16_t (*
block)[256] =
s->encoded_block_levels[
level];
151 const int8_t *codebook_sum, *
codebook;
152 const uint16_t(*mean_vlc)[2];
153 const uint8_t(*multistage_vlc)[2];
164 for (y = 0; y <
h; y++) {
165 for (x = 0; x <
w; x++) {
178 for (y = 0; y <
h; y++) {
179 for (x = 0; x <
w; x++) {
193 for (count = 1; count < 7; count++) {
194 int best_vector_score = INT_MAX;
195 int best_vector_sum = -999, best_vector_mean = -999;
196 const int stage = count - 1;
197 const int8_t *vector;
199 for (
i = 0;
i < 16;
i++) {
200 int sum = codebook_sum[stage * 16 +
i];
204 sqr =
s->svq1encdsp.ssd_int8_vs_int16(vector,
block[stage],
size);
207 if (score < best_vector_score) {
211 best_vector_score = score;
212 best_vector[stage] =
i;
213 best_vector_sum = sum;
214 best_vector_mean =
mean;
219 for (j = 0; j <
size; j++)
220 block[stage + 1][j] =
block[stage][j] - vector[j];
222 best_vector_score += lambda *
224 multistage_vlc[1 + count][1]
225 + mean_vlc[best_vector_mean][1]);
227 if (best_vector_score < best_score) {
228 best_score = best_vector_score;
230 best_mean = best_vector_mean;
235 if (best_mean == -128)
237 else if (best_mean == 128)
241 if (best_score > threshold &&
level) {
247 backup[
i] =
s->reorder_pb[
i];
249 threshold >> 1, lambda, intra);
254 if (score < best_score) {
259 s->reorder_pb[
i] = backup[
i];
266 av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
267 av_assert1(best_mean >= -256 && best_mean < 256);
268 av_assert1(best_count >= 0 && best_count < 7);
273 multistage_vlc[1 + best_count][1],
274 multistage_vlc[1 + best_count][0]);
276 mean_vlc[best_mean][0]);
278 for (
i = 0;
i < best_count;
i++) {
283 for (y = 0; y <
h; y++)
284 for (x = 0; x <
w; x++)
286 block[best_count][x +
w * y] +
294 s->block_index[0]=
s->b8_stride*(
s->mb_y*2 ) +
s->mb_x*2;
295 s->block_index[1]=
s->b8_stride*(
s->mb_y*2 ) + 1 +
s->mb_x*2;
296 s->block_index[2]=
s->b8_stride*(
s->mb_y*2 + 1) +
s->mb_x*2;
297 s->block_index[3]=
s->b8_stride*(
s->mb_y*2 + 1) + 1 +
s->mb_x*2;
298 s->block_index[4]=
s->mb_stride*(
s->mb_y + 1) +
s->b8_stride*
s->mb_height*2 +
s->mb_x;
299 s->block_index[5]=
s->mb_stride*(
s->mb_y +
s->mb_height + 2) +
s->b8_stride*
s->mb_height*2 +
s->mb_x;
304 const unsigned char *src_plane,
305 unsigned char *ref_plane,
306 unsigned char *decoded_plane,
311 int block_width, block_height;
315 const int lambda = (
s->quality *
s->quality) >>
323 block_width = (
width + 15) / 16;
324 block_height = (
height + 15) / 16;
327 s->m.avctx =
s->avctx;
328 s->m.current_picture_ptr = &
s->m.current_picture;
329 s->m.last_picture_ptr = &
s->m.last_picture;
330 s->m.last_picture.f->data[0] = ref_plane;
332 s->m.last_picture.f->linesize[0] =
333 s->m.new_picture->linesize[0] =
334 s->m.current_picture.f->linesize[0] =
stride;
337 s->m.mb_width = block_width;
338 s->m.mb_height = block_height;
339 s->m.mb_stride =
s->m.mb_width + 1;
340 s->m.b8_stride = 2 *
s->m.mb_width + 1;
342 s->m.pict_type =
s->pict_type;
343 s->m.motion_est =
s->motion_est;
344 s->m.me.scene_change_score = 0;
347 s->m.lambda =
s->quality;
348 s->m.qscale =
s->m.lambda * 139 +
351 s->m.lambda2 =
s->m.lambda *
s->m.lambda +
355 if (!
s->motion_val8[plane]) {
357 block_height * 2 + 2) *
358 2 *
sizeof(int16_t));
360 (block_height + 2) + 1) *
361 2 *
sizeof(int16_t));
362 if (!
s->motion_val8[plane] || !
s->motion_val16[plane])
366 s->m.mb_type =
s->mb_type;
369 s->m.mb_mean = (uint8_t *)
s->dummy;
370 s->m.mb_var = (uint16_t *)
s->dummy;
371 s->m.mc_mb_var = (uint16_t *)
s->dummy;
372 s->m.current_picture.mb_type =
s->dummy;
374 s->m.current_picture.motion_val[0] =
s->motion_val8[plane] + 2;
375 s->m.p_mv_table =
s->motion_val16[plane] +
380 s->m.me.dia_size =
s->avctx->dia_size;
381 s->m.first_slice_line = 1;
382 for (y = 0; y < block_height; y++) {
383 s->m.new_picture->data[0] =
src - y * 16 *
stride;
386 for (
i = 0;
i < 16 &&
i + 16 * y <
height;
i++) {
387 memcpy(&
src[
i *
stride], &src_plane[(
i + 16 * y) * src_stride],
389 for (x =
width; x < 16 * block_width; x++)
392 for (;
i < 16 &&
i + 16 * y < 16 * block_height;
i++)
396 for (x = 0; x < block_width; x++) {
402 s->m.first_slice_line = 0;
410 s->m.first_slice_line = 1;
411 for (y = 0; y < block_height; y++) {
412 for (
i = 0;
i < 16 &&
i + 16 * y <
height;
i++) {
413 memcpy(&
src[
i *
stride], &src_plane[(
i + 16 * y) * src_stride],
415 for (x =
width; x < 16 * block_width; x++)
418 for (;
i < 16 &&
i + 16 * y < 16 * block_height;
i++)
422 for (x = 0; x < block_width; x++) {
423 uint8_t reorder_buffer[2][6][7 * 32];
426 uint8_t *decoded = decoded_plane +
offset;
428 int score[4] = { 0, 0, 0, 0 }, best;
429 uint8_t *
temp =
s->scratchbuf;
440 (
s->m.mb_type[x + y *
s->m.mb_stride] &
442 for (
i = 0;
i < 6;
i++)
451 for (
i = 0;
i < 6;
i++) {
461 int mx, my, pred_x, pred_y, dxy;
465 if (
s->m.mb_type[x + y *
s->m.mb_stride] &
467 for (
i = 0;
i < 6;
i++)
473 s->m.pb =
s->reorder_pb[5];
482 s->reorder_pb[5] =
s->m.pb;
485 dxy = (mx & 1) + 2 * (my & 1);
493 decoded,
stride, 5, 64, lambda, 0);
494 best = score[1] <= score[0];
499 if (score[2] < score[best] && mx == 0 && my == 0) {
501 s->hdsp.put_pixels_tab[0][0](decoded,
ref,
stride, 16);
507 for (
i = 0;
i < 6;
i++) {
516 motion_ptr[0 + 2 *
s->m.b8_stride] =
517 motion_ptr[1 + 2 *
s->m.b8_stride] =
518 motion_ptr[2 + 2 *
s->m.b8_stride] =
519 motion_ptr[3 + 2 *
s->m.b8_stride] = 0;
523 s->rd_total += score[best];
526 for (
i = 5;
i >= 0;
i--)
530 s->hdsp.put_pixels_tab[0][0](decoded,
temp,
stride, 16);
532 s->m.first_slice_line = 0;
556 for (
i = 0;
i < 3;
i++) {
570 int size = strlen(ident);
586 if (avctx->
width >= 4096 || avctx->
height >= 4096) {
597 if (!
s->current_picture || !
s->last_picture) {
601 s->frame_width = avctx->
width;
602 s->frame_height = avctx->
height;
604 s->y_block_width = (
s->frame_width + 15) / 16;
605 s->y_block_height = (
s->frame_height + 15) / 16;
607 s->c_block_width = (
s->frame_width / 4 + 15) / 16;
608 s->c_block_height = (
s->frame_height / 4 + 15) / 16;
620 2 * 16 * 2 *
sizeof(uint8_t));
622 s->y_block_height *
sizeof(int16_t));
624 s->y_block_height *
sizeof(
int32_t));
628 if (!
s->m.me.scratchpad || !
s->m.me.map ||
629 !
s->mb_type || !
s->dummy || !
s->m.new_picture)
641 const AVFrame *pict,
int *got_packet)
657 if (!
s->current_picture->data[0]) {
662 if (!
s->last_picture->data[0]) {
667 if (!
s->scratchbuf) {
685 for (
i = 0;
i < 3;
i++) {
688 s->last_picture->data[
i],
689 s->current_picture->data[
i],
690 s->frame_width / (
i ? 4 : 1),
691 s->frame_height / (
i ? 4 : 1),
693 s->current_picture->linesize[
i]);
697 for (j = 0; j <
i; j++) {
720 #define OFFSET(x) offsetof(struct SVQ1EncContext, x)
721 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
740 CODEC_LONG_NAME(
"Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
In libavfilter, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. "Format" means, for each input and each output, the list of supported formats: for video that means pixel formats, for audio that means channel layouts and sample formats. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. (Topic continues with: frame references, ownership, and permissions.)
static int put_bytes_output(const PutBitContext *s)
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
static av_cold int write_ident(AVCodecContext *avctx, const char *ident)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static const AVOption options[]
#define SVQ1_BLOCK_INTRA_CODE
const FF_VISIBILITY_PUSH_HIDDEN int8_t *const ff_svq1_inter_codebooks[6]
static int block_sum(const uint8_t *block, int w, int h, int linesize)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
const int8_t *const ff_svq1_intra_codebooks[6]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
enum AVPictureType pict_type
#define SVQ1_BLOCK_SKIP_CODE
static double sqr(double in)
#define CANDIDATE_MB_TYPE_INTER
int16_t encoded_block_levels[6][7][256]
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const int8_t svq1_inter_codebook_sum[4][16 *6]
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t ff_svq1_intra_multistage_vlc[6][8][2]
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
const uint16_t ff_svq1_inter_mean_vlc[512][2]
AVCodec p
The public AVCodec.
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
SVQ1EncDSPContext svq1encdsp
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
#define FF_CODEC_ENCODE_CB(func)
static int put_bytes_left(const PutBitContext *s, int round_up)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static int svq1_encode_plane(SVQ1EncContext *s, int plane, PutBitContext *pb, const unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride)
#define SVQ1_BLOCK_SKIP_LEN
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define SVQ1_BLOCK_INTRA_LEN
#define THRESHOLD_MULTIPLIER
const FFCodec ff_svq1_encoder
static void svq1_write_header(SVQ1EncContext *s, PutBitContext *pb, int frame_type)
#define CODEC_LONG_NAME(str)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
AVFrame * current_picture
static void ff_svq1enc_init(SVQ1EncDSPContext *c)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
static av_cold int svq1_encode_end(AVCodecContext *avctx)
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
#define DECLARE_ALIGNED(n, t, v)
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
static void init_block_index(MpegEncContext *s)
PutBitContext reorder_pb[6]
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static char * split(char *message, char delim)
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this — just let it vf offset
#define SVQ1_BLOCK_INTER_CODE
int flags
A combination of AV_PKT_FLAG values.
#define QUALITY_THRESHOLD
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define SVQ1_BLOCK_INTER_LEN
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void ff_h263_encode_init(MpegEncContext *s)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors, if available on the CPU) and zero all the bytes of the block.
const char * name
Name of the codec implementation.
int16_t(*[3] motion_val8)[2]
int ff_init_me(MpegEncContext *s)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static const int8_t svq1_intra_codebook_sum[4][16 *6]
const uint8_t ff_svq1_inter_multistage_vlc[6][8][2]
int64_t frame_num
Frame counter, set by libavcodec.
#define FFSWAP(type, a, b)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
int16_t(*[3] motion_val16)[2]
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
main external API structure.
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
const uint16_t ff_svq1_intra_mean_vlc[256][2]
#define CANDIDATE_MB_TYPE_INTRA
static int ref[MAX_W *MAX_W]
static float mean(const float *input, int size)
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, unsigned level, int threshold, int lambda, int intra)
static const AVClass svq1enc_class
@ AV_PICTURE_TYPE_P
Predicted.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
The exact code depends on how similar the blocks are and how related they are to the block
#define MKTAG(a, b, c, d)
void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code)
const uint16_t ff_svq1_frame_size_table[7][2]
static av_cold int svq1_encode_init(AVCodecContext *avctx)
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
static const unsigned codebook[256][2]