#define CACHE_SIZE (1<<(3*NBITS))

#define OFFSET(x) offsetof(PaletteUseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
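The cache sized above holds one bucket per quantized color. A minimal sketch of that indexing idea, assuming NBITS is the per-channel key width used by color_get(); the exact hashing in the file may differ:

/* Hypothetical helper (not the file's code): reduce an RGB triplet to a
 * CACHE_SIZE-wide bucket index by keeping the NBITS high bits per channel. */
static unsigned cache_bucket(uint8_t r, uint8_t g, uint8_t b)
{
    const unsigned rh = r >> (8 - NBITS);
    const unsigned gh = g >> (8 - NBITS);
    const unsigned bh = b >> (8 - NBITS);
    return rh << (2 * NBITS) | gh << NBITS | bh; /* always < CACHE_SIZE = 1<<(3*NBITS) */
}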
    { "alpha_threshold", "set the alpha threshold for transparency",
        OFFSET(trans_thresh), AV_OPT_TYPE_INT, {.i64=128}, 0, 255, FLAGS },
    if ((ret = ff_formats_ref(in   , &ctx->inputs[0]->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(inpal, &ctx->inputs[1]->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(out  , &ctx->outputs[0]->incfg.formats)) < 0)
        return ret;
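The ff_formats_ref() calls above attach format lists to each link. A hedged sketch of the lists they most plausibly reference (RGB32 on both inputs, PAL8 on the output, per AV_PIX_FMT_PAL8 in the reference list below); the file's exact declarations may differ:

static const enum AVPixelFormat in_fmts[]    = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
static const enum AVPixelFormat inpal_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
static const enum AVPixelFormat out_fmts[]   = {AV_PIX_FMT_PAL8,  AV_PIX_FMT_NONE};
AVFilterFormats *in    = ff_make_format_list(in_fmts);    /* main video input */
AVFilterFormats *inpal = ff_make_format_list(inpal_fmts); /* palette input    */
AVFilterFormats *out   = ff_make_format_list(out_fmts);   /* paletted output  */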
static av_always_inline uint32_t dither_color(uint32_t px, int er, int eg,
                                              int eb, int scale, int shift)
{
    return px >> 24 << 24
         | av_clip_uint8((px >> 16 & 0xff) + ((er * scale) / (1<<shift))) << 16
         | av_clip_uint8((px >>  8 & 0xff) + ((eg * scale) / (1<<shift))) <<  8
         | av_clip_uint8((px       & 0xff) + ((eb * scale) / (1<<shift)));
}
static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
{
    const int dr = c1[1] - c2[1];
    const int dg = c1[2] - c2[2];
    const int db = c1[3] - c2[3];

    if (c1[0] < trans_thresh && c2[0] < trans_thresh) {
        return 0;
    } else if (c1[0] >= trans_thresh && c2[0] >= trans_thresh) {
        return dr*dr + dg*dg + db*db;
    } else {
        return 255*255 + 255*255 + 255*255;
    }
}
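A hypothetical spot-check of the metric above (not part of the filter; it assumes it is compiled in the same translation unit as diff()):

#include <assert.h>

static void check_diff_metric(void)
{
    const uint8_t red  [4] = { 0xff, 0xff, 0x00, 0x00 }; /* opaque red         */
    const uint8_t blue [4] = { 0xff, 0x00, 0x00, 0xff }; /* opaque blue        */
    const uint8_t ghost[4] = { 0x10, 0xff, 0x00, 0x00 }; /* nearly transparent */

    assert(diff(red, blue, 128)    == 255*255 + 255*255); /* both opaque: squared RGB distance       */
    assert(diff(ghost, ghost, 128) == 0);                 /* both below threshold: considered equal  */
    assert(diff(red, ghost, 128)   == 3 * 255*255);       /* mixed opacity: maximal distance         */
}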
static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
{
    int i, pal_id = -1, min_dist = INT_MAX;

    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = palette[i];

        if (c >> 24 >= trans_thresh) { // skip transparent entries
            const uint8_t palargb[] = {
                palette[i]>>24 & 0xff,
                palette[i]>>16 & 0xff,
                palette[i]>> 8 & 0xff,
                palette[i]     & 0xff,
            };
            const int d = diff(palargb, argb, trans_thresh);
            if (d < min_dist) {
                pal_id = i;
                min_dist = d;
            }
        }
    }
    return pal_id;
}
static void colormap_nearest_node(const struct color_node *map, const int node_pos,
                                  const uint8_t *target, const int trans_thresh,
                                  struct nearest_color *nearest)
{
    const struct color_node *kd = map + node_pos;
    const int s = kd->split;
    const uint8_t *current = kd->val;
    int dx, nearer_kd_id, further_kd_id;
    const int current_to_target = diff(target, current, trans_thresh);

    if (current_to_target < nearest->dist_sqd) {
        nearest->node_pos = node_pos;
        nearest->dist_sqd = current_to_target;
    }
    ...
    dx = target[s] - current[s];
    ...
    if (nearer_kd_id != -1)
        colormap_nearest_node(map, nearer_kd_id, target, trans_thresh, nearest);

    if (further_kd_id != -1 && dx*dx < nearest->dist_sqd)
        colormap_nearest_node(map, further_kd_id, target, trans_thresh, nearest);
}
static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root,
                                                           const uint8_t *target, const int trans_thresh)
{
    int pos = 0, best_node_id = -1, best_dist = INT_MAX, cur_color_id = 0;
    ...
        const struct color_node *kd = &root[cur_color_id];
        const uint8_t *current = kd->val;
        const int current_to_target = diff(target, current, trans_thresh);

        if (current_to_target < best_dist) {
            best_node_id = cur_color_id;
            if (!current_to_target)
                goto end; // exact match, stop searching
            best_dist = current_to_target;
        }
        ...
        const int split = kd->split;
        const int dx = target[split] - current[split];
        int nearer_kd_id, further_kd_id;
        ...
        if (nearer_kd_id != -1) {
            if (further_kd_id != -1) {
                /* stash the further branch (and its dx^2) for a later revisit */
                ...
            }
            cur_color_id = nearer_kd_id;
            ...
        } else if (dx*dx < best_dist) {
            cur_color_id = further_kd_id;
            ...
        }
        ...
    } while (node->dx2 >= best_dist);
#define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)                               \
    search == COLOR_SEARCH_NNS_ITERATIVE ? colormap_nearest_iterative(root, target, trans_thresh) : \
    search == COLOR_SEARCH_NNS_RECURSIVE ? colormap_nearest_recursive(root, target, trans_thresh) : \
                                           colormap_nearest_bruteforce(palette, target, trans_thresh)
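A hedged usage sketch of the dispatch macro above, as a caller such as color_get() might invoke it (the argb layout matches diff(): {a, r, g, b}):

const uint8_t argb_elt[] = { a, r, g, b };
const int pal_id = COLORMAP_NEAREST(search_method, s->palette, s->map, argb_elt, s->trans_thresh);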
    if (a < s->trans_thresh && s->transparency_index >= 0) {
        return s->transparency_index;
    }
static av_always_inline int get_dst_color_err(PaletteUseContext *s,
                                              uint32_t c, int *er, int *eg, int *eb,
                                              const enum color_search_method search_method)
{
    const uint8_t r = c >> 16 & 0xff;
    const uint8_t g = c >>  8 & 0xff;
    const uint8_t b = c       & 0xff;
    uint32_t dstc;
    const int dstx = color_get(s, c, c>>24 & 0xff, r, g, b, search_method);
    if (dstx < 0)
        return dstx;
    dstc = s->palette[dstx];
    *er = r - (dstc >> 16 & 0xff);
    *eg = g - (dstc >>  8 & 0xff);
    *eb = b - (dstc       & 0xff);
    return dstx;
}
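A hedged sketch of how the per-channel errors computed above are consumed by the dithering loop in set_frame() further down (the 7/16 weight shown here is the usual Floyd-Steinberg right-neighbour share):

int er, eg, eb;
const int color = get_dst_color_err(s, src[x], &er, &eg, &eb, search_method);
if (color < 0)
    return color;
dst[x] = color;                                               /* palette index for this pixel */
if (x < w - 1)
    src[x + 1] = dither_color(src[x + 1], er, eg, eb, 7, 4);  /* push 7/16 of the error right */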
static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in,
                                      int x_start, int y_start, int w, int h,
                                      enum dithering_mode dither,
                                      const enum color_search_method search_method)
{
    int x, y;
    const int src_linesize = in ->linesize[0] >> 2;
    const int dst_linesize = out->linesize[0];
    uint32_t *src = ((uint32_t *)in ->data[0]) + y_start*src_linesize;
    uint8_t  *dst = out->data[0] + y_start*dst_linesize;

    w += x_start;
    h += y_start;

    for (y = y_start; y < h; y++) {
        for (x = x_start; x < w; x++) {
            int er, eg, eb;

            if (dither == DITHERING_BAYER) {
                const int d = s->ordered_dither[(y & 7)<<3 | (x & 7)];
                ... /* add the ordered-dither threshold d to each channel, then look the color up */
            } else if (dither == DITHERING_HECKBERT) {
                const int right = x < w - 1, down = y < h - 1;
                ... /* quantize src[x], keeping the per-channel error in er/eg/eb */
                if ( down)         src[src_linesize + x    ] = dither_color(src[src_linesize + x    ], er, eg, eb, 3, 3);
                if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 2, 3);
            } else if (dither == DITHERING_FLOYD_STEINBERG) {
                const int right = x < w - 1, down = y < h - 1, left = x > x_start;
                ... /* quantize src[x], keeping the per-channel error in er/eg/eb */
                if ( down)         src[src_linesize + x    ] = dither_color(src[src_linesize + x    ], er, eg, eb, 5, 4);
                if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 1, 4);
            } else if (dither == DITHERING_SIERRA2) {
                const int right  = x < w - 1, down  = y < h - 1, left  = x > x_start;
                const int right2 = x < w - 2,                    left2 = x > x_start + 1;
                ... /* quantize src[x], then spread the error over the current and the next row */
                if (left2)  src[ src_linesize + x - 2] = dither_color(src[ src_linesize + x - 2], er, eg, eb, 1, 4);
                ...
                if (1)      src[ src_linesize + x    ] = dither_color(src[ src_linesize + x    ], er, eg, eb, 3, 4);
                if (right)  src[ src_linesize + x + 1] = dither_color(src[ src_linesize + x + 1], er, eg, eb, 2, 4);
                if (right2) src[ src_linesize + x + 2] = dither_color(src[ src_linesize + x + 2], er, eg, eb, 1, 4);
            } else if (dither == DITHERING_SIERRA2_4A) {
                const int right = x < w - 1, down = y < h - 1, left = x > x_start;
                ... /* quantize src[x], keeping the per-channel error in er/eg/eb */
                if ( down) src[src_linesize + x] = dither_color(src[src_linesize + x], er, eg, eb, 1, 2);
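The (scale, shift) pairs passed to dither_color() above encode the error-diffusion weights as err*scale/(1<<shift): Heckbert works in eighths, Floyd-Steinberg and Sierra-2 in sixteenths, Sierra-2-4A ("filter lite") in quarters. A tiny arithmetic check of that convention, assuming dither_color() as reconstructed earlier:

/* Adding 7/16 of a +32 error to an opaque mid-grey pixel raises each channel by 14. */
uint32_t px = 0xff808080;
px = dither_color(px, 32, 32, 32, 7, 4);   /* 0x80 + (32*7)/16 = 0x8e per channel */
/* px == 0xff8e8e8e */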
static void disp_node(AVBPrint *buf, const struct color_node *map,
                      int parent_id, int node_id, int depth)
{
    const struct color_node *node = &map[node_id];
    const uint32_t fontcolor = node->val[1] > 0x50 &&
                               node->val[2] > 0x50 &&
                               node->val[3] > 0x50 ? 0 : 0xffffff;
    const int rgb_comp = node->split - 1;

    av_bprintf(buf, ...
               "label=\"%c%02X%c%02X%c%02X%c\" "
               "fillcolor=\"#%02x%02x%02x\" "
               "fontcolor=\"#%06"PRIX32"\"]\n",
               ...
               "[ "[rgb_comp], node->val[1],
               "][ "[rgb_comp], node->val[2],
               " ]["[rgb_comp], node->val[3],
               ...
               node->val[1], node->val[2], node->val[3],
               fontcolor);
    ...
}

static int disp_tree(const struct color_node *node, const char *fname)
{
    ...
    av_bprintf(&buf, "    node [style=filled fontsize=10 shape=box]\n");
    ...
    fwrite(buf.str, 1, buf.len, f);
    ...
}
static int debug_accuracy(const struct color_node *node, const uint32_t *palette,
                          const int trans_thresh, const enum color_search_method search_method)
{
    int r, g, b, ret = 0;

    for (r = 0; r < 256; r++) {
        for (g = 0; g < 256; g++) {
            for (b = 0; b < 256; b++) {
                const uint8_t argb[] = {0xff, r, g, b};
                const int r1 = COLORMAP_NEAREST(search_method, palette, node, argb, trans_thresh);
                const int r2 = colormap_nearest_bruteforce(palette, argb, trans_thresh);
                if (r1 != r2) {
                    const uint32_t c1 = palette[r1];
                    const uint32_t c2 = palette[r2];
                    const uint8_t palargb1[] = { 0xff, c1>>16 & 0xff, c1>> 8 & 0xff, c1 & 0xff };
                    const uint8_t palargb2[] = { 0xff, c2>>16 & 0xff, c2>> 8 & 0xff, c2 & 0xff };
                    const int d1 = diff(palargb1, argb, trans_thresh);
                    const int d2 = diff(palargb2, argb, trans_thresh);
                    if (d1 != d2) {
                        av_log(NULL, AV_LOG_ERROR,
                               "/!\\ %02X%02X%02X: %d ! %d (%06"PRIX32" ! %06"PRIX32") / dist: %d ! %d\n",
                               r, g, b, r1, r2, c1 & 0xffffff, c2 & 0xffffff, d1, d2);
                        ret = 1;
                    }
                }
            }
        }
    }
    return ret;
}
#define DECLARE_CMP_FUNC(name, pos)                     \
static int cmp_##name(const void *pa, const void *pb)  \
{                                                       \
    const struct color *a = pa;                         \
    const struct color *b = pb;                         \
    return   (a->value >> (8 * (3 - (pos))) & 0xff)     \
           - (b->value >> (8 * (3 - (pos))) & 0xff);    \
}
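A hedged sketch of how the comparator generator above is likely instantiated and used: one comparator per ARGB component, selected by index so get_next_color() can sort candidate colors along its chosen axis with AV_QSORT (listed in the reference section below). The file's exact wiring may differ:

DECLARE_CMP_FUNC(a, 0)
DECLARE_CMP_FUNC(r, 1)
DECLARE_CMP_FUNC(g, 2)
DECLARE_CMP_FUNC(b, 3)

static const cmp_func cmp_funcs[] = { cmp_a, cmp_r, cmp_g, cmp_b };

/* later, with `longest` in {1,2,3} as computed in get_next_color(): */
AV_QSORT(tmp_pal, nb_color, struct color, cmp_funcs[longest]);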
static int get_next_color(const uint8_t *color_used, const uint32_t *palette,
                          const int trans_thresh, int *component,
                          const struct color_rect *box)
{
    int i, longest = 0;
    unsigned nb_color = 0;
    struct color_rect ranges;
    struct color tmp_pal[256];
    int wr, wg, wb;

    ranges.min[0] = ranges.min[1] = ranges.min[2] = 0xff;
    ranges.max[0] = ranges.max[1] = ranges.max[2] = 0x00;

    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = palette[i];
        const uint8_t a = c >> 24 & 0xff;
        const uint8_t r = c >> 16 & 0xff;
        const uint8_t g = c >>  8 & 0xff;
        const uint8_t b = c       & 0xff;

        if (a < trans_thresh) {
            continue;
        }

        if (color_used[i] || (a != 0xff) ||
            r < box->min[0] || g < box->min[1] || b < box->min[2] ||
            r > box->max[0] || g > box->max[1] || b > box->max[2])
            continue;

        if (r < ranges.min[0]) ranges.min[0] = r;
        if (g < ranges.min[1]) ranges.min[1] = g;
        if (b < ranges.min[2]) ranges.min[2] = b;

        if (r > ranges.max[0]) ranges.max[0] = r;
        if (g > ranges.max[1]) ranges.max[1] = g;
        if (b > ranges.max[2]) ranges.max[2] = b;

        tmp_pal[nb_color].value = c;
        ...
    }
    ...
    /* pick the longest axis as the split component */
    wr = ranges.max[0] - ranges.min[0];
    wg = ranges.max[1] - ranges.min[1];
    wb = ranges.max[2] - ranges.min[2];
    if (wr >= wg && wr >= wb) longest = 1;
    if (wg >= wr && wg >= wb) longest = 2;
    if (wb >= wr && wb >= wg) longest = 3;
    *component = longest;
    ...
    /* after sorting along that axis, the median candidate becomes the node color */
    return tmp_pal[nb_color >> 1].pal_id;
}
static int colormap_insert(struct color_node *map,
                           uint8_t *color_used,
                           int *nb_used,
                           const uint32_t *palette,
                           const int trans_thresh,
                           const struct color_rect *box)
{
    uint32_t c;
    int component, cur_id;
    int node_left_id = -1, node_right_id = -1;
    struct color_node *node;
    struct color_rect box1, box2;
    const int pal_id = get_next_color(color_used, palette, trans_thresh, &component, box);

    if (pal_id < 0)
        return -1;

    /* create a new node with that color */
    cur_id = (*nb_used)++;
    c = palette[pal_id];
    node = &map[cur_id];
    node->split = component;
    node->val[0] = c>>24 & 0xff;
    node->val[1] = c>>16 & 0xff;
    node->val[2] = c>> 8 & 0xff;
    node->val[3] = c     & 0xff;

    color_used[pal_id] = 1;

    /* the node splits its box into two along the chosen component */
    box1 = box2 = *box;
    box1.max[component-1] = node->val[component];
    box2.min[component-1] = node->val[component] + 1;

    node_left_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box1);

    if (box2.min[component-1] <= box2.max[component-1])
        node_right_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box2);

    node->left_id  = node_left_id;
    node->right_id = node_right_id;

    return cur_id;
}
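A hedged sketch of how the recursion above is seeded (load_colormap() below does essentially this): mark nothing as used, start from a box spanning the full RGB cube, and let colormap_insert() split it until every usable palette entry has a node:

uint8_t color_used[AVPALETTE_COUNT] = {0};
int nb_used = 0;
struct color_rect box;

box.min[0] = box.min[1] = box.min[2] = 0x00;
box.max[0] = box.max[1] = box.max[2] = 0xff;
colormap_insert(s->map, color_used, &nb_used, s->palette, s->trans_thresh, &box);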
static int cmp_pal_entry(const void *a, const void *b)
{
    const int c1 = *(const uint32_t *)a & 0xffffff;
    const int c2 = *(const uint32_t *)b & 0xffffff;
    return c1 - c2;
}
static void load_colormap(PaletteUseContext *s)
{
    int i, nb_used = 0;
    uint8_t color_used[AVPALETTE_COUNT] = {0};
    uint32_t last_color = 0;
    struct color_rect box;
    ...
    /* re-locate the transparency index in the (re)ordered palette */
    if (s->transparency_index >= 0) {
        for (i = 0; i < AVPALETTE_COUNT; i++) {
            if ((s->palette[i]>>24 & 0xff) == 0) {
                s->transparency_index = i;
                break;
            }
        }
    }

    /* mark duplicate and transparent entries so the tree skips them */
    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = s->palette[i];
        if (i != 0 && c == last_color) {
            color_used[i] = 1;
            continue;
        }
        last_color = c;
        if (c >> 24 < s->trans_thresh) {
            color_used[i] = 1;
            continue;
        }
    }

    box.min[0] = box.min[1] = box.min[2] = 0x00;
    box.max[0] = box.max[1] = box.max[2] = 0xff;

    colormap_insert(s->map, color_used, &nb_used, s->palette, s->trans_thresh, &box);

    if (s->debug_accuracy) {
        ...
    }
}
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1,
                             const AVFrame *in2, int frame_count)
{
    int x, y;
    const uint32_t *palette = s->palette;
    uint32_t *src1 = (uint32_t *)in1->data[0];
    uint8_t  *src2 =             in2->data[0];
    const int src1_linesize = in1->linesize[0] >> 2;
    const int src2_linesize = in2->linesize[0];
    unsigned mean_err = 0;
    ...
    for (y = 0; y < in1->height; y++) {
        for (x = 0; x < in1->width; x++) {
            const uint32_t c1 = src1[x];
            const uint32_t c2 = palette[src2[x]];
            const uint8_t argb1[] = {0xff, c1 >> 16 & 0xff, c1 >> 8 & 0xff, c1 & 0xff};
            const uint8_t argb2[] = {0xff, c2 >> 16 & 0xff, c2 >> 8 & 0xff, c2 & 0xff};
            mean_err += diff(argb1, argb2, s->trans_thresh);
        }
        src1 += src1_linesize;
        src2 += src2_linesize;
    }

    s->total_mean_err += mean_err;
    ...
           mean_err / div, s->total_mean_err / (div * frame_count));
}
static void set_processing_window(enum diff_mode diff_mode,
                                  const AVFrame *prv_src, const AVFrame *cur_src,
                                  const AVFrame *prv_dst,       AVFrame *cur_dst,
                                  int *xp, int *yp, int *wp, int *hp)
{
    int x_start = 0, y_start = 0;
    int width  = cur_src->width;
    int height = cur_src->height;

    if (prv_src && diff_mode == DIFF_MODE_RECTANGLE) {
        int y;
        int x_end = cur_src->width  - 1,
            y_end = cur_src->height - 1;
        const uint32_t *prv_srcp = (const uint32_t *)prv_src->data[0];
        const uint32_t *cur_srcp = (const uint32_t *)cur_src->data[0];
        uint8_t *prv_dstp = prv_dst->data[0];
        uint8_t *cur_dstp = cur_dst->data[0];

        const int prv_src_linesize = prv_src->linesize[0] >> 2;
        const int cur_src_linesize = cur_src->linesize[0] >> 2;
        const int prv_dst_linesize = prv_dst->linesize[0];
        const int cur_dst_linesize = cur_dst->linesize[0];

        /* skip common lines at the top, then at the bottom, copying them from the previous output */
        while (y_start < y_end && !memcmp(prv_srcp + y_start*prv_src_linesize,
                                          cur_srcp + y_start*cur_src_linesize,
                                          cur_src->width * 4)) {
            memcpy(cur_dstp + y_start*cur_dst_linesize,
                   prv_dstp + y_start*prv_dst_linesize,
                   cur_dst->width);
            y_start++;
        }
        while (y_end > y_start && !memcmp(prv_srcp + y_end*prv_src_linesize,
                                          cur_srcp + y_end*cur_src_linesize,
                                          cur_src->width * 4)) {
            memcpy(cur_dstp + y_end*cur_dst_linesize,
                   prv_dstp + y_end*prv_dst_linesize,
                   cur_dst->width);
            y_end--;
        }

        height = y_end + 1 - y_start;

        /* skip common columns on the left, then on the right */
        while (x_start < x_end) {
            int changed = 0;
            for (y = y_start; y <= y_end; y++) {
                if (prv_srcp[y*prv_src_linesize + x_start] != cur_srcp[y*cur_src_linesize + x_start]) {
                    changed = 1;
                    break;
                }
            }
            if (changed)
                break;
            x_start++;
        }
        while (x_end > x_start) {
            int changed = 0;
            for (y = y_start; y <= y_end; y++) {
                if (prv_srcp[y*prv_src_linesize + x_end] != cur_srcp[y*cur_src_linesize + x_end]) {
                    changed = 1;
                    break;
                }
            }
            if (changed)
                break;
            x_end--;
        }

        width = x_end + 1 - x_start;

        /* copy the unchanged left and right borders from the previous output */
        if (x_start)
            for (y = y_start; y <= y_end; y++)
                memcpy(cur_dstp + y*cur_dst_linesize,
                       prv_dstp + y*prv_dst_linesize, x_start);

        if (x_end != cur_src->width - 1) {
            const int copy_len = cur_src->width - 1 - x_end;
            for (y = y_start; y <= y_end; y++)
                memcpy(cur_dstp + y*cur_dst_linesize + x_end + 1,
                       prv_dstp + y*prv_dst_linesize + x_end + 1,
                       copy_len);
        }
    }
    *xp = x_start;
    *yp = y_start;
    *wp = width;
    *hp = height;
}
        set_processing_window(s->diff_mode, s->last_in, in,
                              s->last_out, out, &x, &y, &w, &h);
        ...
        ff_dlog(ctx, "%dx%d rect: (%d;%d) -> (%d,%d) [area:%dx%d]\n",
                w, h, x, y, x+w, y+h, in->width, in->height);
    if (s->calc_mean_err)
        debug_mean_error(s, in, out, ...);
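A hedged sketch of how apply_palette() ties these pieces together: compute the processing window, then hand only that rectangle to the set_frame_* variant chosen at init time (assuming a context field, here called set_frame, picked from set_frame_lut; the field name is an assumption):

int x, y, w, h, ret;
set_processing_window(s->diff_mode, s->last_in, in, s->last_out, out, &x, &y, &w, &h);
ret = s->set_frame(s, out, in, x, y, w, h);   /* re-quantize only the changed rectangle */
if (ret < 0)
    return ret;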
    s->fs.opt_repeatlast = 1;
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;
962 "Palette input must contain exactly %d pixels. "
963 "Specified input has %dx%d=%d pixels\n",
static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
{
    int i = 0, x, y;
    const uint32_t *p = (const uint32_t *)palette_frame->data[0];
    const int p_linesize = palette_frame->linesize[0] >> 2;

    s->transparency_index = -1;
    ...
    memset(s->palette, 0, sizeof(s->palette));
    memset(s->map,     0, sizeof(s->map));
    ...
    memset(s->cache,   0, sizeof(s->cache));
    ...
    for (y = 0; y < palette_frame->height; y++) {
        for (x = 0; x < palette_frame->width; x++) {
            s->palette[i] = p[x];
            if (p[x]>>24 < s->trans_thresh) {
                s->transparency_index = i; // uses the last transparent color found
            }
            i++;
        }
        p += p_linesize;
    }

    load_colormap(s);
    ...
    s->palette_loaded = 1;
}
    if (!master || !second) {
        ret = AVERROR_BUG;
        ...
    }
    if (!s->palette_loaded) {
        load_palette(s, second);
    }
#define DEFINE_SET_FRAME(color_search, name, value)                            \
static int set_frame_##name(PaletteUseContext *s, AVFrame *out, AVFrame *in,   \
                            int x_start, int y_start, int w, int h)            \
{                                                                               \
    return set_frame(s, out, in, x_start, y_start, w, h, value, color_search); \
}

#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)                               \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##none,            DITHERING_NONE)            \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##bayer,           DITHERING_BAYER)           \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##heckbert,        DITHERING_HECKBERT)        \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##floyd_steinberg, DITHERING_FLOYD_STEINBERG) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2,         DITHERING_SIERRA2)         \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2_4a,      DITHERING_SIERRA2_4A)
#define DITHERING_ENTRIES(color_search) {       \
    set_frame_##color_search##_none,            \
    set_frame_##color_search##_bayer,           \
    set_frame_##color_search##_heckbert,        \
    set_frame_##color_search##_floyd_steinberg, \
    set_frame_##color_search##_sierra2,         \
    set_frame_##color_search##_sierra2_4a,      \
}
static int dither_value(int p)
{
    const int q = p ^ (p >> 3);
    return   (p & 4) >> 2 | (q & 4) >> 1
           | (p & 2) << 1 | (q & 2) << 2
           | (p & 1) << 4 | (q & 1) << 5;
}
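dither_value() bit-interleaves p with p^(p>>3) to generate the 8x8 ordered-dither (Bayer) pattern stored in ordered_dither[]. A hypothetical standalone snippet to visualize it, using only the function above (assumed to be in the same translation unit):

#include <stdio.h>

static void print_bayer_pattern(void)
{
    for (int p = 0; p < 64; p++)
        printf("%2d%c", dither_value(p), (p & 7) == 7 ? '\n' : ' ');
}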
    if (!s->last_in || !s->last_out) {
        ...
        return AVERROR(ENOMEM);
    }
    ...
    const int delta = 1 << (5 - s->bayer_scale);
AVFilter ff_vf_paletteuse = {
    .name       = "paletteuse",
    ...
    .priv_class = &paletteuse_class,
    ...
};
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
#define AV_BPRINT_SIZE_UNLIMITED
static int config_input_palette(AVFilterLink *inlink)
AVPixelFormat
Pixel format.
static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
static av_always_inline int get_dst_color_err(PaletteUseContext *s, uint32_t c, int *er, int *eg, int *eb, const enum color_search_method search_method)
static void colormap_nearest_node(const struct color_node *map, const int node_pos, const uint8_t *target, const int trans_thresh, struct nearest_color *nearest)
static int query_formats(AVFilterContext *ctx)
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1, const AVFrame *in2, int frame_count)
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static av_always_inline uint32_t dither_color(uint32_t px, int er, int eg, int eb, int scale, int shift)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_cold int init(AVFilterContext *ctx)
int(* set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int width, int height)
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
static av_cold void uninit(AVFilterContext *ctx)
void * av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, const uint8_t *elem_data)
Add an element of size elem_size to a dynamic array.
static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
AVFilter ff_vf_paletteuse
static int disp_tree(const struct color_node *node, const char *fname)
const char * name
Filter name.
@ EXT_INFINITY
Extend the frame to infinity.
A link between two filters.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_always_inline uint8_t colormap_nearest_recursive(const struct color_node *node, const uint8_t *rgb, const int trans_thresh)
static int debug_accuracy(const struct color_node *node, const uint32_t *palette, const int trans_thresh, const enum color_search_method search_method)
static int dither_value(int p)
@ COLOR_SEARCH_BRUTEFORCE
static int apply_palette(AVFilterLink *inlink, AVFrame *in, AVFrame **outf)
struct cache_node cache[CACHE_SIZE]
A filter pad used for either input or output.
static int colormap_insert(struct color_node *map, uint8_t *color_used, int *nb_used, const uint32_t *palette, const int trans_thresh, const struct color_rect *box)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root, const uint8_t *target, const int trans_thresh)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void set_processing_window(enum diff_mode diff_mode, const AVFrame *prv_src, const AVFrame *cur_src, const AVFrame *prv_dst, AVFrame *cur_dst, int *xp, int *yp, int *wp, int *hp)
#define FF_ARRAY_ELEMS(a)
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)
static int config_output(AVFilterLink *outlink)
static const AVFilterPad outputs[]
static const set_frame_func set_frame_lut[NB_COLOR_SEARCHES][NB_DITHERING]
static int load_apply_palette(FFFrameSync *fs)
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
#define fs(width, name, subs,...)
#define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)
static int get_next_color(const uint8_t *color_used, const uint32_t *palette, const int trans_thresh, int *component, const struct color_rect *box)
#define DITHERING_ENTRIES(color_search)
static const cmp_func cmp_funcs[]
static void disp_node(AVBPrint *buf, const struct color_node *map, int parent_id, int node_id, int depth)
static const AVOption paletteuse_options[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
@ DITHERING_FLOYD_STEINBERG
uint32_t palette[AVPALETTE_COUNT]
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
AVFilterContext * src
source filter
static av_always_inline int color_get(PaletteUseContext *s, uint32_t color, uint8_t a, uint8_t r, uint8_t g, uint8_t b, const enum color_search_method search_method)
Check if the requested color is in the cache already.
#define AV_LOG_INFO
Standard information.
#define DECLARE_CMP_FUNC(name, pos)
static int activate(AVFilterContext *ctx)
#define AV_QSORT(p, num, type, cmp)
Quicksort. This sort is fast and fully in-place, but not stable, and it is possible to construct input that requires O(n^2) time, though this is very unlikely with non-adversarial input.
int w
agreed upon image width
static int cmp_pal_entry(const void *a, const void *b)
struct cached_color * entries
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Pad name.
static void load_colormap(PaletteUseContext *s)
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
void av_bprintf(AVBPrint *buf, const char *fmt,...)
static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int w, int h, enum dithering_mode dither, const enum color_search_method search_method)
int h
agreed upon image height
@ COLOR_SEARCH_NNS_ITERATIVE
int(* cmp_func)(const void *, const void *)
struct color_node map[AVPALETTE_COUNT]
@ COLOR_SEARCH_NNS_RECURSIVE
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
static int shift(int a, int b)
const VDPAUPixFmtMap * map
static const AVFilterPad paletteuse_outputs[]
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
AVFILTER_DEFINE_CLASS(paletteuse)
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
static const AVFilterPad paletteuse_inputs[]
static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
static const uint8_t dither[8][8]