70 #if CONFIG_HALDCLUT_FILTER
84 #define OFFSET(x) offsetof(LUT3DContext, x)
85 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
86 #define COMMON_OPTIONS \
87 { "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" }, \
88 { "nearest", "use values from the nearest defined points", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
89 { "trilinear", "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
90 { "tetrahedral", "interpolate values using a tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
93 static inline float lerpf(float v0, float v1, float f)
95 return v0 + (v1 - v0) * f;
106 #define NEAR(x) ((int)((x) + .5))
107 #define PREV(x) ((int)(x))
108 #define NEXT(x) (FFMIN((int)(x) + 1, lut3d->lutsize - 1))
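/* Editorial sketch, not part of the listing above: how the index helpers behave
 * for a scaled coordinate, assuming a hypothetical lutsize of 33 and that
 * <stdio.h> is available. PREV() truncates, NEXT() clamps to the last lattice
 * entry, NEAR() rounds to the closest one. */
static void index_helpers_demo(float s, int lutsize)
{
    const int prev = (int)s;                          /* PREV(s) */
    const int next = FFMIN((int)s + 1, lutsize - 1);  /* NEXT(s) */
    const int near = (int)(s + .5);                   /* NEAR(s) */
    printf("s=%.2f -> prev=%d next=%d near=%d\n", s, prev, next, near);
    /* index_helpers_demo(2.7f, 33) prints: s=2.70 -> prev=2 next=3 near=3 */
}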
128 const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
129 const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
130 const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
131 const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
132 const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
133 const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
134 const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
135 const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
136 const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
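/* Editorial sketch, not part of the listing above: one common way the eight
 * corner samples c000..c111 are blended in a trilinear lookup, interpolating
 * along r, then g, then b. lerp3() is a hypothetical component-wise helper
 * built on the lerpf() shown earlier; it is not necessarily the file's own. */
static struct rgbvec lerp3(const struct rgbvec *a, const struct rgbvec *b, float f)
{
    const struct rgbvec v = { lerpf(a->r, b->r, f),
                              lerpf(a->g, b->g, f),
                              lerpf(a->b, b->b, f) };
    return v;
}

static struct rgbvec trilinear_blend(struct rgbvec c000, struct rgbvec c001,
                                     struct rgbvec c010, struct rgbvec c011,
                                     struct rgbvec c100, struct rgbvec c101,
                                     struct rgbvec c110, struct rgbvec c111,
                                     struct rgbvec d)
{
    const struct rgbvec c00 = lerp3(&c000, &c100, d.r);
    const struct rgbvec c10 = lerp3(&c010, &c110, d.r);
    const struct rgbvec c01 = lerp3(&c001, &c101, d.r);
    const struct rgbvec c11 = lerp3(&c011, &c111, d.r);
    const struct rgbvec c0  = lerp3(&c00, &c10, d.g);
    const struct rgbvec c1  = lerp3(&c01, &c11, d.g);
    return lerp3(&c0, &c1, d.b);
}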
156 const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
157 const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
158 const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
162 const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
163 const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
164 c.r = (1-d.r) * c000.r + (d.r-d.g) * c100.r + (d.g-d.b) * c110.r + (d.b) * c111.r;
165 c.g = (1-d.r) * c000.g + (d.r-d.g) * c100.g + (d.g-d.b) * c110.g + (d.b) * c111.g;
166 c.b = (1-d.r) * c000.b + (d.r-d.g) * c100.b + (d.g-d.b) * c110.b + (d.b) * c111.b;
167 } else if (d.r > d.b) {
168 const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
169 const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
170 c.r = (1-d.r) * c000.r + (d.r-d.b) * c100.r + (d.b-d.g) * c101.r + (d.g) * c111.r;
171 c.g = (1-d.r) * c000.g + (d.r-d.b) * c100.g + (d.b-d.g) * c101.g + (d.g) * c111.g;
172 c.b = (1-d.r) * c000.b + (d.r-d.b) * c100.b + (d.b-d.g) * c101.b + (d.g) * c111.b;
174 const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
175 const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
176 c.r = (1-d.b) * c000.r + (d.b-d.r) * c001.r + (d.r-d.g) * c101.r + (d.g) * c111.r;
177 c.g = (1-d.b) * c000.g + (d.b-d.r) * c001.g + (d.r-d.g) * c101.g + (d.g) * c111.g;
178 c.b = (1-d.b) * c000.b + (d.b-d.r) * c001.b + (d.r-d.g) * c101.b + (d.g) * c111.b;
182 const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
183 const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
184 c.r = (1-d.b) * c000.r + (d.b-d.g) * c001.r + (d.g-d.r) * c011.r + (d.r) * c111.r;
185 c.g = (1-d.b) * c000.g + (d.b-d.g) * c001.g + (d.g-d.r) * c011.g + (d.r) * c111.g;
186 c.b = (1-d.b) * c000.b + (d.b-d.g) * c001.b + (d.g-d.r) * c011.b + (d.r) * c111.b;
187 } else if (d.b > d.r) {
188 const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
189 const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
190 c.r = (1-d.g) * c000.r + (d.g-d.b) * c010.r + (d.b-d.r) * c011.r + (d.r) * c111.r;
191 c.g = (1-d.g) * c000.g + (d.g-d.b) * c010.g + (d.b-d.r) * c011.g + (d.r) * c111.g;
192 c.b = (1-d.g) * c000.b + (d.g-d.b) * c010.b + (d.b-d.r) * c011.b + (d.r) * c111.b;
194 const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
195 const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
196 c.r = (1-d.g) * c000.r + (d.g-d.r) * c010.r + (d.r-d.b) * c110.r + (d.b) * c111.r;
197 c.g = (1-d.g) * c000.g + (d.g-d.r) * c010.g + (d.r-d.b) * c110.g + (d.b) * c111.g;
198 c.b = (1-d.g) * c000.b + (d.g-d.r) * c010.b + (d.r-d.b) * c110.b + (d.b) * c111.b;
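/* Editorial sketch, not part of the listing above: a single-channel view of the
 * tetrahedral weights for the ordering d.r >= d.g >= d.b, which appears to
 * correspond to the first case shown above (c000, c100, c110, c111). For that
 * ordering the weights (1-d.r), (d.r-d.g), (d.g-d.b) and d.b are non-negative
 * and sum to 1, so the result is a convex combination of four lattice samples. */
static float tetra_channel_r_ge_g_ge_b(float v000, float v100, float v110, float v111,
                                       float dr, float dg, float db)
{
    return (1.f - dr) * v000 + (dr - dg) * v100 + (dg - db) * v110 + db * v111;
}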
204 #define DEFINE_INTERP_FUNC_PLANAR(name, nbits, depth) \
205 static int interp_##nbits##_##name##_p##depth(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
208 const LUT3DContext *lut3d = ctx->priv; \
209 const ThreadData *td = arg; \
210 const AVFrame *in = td->in; \
211 const AVFrame *out = td->out; \
212 const int direct = out == in; \
213 const int slice_start = (in->height * jobnr ) / nb_jobs; \
214 const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
215 uint8_t *grow = out->data[0] + slice_start * out->linesize[0]; \
216 uint8_t *brow = out->data[1] + slice_start * out->linesize[1]; \
217 uint8_t *rrow = out->data[2] + slice_start * out->linesize[2]; \
218 uint8_t *arow = out->data[3] + slice_start * out->linesize[3]; \
219 const uint8_t *srcgrow = in->data[0] + slice_start * in->linesize[0]; \
220 const uint8_t *srcbrow = in->data[1] + slice_start * in->linesize[1]; \
221 const uint8_t *srcrrow = in->data[2] + slice_start * in->linesize[2]; \
222 const uint8_t *srcarow = in->data[3] + slice_start * in->linesize[3]; \
223 const float scale_r = (lut3d->scale.r / ((1<<depth) - 1)) * (lut3d->lutsize - 1); \
224 const float scale_g = (lut3d->scale.g / ((1<<depth) - 1)) * (lut3d->lutsize - 1); \
225 const float scale_b = (lut3d->scale.b / ((1<<depth) - 1)) * (lut3d->lutsize - 1); \
227 for (y = slice_start; y < slice_end; y++) { \
228 uint##nbits##_t *dstg = (uint##nbits##_t *)grow; \
229 uint##nbits##_t *dstb = (uint##nbits##_t *)brow; \
230 uint##nbits##_t *dstr = (uint##nbits##_t *)rrow; \
231 uint##nbits##_t *dsta = (uint##nbits##_t *)arow; \
232 const uint##nbits##_t *srcg = (const uint##nbits##_t *)srcgrow; \
233 const uint##nbits##_t *srcb = (const uint##nbits##_t *)srcbrow; \
234 const uint##nbits##_t *srcr = (const uint##nbits##_t *)srcrrow; \
235 const uint##nbits##_t *srca = (const uint##nbits##_t *)srcarow; \
236 for (x = 0; x < in->width; x++) { \
237 const struct rgbvec scaled_rgb = {srcr[x] * scale_r, \
238 srcg[x] * scale_g, \
239 srcb[x] * scale_b}; \
240 struct rgbvec vec = interp_##name(lut3d, &scaled_rgb); \
241 dstr[x] = av_clip_uintp2(vec.r * (float)((1<<depth) - 1), depth); \
242 dstg[x] = av_clip_uintp2(vec.g * (float)((1<<depth) - 1), depth); \
243 dstb[x] = av_clip_uintp2(vec.b * (float)((1<<depth) - 1), depth); \
244 if (!direct && in->linesize[3]) \
245 dsta[x] = srca[x]; \
246 } \
247 grow += out->linesize[0]; \
248 brow += out->linesize[1]; \
249 rrow += out->linesize[2]; \
250 arow += out->linesize[3]; \
251 srcgrow += in->linesize[0]; \
252 srcbrow += in->linesize[1]; \
253 srcrrow += in->linesize[2]; \
254 srcarow += in->linesize[3]; \
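/* Editorial sketch, not part of the listing above: the slice bounds computed at
 * the top of the threaded workers split the frame rows evenly across jobs; with
 * height = 100 and nb_jobs = 3 the slices are [0,33), [33,66) and [66,100). */
static void slice_bounds(int height, int nb_jobs, int jobnr,
                         int *slice_start, int *slice_end)
{
    *slice_start = (height *  jobnr     ) / nb_jobs;
    *slice_end   = (height * (jobnr + 1)) / nb_jobs;
}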
283 #define DEFINE_INTERP_FUNC(name, nbits) \
284 static int interp_##nbits##_##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
287 const LUT3DContext *lut3d = ctx->priv; \
288 const ThreadData *td = arg; \
289 const AVFrame *in = td->in; \
290 const AVFrame *out = td->out; \
291 const int direct = out == in; \
292 const int step = lut3d->step; \
293 const uint8_t r = lut3d->rgba_map[R]; \
294 const uint8_t g = lut3d->rgba_map[G]; \
295 const uint8_t b = lut3d->rgba_map[B]; \
296 const uint8_t a = lut3d->rgba_map[A]; \
297 const int slice_start = (in->height * jobnr ) / nb_jobs; \
298 const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
299 uint8_t *dstrow = out->data[0] + slice_start * out->linesize[0]; \
300 const uint8_t *srcrow = in ->data[0] + slice_start * in ->linesize[0]; \
301 const float scale_r = (lut3d->scale.r / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
302 const float scale_g = (lut3d->scale.g / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
303 const float scale_b = (lut3d->scale.b / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
305 for (y = slice_start; y < slice_end; y++) { \
306 uint##nbits##_t *dst = (uint##nbits##_t *)dstrow; \
307 const uint##nbits##_t *src = (const uint##nbits##_t *)srcrow; \
308 for (x = 0; x < in->width * step; x += step) { \
309 const struct rgbvec scaled_rgb = {src[x + r] * scale_r, \
310 src[x + g] * scale_g, \
311 src[x + b] * scale_b}; \
312 struct rgbvec vec = interp_##name(lut3d, &scaled_rgb); \
313 dst[x + r] = av_clip_uint##nbits(vec.r * (float)((1<<nbits) - 1)); \
314 dst[x + g] = av_clip_uint##nbits(vec.g * (float)((1<<nbits) - 1)); \
315 dst[x + b] = av_clip_uint##nbits(vec.b * (float)((1<<nbits) - 1)); \
316 if (!direct && step == 4) \
317 dst[x + a] = src[x + a]; \
319 dstrow += out->linesize[0]; \
320 srcrow += in ->linesize[0]; \
333 #define MAX_LINE_SIZE 512
339 return !*p || *p == '#';
342 #define NEXT_LINE(loop_cond) do { \
343 if (!fgets(line, sizeof(line), f)) { \
344 av_log(ctx, AV_LOG_ERROR, "Unexpected EOF\n"); \
345 return AVERROR_INVALIDDATA; \
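/* Editorial sketch, not part of the listing above: a standalone equivalent of
 * the NEXT_LINE() pattern used by the parsers, reading lines until one is
 * neither blank nor a '#' comment; the error logging done by the macro is
 * omitted here for brevity. */
static int read_data_line(FILE *f, char *line, int size)
{
    do {
        if (!fgets(line, size, f))
            return AVERROR_INVALIDDATA; /* unexpected EOF */
    } while (skip_line(line));
    return 0;
}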
360 if (!strncmp(line, "3DLUTSIZE ", 10)) {
369 for (k = 0; k < size; k++) {
370 for (j = 0; j < size; j++) {
373 if (k != 0 || j != 0 || i != 0)
388 float min[3] = {0.0, 0.0, 0.0};
389 float max[3] = {1.0, 1.0, 1.0};
392 if (!strncmp(line, "LUT_3D_SIZE", 11)) {
401 for (k = 0; k < size; k++) {
402 for (j = 0; j < size; j++) {
409 if (!strncmp(line, "DOMAIN_", 7)) {
411 if (!strncmp(line + 7, "MIN ", 4)) vals = min;
412 else if (!strncmp(line + 7, "MAX ", 4)) vals = max;
419 } else if (!strncmp(line, "TITLE", 5)) {
447 const float scale = 16*16*16;
451 for (k = 0; k < size; k++) {
452 for (j = 0; j < size; j++) {
476 uint8_t rgb_map[3] = {0, 1, 2};
481 else if (!strncmp(line, "values", 6)) {
482 const char *p = line + 6;
483 #define SET_COLOR(id) do { \
484 while (av_isspace(*p)) \
487 case 'r': rgb_map[id] = 0; break; \
488 case 'g': rgb_map[id] = 1; break; \
489 case 'b': rgb_map[id] = 2; break; \
491 while (*p && !av_isspace(*p)) \
501 if (in == -1 || out == -1) {
505 if (in < 2 || out < 2 ||
513 scale = 1. / (out - 1);
515 for (k = 0; k < size; k++) {
516 for (j = 0; j < size; j++) {
524 vec->r = val[rgb_map[0]] * scale;
525 vec->g = val[rgb_map[1]] * scale;
526 vec->b = val[rgb_map[2]] * scale;
537 float in_min[3] = {0.0, 0.0, 0.0};
538 float in_max[3] = {1.0, 1.0, 1.0};
539 float out_min[3] = {0.0, 0.0, 0.0};
540 float out_max[3] = {1.0, 1.0, 1.0};
541 int inside_metadata = 0, size;
544 if (strncmp(line, "CSPLUTV100", 10)) {
550 if (strncmp(line, "3D", 2)) {
558 if (!strncmp(line, "BEGIN METADATA", 14)) {
562 if (!strncmp(line, "END METADATA", 12)) {
566 if (inside_metadata == 0) {
567 int size_r, size_g, size_b;
569 for (int i = 0; i < 3; i++) {
570 int npoints = strtol(line, NULL, 0);
586 if (av_sscanf(line, "%d %d %d", &size_r, &size_g, &size_b) != 3)
588 if (size_r != size_g || size_r != size_b) {
601 for (int k = 0; k < size; k++) {
602 for (int j = 0; j < size; j++) {
603 for (int i = 0; i < size; i++) {
605 if (k != 0 || j != 0 || i != 0)
609 vec->r *= out_max[0] - out_min[0];
610 vec->g *= out_max[1] - out_min[1];
611 vec->b *= out_max[2] - out_min[2];
620 lut3d->scale.r = av_clipf(1. / (in_max[0] - in_min[0]), 0.f, 1.f);
621 lut3d->scale.g = av_clipf(1. / (in_max[1] - in_min[1]), 0.f, 1.f);
622 lut3d->scale.b = av_clipf(1. / (in_max[2] - in_min[2]), 0.f, 1.f);
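/* Editorial note, not part of the listing above: the per-channel scale derived
 * here maps the LUT's declared input range back onto [0,1] before indexing;
 * e.g. a hypothetical input range of [0, 4] yields a scale of 0.25. */
static float input_range_scale(float in_min, float in_max)
{
    return av_clipf(1. / (in_max - in_min), 0.f, 1.f);
}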
630 const float c = 1. / (size - 1);
633 for (k = 0; k < size; k++) {
634 for (j = 0; j < size; j++) {
671 int depth, is16bit = 0, planar = 0;
675 depth = desc->comp[0].depth;
702 #define SET_FUNC(name) do { \
705 case 8: lut3d->interp = interp_8_##name##_p8; break; \
706 case 9: lut3d->interp = interp_16_##name##_p9; break; \
707 case 10: lut3d->interp = interp_16_##name##_p10; break; \
708 case 12: lut3d->interp = interp_16_##name##_p12; break; \
709 case 14: lut3d->interp = interp_16_##name##_p14; break; \
710 case 16: lut3d->interp = interp_16_##name##_p16; break; \
712 } else if (is16bit) { lut3d->interp = interp_16_##name; \
713 } else { lut3d->interp = interp_8_##name; } \
765 #if CONFIG_LUT3D_FILTER
766 static const AVOption lut3d_options[] = {
787 f = fopen(lut3d->file, "r");
794 ext = strrchr(lut3d->file, '.');
853 .priv_class = &lut3d_class,
858 #if CONFIG_HALDCLUT_FILTER
863 const int linesize = frame->linesize[0];
864 const int w = lut3d->clut_width;
865 const int step = lut3d->clut_step;
866 const uint8_t *rgba_map = lut3d->clut_rgba_map;
869 #define LOAD_CLUT(nbits) do { \
870 int i, j, k, x = 0, y = 0; \
872 for (k = 0; k < level; k++) { \
873 for (j = 0; j < level; j++) { \
874 for (i = 0; i < level; i++) { \
875 const uint##nbits##_t *src = (const uint##nbits##_t *) \
876 (data + y*linesize + x*step); \
877 struct rgbvec *vec = &lut3d->lut[i][j][k]; \
878 vec->r = src[rgba_map[0]] / (float)((1<<(nbits)) - 1); \
879 vec->g = src[rgba_map[1]] / (float)((1<<(nbits)) - 1); \
880 vec->b = src[rgba_map[2]] / (float)((1<<(nbits)) - 1); \
890 switch (lut3d->clut_bits) {
891 case 8: LOAD_CLUT(8); break;
892 case 16: LOAD_CLUT(16); break;
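/* Editorial sketch, not part of the listing above: a Hald CLUT identity image
 * of level n (e.g. n = 8 for 512x512) encodes an (n*n)^3 LUT; scanning the
 * image in raster order, the red index varies fastest, then green, then blue,
 * which matches LOAD_CLUT's innermost loop storing into lut[i][j][k]. A
 * hypothetical helper mapping a raster pixel index to lattice coordinates: */
static void hald_lattice_index(int p, int n, int *r, int *g, int *b)
{
    const int size = n * n; /* LUT entries per axis */
    *r =  p % size;
    *g = (p / size) % size;
    *b =  p / (size * size);
}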
901 const int glinesize = frame->linesize[0];
902 const int blinesize = frame->linesize[1];
903 const int rlinesize = frame->linesize[2];
904 const int w = lut3d->clut_width;
907 #define LOAD_CLUT_PLANAR(nbits, depth) do { \
908 int i, j, k, x = 0, y = 0; \
910 for (k = 0; k < level; k++) { \
911 for (j = 0; j < level; j++) { \
912 for (i = 0; i < level; i++) { \
913 const uint##nbits##_t *gsrc = (const uint##nbits##_t *) \
914 (datag + y*glinesize); \
915 const uint##nbits##_t *bsrc = (const uint##nbits##_t *) \
916 (datab + y*blinesize); \
917 const uint##nbits##_t *rsrc = (const uint##nbits##_t *) \
918 (datar + y*rlinesize); \
919 struct rgbvec *vec = &lut3d->lut[i][j][k]; \
920 vec->r = gsrc[x] / (float)((1<<(depth)) - 1); \
921 vec->g = bsrc[x] / (float)((1<<(depth)) - 1); \
922 vec->b = rsrc[x] / (float)((1<<(depth)) - 1); \
932 switch (lut3d->clut_bits) {
933 case 8: LOAD_CLUT_PLANAR(8, 8); break;
934 case 9: LOAD_CLUT_PLANAR(16, 9); break;
935 case 10: LOAD_CLUT_PLANAR(16, 10); break;
936 case 12: LOAD_CLUT_PLANAR(16, 12); break;
937 case 14: LOAD_CLUT_PLANAR(16, 14); break;
938 case 16: LOAD_CLUT_PLANAR(16, 16); break;
951 outlink->w = ctx->inputs[0]->w;
952 outlink->h = ctx->inputs[0]->h;
974 lut3d->clut_bits = desc->comp[0].depth;
997 const int max_clut_level = sqrt(MAX_LEVEL);
998 const int max_clut_size = max_clut_level*max_clut_level*max_clut_level;
1000 "(maximum level is %d, or %dx%d CLUT)\n",
1001 max_clut_level, max_clut_size, max_clut_size);
1022 if (lut3d->clut_planar)
1023 update_clut_planar(ctx->priv, second);
1025 update_clut_packed(ctx->priv, second);
1034 lut3d->fs.on_event = update_apply_clut;
1044 static const AVOption haldclut_options[] = {
1058 .config_props = config_clut,
1076 .preinit = haldclut_framesync_preinit,
1077 .init = haldclut_init,
1078 .uninit = haldclut_uninit,
1081 .inputs = haldclut_inputs,
1083 .priv_class = &haldclut_class,
1088 #if CONFIG_LUT1D_FILTER
1090 enum interp_1d_mode {
1091 INTERPOLATE_1D_NEAREST,
1092 INTERPOLATE_1D_LINEAR,
1093 INTERPOLATE_1D_CUBIC,
1094 INTERPOLATE_1D_COSINE,
1095 INTERPOLATE_1D_SPLINE,
1099 #define MAX_1D_LEVEL 65536
1101 typedef struct LUT1DContext {
1108 float lut[3][MAX_1D_LEVEL];
1114 #define OFFSET(x) offsetof(LUT1DContext, x)
1116 static void set_identity_matrix_1d(LUT1DContext *lut1d, int size)
1118 const float c = 1. / (size - 1);
1121 lut1d->lutsize = size;
1123 lut1d->lut[0][i] = i * c;
1124 lut1d->lut[1][i] = i * c;
1125 lut1d->lut[2][i] = i * c;
1131 LUT1DContext *lut1d = ctx->priv;
1133 float in_min[3] = {0.0, 0.0, 0.0};
1134 float in_max[3] = {1.0, 1.0, 1.0};
1135 float out_min[3] = {0.0, 0.0, 0.0};
1136 float out_max[3] = {1.0, 1.0, 1.0};
1137 int inside_metadata = 0, size;
1140 if (strncmp(line, "CSPLUTV100", 10)) {
1146 if (strncmp(line, "1D", 2)) {
1154 if (!strncmp(line, "BEGIN METADATA", 14)) {
1155 inside_metadata = 1;
1158 if (!strncmp(line, "END METADATA", 12)) {
1159 inside_metadata = 0;
1162 if (inside_metadata == 0) {
1163 for (int i = 0; i < 3; i++) {
1164 int npoints = strtol(line, NULL, 0);
1182 if (size < 2 || size > MAX_1D_LEVEL) {
1187 lut1d->lutsize = size;
1189 for (int i = 0; i < size; i++) {
1191 if (av_sscanf(line, "%f %f %f", &lut1d->lut[0][i], &lut1d->lut[1][i], &lut1d->lut[2][i]) != 3)
1193 lut1d->lut[0][i] *= out_max[0] - out_min[0];
1194 lut1d->lut[1][i] *= out_max[1] - out_min[1];
1195 lut1d->lut[2][i] *= out_max[2] - out_min[2];
1202 lut1d->scale.r = av_clipf(1. / (in_max[0] - in_min[0]), 0.f, 1.f);
1203 lut1d->scale.g = av_clipf(1. / (in_max[1] - in_min[1]), 0.f, 1.f);
1204 lut1d->scale.b = av_clipf(1. / (in_max[2] - in_min[2]), 0.f, 1.f);
1211 LUT1DContext *lut1d = ctx->priv;
1213 float min[3] = {0.0, 0.0, 0.0};
1214 float max[3] = {1.0, 1.0, 1.0};
1217 if (!strncmp(line, "LUT_1D_SIZE", 11)) {
1221 if (size < 2 || size > MAX_1D_LEVEL) {
1225 lut1d->lutsize = size;
1230 if (!strncmp(line, "DOMAIN_", 7)) {
1232 if (!strncmp(line + 7, "MIN ", 4)) vals = min;
1233 else if (!strncmp(line + 7, "MAX ", 4)) vals = max;
1240 } else if (!strncmp(line, "LUT_1D_INPUT_RANGE ", 19)) {
1245 } else if (!strncmp(line, "TITLE", 5)) {
1249 if (av_sscanf(line, "%f %f %f", &lut1d->lut[0][i], &lut1d->lut[1][i], &lut1d->lut[2][i]) != 3)
1256 lut1d->scale.r = av_clipf(1. / (max[0] - min[0]), 0.f, 1.f);
1257 lut1d->scale.g = av_clipf(1. / (max[1] - min[1]), 0.f, 1.f);
1258 lut1d->scale.b = av_clipf(1. / (max[2] - min[2]), 0.f, 1.f);
1263 static const AVOption lut1d_options[] = {
1266 { "nearest", "use values from the nearest defined points", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_1D_NEAREST}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
1267 { "linear", "use values from the linear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_1D_LINEAR}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
1268 { "cosine", "use values from the cosine interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_1D_COSINE}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
1269 { "cubic", "use values from the cubic interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_1D_CUBIC}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
1270 { "spline", "use values from the spline interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_1D_SPLINE}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
1276 static inline float interp_1d_nearest(const LUT1DContext *lut1d,
1277 int idx, const float s)
1279 return lut1d->lut[idx][NEAR(s)];
1282 #define NEXT1D(x) (FFMIN((int)(x) + 1, lut1d->lutsize - 1))
1284 static inline float interp_1d_linear(const LUT1DContext *lut1d,
1285 int idx, const float s)
1287 const int prev = PREV(s);
1288 const int next = NEXT1D(s);
1289 const float d = s - prev;
1290 const float p = lut1d->lut[idx][prev];
1291 const float n = lut1d->lut[idx][next];
1296 static inline float interp_1d_cosine(const LUT1DContext *lut1d,
1297 int idx, const float s)
1299 const int prev = PREV(s);
1300 const int next = NEXT1D(s);
1301 const float d = s - prev;
1302 const float p = lut1d->lut[idx][prev];
1303 const float n = lut1d->lut[idx][next];
1304 const float m = (1.f - cosf(d * M_PI)) * .5f;
1309 static inline float interp_1d_cubic(const LUT1DContext *lut1d,
1310 int idx, const float s)
1312 const int prev = PREV(s);
1313 const int next = NEXT1D(s);
1314 const float mu = s - prev;
1317 float y0 = lut1d->lut[idx][FFMAX(prev - 1, 0)];
1318 float y1 = lut1d->lut[idx][prev];
1319 float y2 = lut1d->lut[idx][next];
1320 float y3 = lut1d->lut[idx][FFMIN(next + 1, lut1d->lutsize - 1)];
1324 a0 = y3 - y2 - y0 + y1;
1329 return a0 * mu * mu2 + a1 * mu2 + a2 * mu + a3;
1332 static inline float interp_1d_spline(const LUT1DContext *lut1d,
1333 int idx, const float s)
1335 const int prev = PREV(s);
1336 const int next = NEXT1D(s);
1337 const float x = s - prev;
1338 float c0, c1, c2, c3;
1340 float y0 = lut1d->lut[idx][FFMAX(prev - 1, 0)];
1341 float y1 = lut1d->lut[idx][prev];
1342 float y2 = lut1d->lut[idx][next];
1343 float y3 = lut1d->lut[idx][FFMIN(next + 1, lut1d->lutsize - 1)];
1346 c1 = .5f * (y2 - y0);
1347 c2 = y0 - 2.5f * y1 + 2.f * y2 - .5f * y3;
1348 c3 = .5f * (y3 - y0) + 1.5f * (y1 - y2);
1350 return ((c3 * x + c2) * x + c1) * x + c0;
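/* Editorial sketch, not part of the listing above: a self-contained linear
 * variant matching the prev/next/d values computed by interp_1d_linear above;
 * the cosine, cubic and spline modes differ only in how the neighbouring LUT
 * entries are weighted. */
static float lut1d_sample_linear(const LUT1DContext *lut1d, int idx, float s)
{
    const int   prev = PREV(s);
    const int   next = NEXT1D(s);
    const float d    = s - prev;
    return lerpf(lut1d->lut[idx][prev], lut1d->lut[idx][next], d);
}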
1353 #define DEFINE_INTERP_FUNC_PLANAR_1D(name, nbits, depth) \
1354 static int interp_1d_##nbits##_##name##_p##depth(AVFilterContext *ctx, \
1355 void *arg, int jobnr, \
1359 const LUT1DContext *lut1d = ctx->priv; \
1360 const ThreadData *td = arg; \
1361 const AVFrame *in = td->in; \
1362 const AVFrame *out = td->out; \
1363 const int direct = out == in; \
1364 const int slice_start = (in->height * jobnr ) / nb_jobs; \
1365 const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
1366 uint8_t *grow = out->data[0] + slice_start * out->linesize[0]; \
1367 uint8_t *brow = out->data[1] + slice_start * out->linesize[1]; \
1368 uint8_t *rrow = out->data[2] + slice_start * out->linesize[2]; \
1369 uint8_t *arow = out->data[3] + slice_start * out->linesize[3]; \
1370 const uint8_t *srcgrow = in->data[0] + slice_start * in->linesize[0]; \
1371 const uint8_t *srcbrow = in->data[1] + slice_start * in->linesize[1]; \
1372 const uint8_t *srcrrow = in->data[2] + slice_start * in->linesize[2]; \
1373 const uint8_t *srcarow = in->data[3] + slice_start * in->linesize[3]; \
1374 const float factor = (1 << depth) - 1; \
1375 const float scale_r = (lut1d->scale.r / factor) * (lut1d->lutsize - 1); \
1376 const float scale_g = (lut1d->scale.g / factor) * (lut1d->lutsize - 1); \
1377 const float scale_b = (lut1d->scale.b / factor) * (lut1d->lutsize - 1); \
1379 for (y = slice_start; y < slice_end; y++) { \
1380 uint##nbits##_t *dstg = (uint##nbits##_t *)grow; \
1381 uint##nbits##_t *dstb = (uint##nbits##_t *)brow; \
1382 uint##nbits##_t *dstr = (uint##nbits##_t *)rrow; \
1383 uint##nbits##_t *dsta = (uint##nbits##_t *)arow; \
1384 const uint##nbits##_t *srcg = (const uint##nbits##_t *)srcgrow; \
1385 const uint##nbits##_t *srcb = (const uint##nbits##_t *)srcbrow; \
1386 const uint##nbits##_t *srcr = (const uint##nbits##_t *)srcrrow; \
1387 const uint##nbits##_t *srca = (const uint##nbits##_t *)srcarow; \
1388 for (x = 0; x < in->width; x++) { \
1389 float r = srcr[x] * scale_r; \
1390 float g = srcg[x] * scale_g; \
1391 float b = srcb[x] * scale_b; \
1392 r = interp_1d_##name(lut1d, 0, r); \
1393 g = interp_1d_##name(lut1d, 1, g); \
1394 b = interp_1d_##name(lut1d, 2, b); \
1395 dstr[x] = av_clip_uintp2(r * factor, depth); \
1396 dstg[x] = av_clip_uintp2(g * factor, depth); \
1397 dstb[x] = av_clip_uintp2(b * factor, depth); \
1398 if (!direct && in->linesize[3]) \
1399 dsta[x] = srca[x]; \
1401 grow += out->linesize[0]; \
1402 brow += out->linesize[1]; \
1403 rrow += out->linesize[2]; \
1404 arow += out->linesize[3]; \
1405 srcgrow += in->linesize[0]; \
1406 srcbrow += in->linesize[1]; \
1407 srcrrow += in->linesize[2]; \
1408 srcarow += in->linesize[3]; \
1413 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 8, 8)
1414 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 8, 8)
1415 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 8, 8)
1416 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 8, 8)
1417 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 8, 8)
1419 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 16, 9)
1420 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 16, 9)
1421 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 16, 9)
1422 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 16, 9)
1423 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 16, 9)
1425 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 16, 10)
1426 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 16, 10)
1427 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 16, 10)
1428 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 16, 10)
1429 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 16, 10)
1431 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 16, 12)
1432 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 16, 12)
1433 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 16, 12)
1434 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 16, 12)
1435 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 16, 12)
1437 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 16, 14)
1438 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 16, 14)
1439 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 16, 14)
1440 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 16, 14)
1441 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 16, 14)
1443 DEFINE_INTERP_FUNC_PLANAR_1D(nearest, 16, 16)
1444 DEFINE_INTERP_FUNC_PLANAR_1D(linear, 16, 16)
1445 DEFINE_INTERP_FUNC_PLANAR_1D(cosine, 16, 16)
1446 DEFINE_INTERP_FUNC_PLANAR_1D(cubic, 16, 16)
1447 DEFINE_INTERP_FUNC_PLANAR_1D(spline, 16, 16)
1449 #define DEFINE_INTERP_FUNC_1D(name, nbits) \
1450 static int interp_1d_##nbits##_##name(AVFilterContext *ctx, void *arg, \
1451 int jobnr, int nb_jobs) \
1454 const LUT1DContext *lut1d = ctx->priv; \
1455 const ThreadData *td = arg; \
1456 const AVFrame *in = td->in; \
1457 const AVFrame *out = td->out; \
1458 const int direct = out == in; \
1459 const int step = lut1d->step; \
1460 const uint8_t r = lut1d->rgba_map[R]; \
1461 const uint8_t g = lut1d->rgba_map[G]; \
1462 const uint8_t b = lut1d->rgba_map[B]; \
1463 const uint8_t a = lut1d->rgba_map[A]; \
1464 const int slice_start = (in->height * jobnr ) / nb_jobs; \
1465 const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
1466 uint8_t *dstrow = out->data[0] + slice_start * out->linesize[0]; \
1467 const uint8_t *srcrow = in ->data[0] + slice_start * in ->linesize[0]; \
1468 const float factor = (1 << nbits) - 1; \
1469 const float scale_r = (lut1d->scale.r / factor) * (lut1d->lutsize - 1); \
1470 const float scale_g = (lut1d->scale.g / factor) * (lut1d->lutsize - 1); \
1471 const float scale_b = (lut1d->scale.b / factor) * (lut1d->lutsize - 1); \
1473 for (y = slice_start; y < slice_end; y++) { \
1474 uint##nbits##_t *dst = (uint##nbits##_t *)dstrow; \
1475 const uint##nbits##_t *src = (const uint##nbits##_t *)srcrow; \
1476 for (x = 0; x < in->width * step; x += step) { \
1477 float rr = src[x + r] * scale_r; \
1478 float gg = src[x + g] * scale_g; \
1479 float bb = src[x + b] * scale_b; \
1480 rr = interp_1d_##name(lut1d, 0, rr); \
1481 gg = interp_1d_##name(lut1d, 1, gg); \
1482 bb = interp_1d_##name(lut1d, 2, bb); \
1483 dst[x + r] = av_clip_uint##nbits(rr * factor); \
1484 dst[x + g] = av_clip_uint##nbits(gg * factor); \
1485 dst[x + b] = av_clip_uint##nbits(bb * factor); \
1486 if (!direct && step == 4) \
1487 dst[x + a] = src[x + a]; \
1489 dstrow += out->linesize[0]; \
1490 srcrow += in ->linesize[0]; \
1495 DEFINE_INTERP_FUNC_1D(nearest, 8)
1496 DEFINE_INTERP_FUNC_1D(linear, 8)
1497 DEFINE_INTERP_FUNC_1D(cosine, 8)
1498 DEFINE_INTERP_FUNC_1D(cubic, 8)
1499 DEFINE_INTERP_FUNC_1D(spline, 8)
1501 DEFINE_INTERP_FUNC_1D(nearest, 16)
1502 DEFINE_INTERP_FUNC_1D(linear, 16)
1503 DEFINE_INTERP_FUNC_1D(cosine, 16)
1504 DEFINE_INTERP_FUNC_1D(cubic, 16)
1505 DEFINE_INTERP_FUNC_1D(spline, 16)
1509 int depth, is16bit = 0, planar = 0;
1510 LUT1DContext *lut1d = inlink->dst->priv;
1513 depth = desc->comp[0].depth;
1515 switch (inlink->format) {
1540 #define SET_FUNC_1D(name) do { \
1543 case 8: lut1d->interp = interp_1d_8_##name##_p8; break; \
1544 case 9: lut1d->interp = interp_1d_16_##name##_p9; break; \
1545 case 10: lut1d->interp = interp_1d_16_##name##_p10; break; \
1546 case 12: lut1d->interp = interp_1d_16_##name##_p12; break; \
1547 case 14: lut1d->interp = interp_1d_16_##name##_p14; break; \
1548 case 16: lut1d->interp = interp_1d_16_##name##_p16; break; \
1550 } else if (is16bit) { lut1d->interp = interp_1d_16_##name; \
1551 } else { lut1d->interp = interp_1d_8_##name; } \
1554 switch (lut1d->interpolation) {
1555 case INTERPOLATE_1D_NEAREST: SET_FUNC_1D(nearest); break;
1556 case INTERPOLATE_1D_LINEAR: SET_FUNC_1D(linear); break;
1557 case INTERPOLATE_1D_COSINE: SET_FUNC_1D(cosine); break;
1558 case INTERPOLATE_1D_CUBIC: SET_FUNC_1D(cubic); break;
1559 case INTERPOLATE_1D_SPLINE: SET_FUNC_1D(spline); break;
1572 LUT1DContext *lut1d = ctx->priv;
1574 lut1d->scale.r = lut1d->scale.g = lut1d->scale.b = 1.f;
1577 set_identity_matrix_1d(lut1d, 32);
1581 f = fopen(lut1d->file, "r");
1588 ext = strrchr(lut1d->file, '.');
1599 ret = parse_cinespace_1d(ctx, f);
1605 if (!ret && !lut1d->lutsize) {
1618 LUT1DContext *lut1d = ctx->priv;
1657 .filter_frame = filter_frame_1d,
1658 .config_props = config_input_1d,
1674 .priv_size = sizeof(LUT1DContext),
1679 .priv_class = &lut1d_class,