FFmpeg
vf_colorconstancy.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Mina Sami
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Color Constancy filter
24  *
25  * @see http://colorconstancy.com/
26  *
27  * @cite
28  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29  */
30 
31 #include "libavutil/imgutils.h"
32 #include "libavutil/opt.h"
33 
34 #include "avfilter.h"
35 #include "internal.h"
36 #include "video.h"
37 
38 #include <math.h>
39 
40 #define GREY_EDGE "greyedge"
41 
42 #define SQRT3 1.73205080757
43 
44 #define NUM_PLANES 3
45 #define MAX_DIFF_ORD 2
46 #define MAX_META_DATA 4
47 #define MAX_DATA 4
48 
49 #define INDEX_TEMP 0
50 #define INDEX_DX 1
51 #define INDEX_DY 2
52 #define INDEX_DXY 3
53 #define INDEX_NORM INDEX_DX
54 #define INDEX_SRC 0
55 #define INDEX_DST 1
56 #define INDEX_ORD 2
57 #define INDEX_DIR 3
58 #define DIR_X 0
59 #define DIR_Y 1
60 
61 /**
62  * Used for passing data between threads.
63  */
64 typedef struct ThreadData {
65  AVFrame *in, *out;
68 } ThreadData;
69 
70 /**
71  * Common struct for all algorithms contexts.
72  */
73 typedef struct ColorConstancyContext {
74  const AVClass *class;
75 
76  int difford;
77  int minknorm; /**< @minknorm = 0 : getMax instead */
78  double sigma;
79 
81  int planeheight[4];
82  int planewidth[4];
83 
85  double *gauss[MAX_DIFF_ORD+1];
86 
87  double white[NUM_PLANES];
89 
90 #define OFFSET(x) offsetof(ColorConstancyContext, x)
91 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
92 
93 #define GINDX(s, i) ( (i) - ((s) >> 2) )
94 
95 /**
96  * Sets gauss filters used for calculating gauss derivatives. Filter size
97  * depends on sigma which is a user option hence we calculate these
98  * filters each time. Also each higher order depends on lower ones. Sigma
99  * can be zero only at difford = 0, then we only convert data to double
100  * instead.
101  *
102  * @param ctx the filter context.
103  *
104  * @return 0 in case of success, a negative value corresponding to an
105  * AVERROR code in case of failure.
106  */
108 {
109  ColorConstancyContext *s = ctx->priv;
110  int filtersize = s->filtersize;
111  int difford = s->difford;
112  double sigma = s->sigma;
113  double sum1, sum2;
114  int i;
115 
116  for (i = 0; i <= difford; ++i) {
117  s->gauss[i] = av_calloc(filtersize, sizeof(*s->gauss[i]));
118  if (!s->gauss[i]) {
119  for (; i >= 0; --i) {
120  av_freep(&s->gauss[i]);
121  }
122  return AVERROR(ENOMEM);
123  }
124  }
125 
126  // Order 0
127  av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
128  sum1 = 0.0;
129  if (!sigma) {
130  s->gauss[0][0] = 1; // Copying data to double instead of convolution
131  } else {
132  for (i = 0; i < filtersize; ++i) {
133  s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
134  sum1 += s->gauss[0][i];
135  }
136  for (i = 0; i < filtersize; ++i) {
137  s->gauss[0][i] /= sum1;
138  }
139  }
140  // Order 1
141  if (difford > 0) {
142  av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
143  sum1 = 0.0;
144  for (i = 0; i < filtersize; ++i) {
145  s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
146  sum1 += s->gauss[1][i] * GINDX(filtersize, i);
147  }
148 
149  for (i = 0; i < filtersize; ++i) {
150  s->gauss[1][i] /= sum1;
151  }
152 
153  // Order 2
154  if (difford > 1) {
155  av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
156  sum1 = 0.0;
157  for (i = 0; i < filtersize; ++i) {
158  s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
159  * s->gauss[0][i];
160  sum1 += s->gauss[2][i];
161  }
162 
163  sum2 = 0.0;
164  for (i = 0; i < filtersize; ++i) {
165  s->gauss[2][i] -= sum1 / (filtersize);
166  sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
167  }
168  for (i = 0; i < filtersize ; ++i) {
169  s->gauss[2][i] /= sum2;
170  }
171  }
172  }
173  return 0;
174 }
175 
176 /**
177  * Frees up buffers used by grey edge for storing derivatives final
178  * and intermediate results. Number of buffers and number of planes
179  * for last buffer are given so it can be safely called at allocation
180  * failure instances.
181  *
182  * @param td holds the buffers.
183  * @param nb_buff number of buffers to be freed.
184  * @param nb_planes number of planes for last buffer to be freed.
185  */
186 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
187 {
188  int b, p;
189 
190  for (b = 0; b < nb_buff; ++b) {
191  for (p = 0; p < NUM_PLANES; ++p) {
192  av_freep(&td->data[b][p]);
193  }
194  }
195  // Final buffer may not be fully allocated at fail cases
196  for (p = 0; p < nb_planes; ++p) {
197  av_freep(&td->data[b][p]);
198  }
199 }
200 
201 /**
202  * Allocates buffers used by grey edge for storing derivatives final
203  * and intermediate results.
204  *
205  * @param ctx the filter context.
206  * @param td holds the buffers.
207  *
208  * @return 0 in case of success, a negative value corresponding to an
209  * AVERROR code in case of failure.
210  */
212 {
213  ColorConstancyContext *s = ctx->priv;
214  int nb_buff = s->difford + 1;
215  int b, p;
216 
217  av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
218  for (b = 0; b <= nb_buff; ++b) { // We need difford + 1 buffers
219  for (p = 0; p < NUM_PLANES; ++p) {
220  td->data[b][p] = av_calloc(s->planeheight[p] * s->planewidth[p],
221  sizeof(*td->data[b][p]));
222  if (!td->data[b][p]) {
224  return AVERROR(ENOMEM);
225  }
226  }
227  }
228  return 0;
229 }
230 
231 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
232 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
233 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
234 
235 /**
236  * Slice calculation of gaussian derivatives. Applies 1-D gaussian derivative filter
237  * either horizontally or vertically according to meta data given in thread data.
238  * When convoluting horizontally source is always the in frame within thread data
239  * while when convoluting vertically source is a buffer.
240  *
241  * @param ctx the filter context.
242  * @param arg data to be passed between threads.
243  * @param jobnr current job number.
244  * @param nb_jobs total number of jobs.
245  *
246  * @return 0.
247  */
248 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
249 {
250  ColorConstancyContext *s = ctx->priv;
251  ThreadData *td = arg;
252  AVFrame *in = td->in;
253  const int ord = td->meta_data[INDEX_ORD];
254  const int dir = td->meta_data[INDEX_DIR];
255  const int src_index = td->meta_data[INDEX_SRC];
256  const int dst_index = td->meta_data[INDEX_DST];
257  const int filtersize = s->filtersize;
258  const double *gauss = s->gauss[ord];
259  int plane;
260 
261  for (plane = 0; plane < NUM_PLANES; ++plane) {
262  const int height = s->planeheight[plane];
263  const int width = s->planewidth[plane];
264  const int in_linesize = in->linesize[plane];
265  double *dst = td->data[dst_index][plane];
266  int slice_start, slice_end;
267  int r, c, g;
268 
269  if (dir == DIR_X) {
270  /** Applying gauss horizontally along each row */
271  const uint8_t *src = in->data[plane];
272  slice_start = (height * jobnr ) / nb_jobs;
273  slice_end = (height * (jobnr + 1)) / nb_jobs;
274 
275  for (r = slice_start; r < slice_end; ++r) {
276  for (c = 0; c < width; ++c) {
277  dst[INDX2D(r, c, width)] = 0;
278  for (g = 0; g < filtersize; ++g) {
279  dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
280  in_linesize, height, width, gauss[g]);
281  }
282  }
283  }
284  } else {
285  /** Applying gauss vertically along each column */
286  const double *src = td->data[src_index][plane];
287  slice_start = (width * jobnr ) / nb_jobs;
288  slice_end = (width * (jobnr + 1)) / nb_jobs;
289 
290  for (c = slice_start; c < slice_end; ++c) {
291  for (r = 0; r < height; ++r) {
292  dst[INDX2D(r, c, width)] = 0;
293  for (g = 0; g < filtersize; ++g) {
294  dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
295  width, height, width, gauss[g]);
296  }
297  }
298  }
299  }
300 
301  }
302  return 0;
303 }
304 
305 /**
306  * Slice Frobenius normalization of gaussian derivatives. Only called for difford values of
307  * 1 or 2.
308  *
309  * @param ctx the filter context.
310  * @param arg data to be passed between threads.
311  * @param jobnr current job number.
312  * @param nb_jobs total number of jobs.
313  *
314  * @return 0.
315  */
316 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
317 {
318  ColorConstancyContext *s = ctx->priv;
319  ThreadData *td = arg;
320  const int difford = s->difford;
321  int plane;
322 
323  for (plane = 0; plane < NUM_PLANES; ++plane) {
324  const int height = s->planeheight[plane];
325  const int width = s->planewidth[plane];
326  const int64_t numpixels = width * (int64_t)height;
327  const int slice_start = (numpixels * jobnr ) / nb_jobs;
328  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
329  const double *dx = td->data[INDEX_DX][plane];
330  const double *dy = td->data[INDEX_DY][plane];
331  double *norm = td->data[INDEX_NORM][plane];
332  int i;
333 
334  if (difford == 1) {
335  for (i = slice_start; i < slice_end; ++i) {
336  norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
337  }
338  } else {
339  const double *dxy = td->data[INDEX_DXY][plane];
340  for (i = slice_start; i < slice_end; ++i) {
341  norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
342  }
343  }
344  }
345 
346  return 0;
347 }
348 
349 /**
350  * Utility function for setting up differentiation data/metadata.
351  *
352  * @param ctx the filter context.
353  * @param td to be used for passing data between threads.
354  * @param ord ord of differentiation.
355  * @param dir direction of differentiation.
356  * @param src index of source used for differentiation.
357  * @param dst index destination used for saving differentiation result.
358  * @param dim maximum dimension in current direction.
359  * @param nb_threads number of threads to use.
360  */
361 static void av_always_inline
363  int src, int dst, int dim, int nb_threads) {
364  td->meta_data[INDEX_ORD] = ord;
365  td->meta_data[INDEX_DIR] = dir;
366  td->meta_data[INDEX_SRC] = src;
367  td->meta_data[INDEX_DST] = dst;
369  NULL, FFMIN(dim, nb_threads));
370 }
371 
372 /**
373  * Main control function for calculating gaussian derivatives.
374  *
375  * @param ctx the filter context.
376  * @param td holds the buffers used for storing results.
377  *
378  * @return 0 in case of success, a negative value corresponding to an
379  * AVERROR code in case of failure.
380  */
382 {
383  ColorConstancyContext *s = ctx->priv;
384  int nb_threads = s->nb_threads;
385  int height = s->planeheight[1];
386  int width = s->planewidth[1];
387 
388  switch(s->difford) {
389  case 0:
390  if (!s->sigma) { // Only copy once
391  get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
392  } else {
393  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
394  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
395  // save to INDEX_NORM because this will not be normalied and
396  // end gry edge filter expects result to be found in INDEX_NORM
397  }
398  return 0;
399 
400  case 1:
401  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
402  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
403 
404  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
405  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
406  return 0;
407 
408  case 2:
409  get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
410  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
411 
412  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
413  get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
414 
415  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
416  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
417  return 0;
418 
419  default:
420  av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
421  return AVERROR(EINVAL);
422  }
423 
424 }
425 
426 /**
427  * Slice function for grey edge algorithm that does partial summing/maximizing
428  * of gaussian derivatives.
429  *
430  * @param ctx the filter context.
431  * @param arg data to be passed between threads.
432  * @param jobnr current job number.
433  * @param nb_jobs total number of jobs.
434  *
435  * @return 0.
436  */
437 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
438 {
439  ColorConstancyContext *s = ctx->priv;
440  ThreadData *td = arg;
441  AVFrame *in = td->in;
442  int minknorm = s->minknorm;
443  const uint8_t thresh = 255;
444  int plane;
445 
446  for (plane = 0; plane < NUM_PLANES; ++plane) {
447  const int height = s->planeheight[plane];
448  const int width = s->planewidth[plane];
449  const int in_linesize = in->linesize[plane];
450  const int slice_start = (height * jobnr) / nb_jobs;
451  const int slice_end = (height * (jobnr+1)) / nb_jobs;
452  const uint8_t *img_data = in->data[plane];
453  const double *src = td->data[INDEX_NORM][plane];
454  double *dst = td->data[INDEX_DST][plane];
455  int r, c;
456 
457  dst[jobnr] = 0;
458  if (!minknorm) {
459  for (r = slice_start; r < slice_end; ++r) {
460  for (c = 0; c < width; ++c) {
461  dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
462  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
463  }
464  }
465  } else {
466  for (r = slice_start; r < slice_end; ++r) {
467  for (c = 0; c < width; ++c) {
468  dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
469  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
470  }
471  }
472  }
473  }
474  return 0;
475 }
476 
477 /**
478  * Main control function for grey edge algorithm.
479  *
480  * @param ctx the filter context.
481  * @param in frame to perform grey edge on.
482  *
483  * @return 0 in case of success, a negative value corresponding to an
484  * AVERROR code in case of failure.
485  */
487 {
488  ColorConstancyContext *s = ctx->priv;
489  ThreadData td;
490  int minknorm = s->minknorm;
491  int difford = s->difford;
492  double *white = s->white;
493  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
494  int plane, job, ret;
495 
496  td.in = in;
498  if (ret) {
499  return ret;
500  }
501  get_derivative(ctx, &td);
502  if (difford > 0) {
504  }
505 
507  if (!minknorm) {
508  for (plane = 0; plane < NUM_PLANES; ++plane) {
509  white[plane] = 0; // All values are absolute
510  for (job = 0; job < nb_jobs; ++job) {
511  white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
512  }
513  }
514  } else {
515  for (plane = 0; plane < NUM_PLANES; ++plane) {
516  white[plane] = 0;
517  for (job = 0; job < nb_jobs; ++job) {
518  white[plane] += td.data[INDEX_DST][plane][job];
519  }
520  white[plane] = pow(white[plane], 1./minknorm);
521  }
522  }
523 
524  cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
525  return 0;
526 }
527 
528 /**
529  * Normalizes estimated illumination since only illumination vector
530  * direction is required for color constancy.
531  *
532  * @param light the estimated illumination to be normalized in place
533  */
534 static void normalize_light(double *light)
535 {
536  double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
537  int plane;
538 
539  // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
540 
541  if (!abs_val) {
542  for (plane = 0; plane < NUM_PLANES; ++plane) {
543  light[plane] = 1.0;
544  }
545  } else {
546  for (plane = 0; plane < NUM_PLANES; ++plane) {
547  light[plane] = (light[plane] / abs_val);
548  if (!light[plane]) { // to avoid division by zero when correcting
549  light[plane] = 1.0;
550  }
551  }
552  }
553 }
554 
555 /**
556  * Redirects to corresponding algorithm estimation function and performs normalization
557  * after estimation.
558  *
559  * @param ctx the filter context.
560  * @param in frame to perform estimation on.
561  *
562  * @return 0 in case of success, a negative value corresponding to an
563  * AVERROR code in case of failure.
564  */
566 {
567  ColorConstancyContext *s = ctx->priv;
568  int ret;
569 
570  ret = filter_grey_edge(ctx, in);
571 
572  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
573  s->white[0], s->white[1], s->white[2]);
574  normalize_light(s->white);
575  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
576  s->white[0], s->white[1], s->white[2]);
577 
578  return ret;
579 }
580 
581 /**
582  * Performs simple correction via diagonal transformation model.
583  *
584  * @param ctx the filter context.
585  * @param arg data to be passed between threads.
586  * @param jobnr current job number.
587  * @param nb_jobs total number of jobs.
588  *
589  * @return 0.
590  */
591 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
592 {
593  ColorConstancyContext *s = ctx->priv;
594  ThreadData *td = arg;
595  AVFrame *in = td->in;
596  AVFrame *out = td->out;
597  int plane;
598 
599  for (plane = 0; plane < NUM_PLANES; ++plane) {
600  const int height = s->planeheight[plane];
601  const int width = s->planewidth[plane];
602  const int64_t numpixels = width * (int64_t)height;
603  const int slice_start = (numpixels * jobnr) / nb_jobs;
604  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
605  const uint8_t *src = in->data[plane];
606  uint8_t *dst = out->data[plane];
607  double temp;
608  unsigned i;
609 
610  for (i = slice_start; i < slice_end; ++i) {
611  temp = src[i] / (s->white[plane] * SQRT3);
612  dst[i] = av_clip_uint8((int)(temp + 0.5));
613  }
614  }
615  return 0;
616 }
617 
618 /**
619  * Main control function for correcting scene illumination based on
620  * estimated illumination.
621  *
622  * @param ctx the filter context.
623  * @param in holds frame to correct
624  * @param out holds corrected frame
625  */
627 {
628  ColorConstancyContext *s = ctx->priv;
629  ThreadData td;
630  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
631 
632  td.in = in;
633  td.out = out;
635 }
636 
638 {
639  AVFilterContext *ctx = inlink->dst;
640  ColorConstancyContext *s = ctx->priv;
642  const double break_off_sigma = 3.0;
643  double sigma = s->sigma;
644  int ret;
645 
646  if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
647  av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
648  return AVERROR(EINVAL);
649  }
650 
651  s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
652  if (ret=set_gauss(ctx)) {
653  return ret;
654  }
655 
656  s->nb_threads = ff_filter_get_nb_threads(ctx);
657  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
658  s->planewidth[0] = s->planewidth[3] = inlink->w;
659  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
660  s->planeheight[0] = s->planeheight[3] = inlink->h;
661 
662  return 0;
663 }
664 
666 {
667  AVFilterContext *ctx = inlink->dst;
668  AVFilterLink *outlink = ctx->outputs[0];
669  AVFrame *out;
670  int ret;
671  int direct = 0;
672 
674  if (ret) {
675  av_frame_free(&in);
676  return ret;
677  }
678 
679  if (av_frame_is_writable(in)) {
680  direct = 1;
681  out = in;
682  } else {
683  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
684  if (!out) {
685  av_frame_free(&in);
686  return AVERROR(ENOMEM);
687  }
689  }
691 
692  if (!direct)
693  av_frame_free(&in);
694 
695  return ff_filter_frame(outlink, out);
696 }
697 
699 {
700  ColorConstancyContext *s = ctx->priv;
701  int difford = s->difford;
702  int i;
703 
704  for (i = 0; i <= difford; ++i) {
705  av_freep(&s->gauss[i]);
706  }
707 }
708 
710  {
711  .name = "default",
712  .type = AVMEDIA_TYPE_VIDEO,
713  .config_props = config_props,
714  .filter_frame = filter_frame,
715  },
716 };
717 
718 static const AVOption greyedge_options[] = {
719  { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
720  { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
721  { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
722  { NULL }
723 };
724 
725 AVFILTER_DEFINE_CLASS(greyedge);
726 
728  .name = GREY_EDGE,
729  .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
730  .priv_size = sizeof(ColorConstancyContext),
731  .priv_class = &greyedge_class,
732  .uninit = uninit,
735  // TODO: support more formats
736  // FIXME: error when saving to .jpg
739 };
FLAGS
#define FLAGS
Definition: vf_colorconstancy.c:91
INDEX_ORD
#define INDEX_ORD
Definition: vf_colorconstancy.c:56
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
ColorConstancyContext
Common struct for all algorithms contexts.
Definition: vf_colorconstancy.c:73
ColorConstancyContext::difford
int difford
Definition: vf_colorconstancy.c:76
td
#define td
Definition: regdef.h:70
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:54
filter_slice_grey_edge
static int filter_slice_grey_edge(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Slice function for grey edge algorithm that does partial summing/maximizing of gaussian derivatives.
Definition: vf_colorconstancy.c:437
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_colorconstancy.c:698
int64_t
long long int64_t
Definition: coverity.c:34
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
get_derivative
static int get_derivative(AVFilterContext *ctx, ThreadData *td)
Main control function for calculating gaussian derivatives.
Definition: vf_colorconstancy.c:381
GAUSS
#define GAUSS(s, sr, sc, sls, sh, sw, g)
Definition: vf_colorconstancy.c:233
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
AVOption
AVOption.
Definition: opt.h:346
b
#define b
Definition: input.c:41
SQRT3
#define SQRT3
Definition: vf_colorconstancy.c:42
OFFSET
#define OFFSET(x)
Definition: vf_colorconstancy.c:90
ColorConstancyContext::white
double white[NUM_PLANES]
Definition: vf_colorconstancy.c:87
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
slice_get_derivative
static int slice_get_derivative(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Slice calculation of gaussian derivatives.
Definition: vf_colorconstancy.c:248
ColorConstancyContext::planewidth
int planewidth[4]
Definition: vf_colorconstancy.c:82
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
video.h
normalize_light
static void normalize_light(double *light)
Normalizes estimated illumination since only illumination vector direction is required for color cons...
Definition: vf_colorconstancy.c:534
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:153
chromatic_adaptation
static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
Main control function for correcting scene illumination based on estimated illumination.
Definition: vf_colorconstancy.c:626
MAX_DATA
#define MAX_DATA
Definition: vf_colorconstancy.c:47
GREY_EDGE
#define GREY_EDGE
Definition: vf_colorconstancy.c:40
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
greyedge_options
static const AVOption greyedge_options[]
Definition: vf_colorconstancy.c:718
ColorConstancyContext::nb_threads
int nb_threads
Definition: vf_colorconstancy.c:80
filter_grey_edge
static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
Main control function for grey edge algorithm.
Definition: vf_colorconstancy.c:486
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_colorconstancy.c:665
INDEX_NORM
#define INDEX_NORM
Definition: vf_colorconstancy.c:53
get_deriv
static void av_always_inline get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir, int src, int dst, int dim, int nb_threads)
Utility function for setting up differentiation data/metadata.
Definition: vf_colorconstancy.c:362
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: vvcdec.c:694
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
INDEX_SRC
#define INDEX_SRC
Definition: vf_colorconstancy.c:54
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
DIR_X
#define DIR_X
Definition: vf_colorconstancy.c:58
width
#define width
config_props
static int config_props(AVFilterLink *inlink)
Definition: vf_colorconstancy.c:637
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
floor
static __device__ float floor(float a)
Definition: cuda_runtime.h:173
g
const char * g
Definition: vf_curves.c:127
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:237
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1725
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
ColorConstancyContext::minknorm
int minknorm
@minknorm = 0 : getMax instead
Definition: vf_colorconstancy.c:77
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
arg
const char * arg
Definition: jacosubdec.c:67
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:679
ColorConstancyContext::filtersize
int filtersize
Definition: vf_colorconstancy.c:84
GINDX
#define GINDX(s, i)
Definition: vf_colorconstancy.c:93
ColorConstancyContext::planeheight
int planeheight[4]
Definition: vf_colorconstancy.c:81
setup_derivative_buffers
static int setup_derivative_buffers(AVFilterContext *ctx, ThreadData *td)
Allocates buffers used by grey edge for storing derivatives final and intermidiate results.
Definition: vf_colorconstancy.c:211
exp
int8_t exp
Definition: eval.c:74
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
INDX2D
#define INDX2D(r, c, w)
Definition: vf_colorconstancy.c:232
cleanup_derivative_buffers
static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
Frees up buffers used by grey edge for storing derivatives final and intermidiate results.
Definition: vf_colorconstancy.c:186
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:615
height
#define height
INDEX_TEMP
#define INDEX_TEMP
Definition: vf_colorconstancy.c:49
set_gauss
static int set_gauss(AVFilterContext *ctx)
Sets gauss filters used for calculating gauss derivatives.
Definition: vf_colorconstancy.c:107
M_PI
#define M_PI
Definition: mathematics.h:67
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:147
FILTER_SINGLE_PIXFMT
#define FILTER_SINGLE_PIXFMT(pix_fmt_)
Definition: internal.h:172
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
illumination_estimation
static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
Redirects to corresponding algorithm estimation function and performs normalization after estimation.
Definition: vf_colorconstancy.c:565
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:825
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ThreadData::data
double * data[MAX_DATA][NUM_PLANES]
Definition: vf_colorconstancy.c:67
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
diagonal_transformation
static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Performs simple correction via diagonal transformation model.
Definition: vf_colorconstancy.c:591
INDEX_DX
#define INDEX_DX
Definition: vf_colorconstancy.c:50
MAX_DIFF_ORD
#define MAX_DIFF_ORD
Definition: vf_colorconstancy.c:45
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
NUM_PLANES
#define NUM_PLANES
Definition: vf_colorconstancy.c:44
AVFilter
Filter definition.
Definition: avfilter.h:166
dim
int dim
Definition: vorbis_enc_data.h:425
ret
ret
Definition: filter_design.txt:187
ff_vf_greyedge
const AVFilter ff_vf_greyedge
Definition: vf_colorconstancy.c:727
DIR_Y
#define DIR_Y
Definition: vf_colorconstancy.c:59
INDEX_DY
#define INDEX_DY
Definition: vf_colorconstancy.c:51
ThreadData::meta_data
int meta_data[MAX_META_DATA]
Definition: vf_colorconstancy.c:66
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
slice_normalize
static int slice_normalize(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Slice Frobius normalization of gaussian derivatives.
Definition: vf_colorconstancy.c:316
temp
else temp
Definition: vf_mcdeint.c:263
MAX_META_DATA
#define MAX_META_DATA
Definition: vf_colorconstancy.c:46
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(greyedge)
av_clip_uint8
#define av_clip_uint8
Definition: common.h:104
colorconstancy_inputs
static const AVFilterPad colorconstancy_inputs[]
Definition: vf_colorconstancy.c:709
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ColorConstancyContext::gauss
double * gauss[MAX_DIFF_ORD+1]
Definition: vf_colorconstancy.c:85
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
INDEX_DIR
#define INDEX_DIR
Definition: vf_colorconstancy.c:57
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
INDEX_DXY
#define INDEX_DXY
Definition: vf_colorconstancy.c:52
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
ColorConstancyContext::sigma
double sigma
Definition: vf_colorconstancy.c:78
INDEX_DST
#define INDEX_DST
Definition: vf_colorconstancy.c:55