FFmpeg
vf_lensfun.c
/*
 * Copyright (C) 2007 by Andrew Zabolotny (author of lensfun, from which this filter derives)
 * Copyright (C) 2018 Stephen Seo
 *
 * This file is part of FFmpeg.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

/**
 * @file
 * Lensfun filter: applies lens correction with parameters from the lensfun database
 *
 * @see https://lensfun.sourceforge.net/
 */
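
/*
 * Example invocation (a sketch; the make, model and lens_model values below
 * are placeholders and must match entries in the local lensfun database):
 *
 *   ffmpeg -i input.mov \
 *       -vf "lensfun=make=Canon:model=Canon EOS 100D:lens_model=Canon EF-S 18-55mm f/3.5-5.6 IS STM:focal_length=18:aperture=8" \
 *       output.mov
 */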

#include <float.h>
#include <math.h>

#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"

#include <lensfun.h>

#define LANCZOS_RESOLUTION 256

enum Mode {
    VIGNETTING = 0x1,
    GEOMETRY_DISTORTION = 0x2,
    SUBPIXEL_DISTORTION = 0x4
};

enum InterpolationType {
    NEAREST,
    LINEAR,
    LANCZOS
};

typedef struct VignettingThreadData {
    int width, height;
    uint8_t *data_in;
    int linesize_in;
    int pixel_composition;
    lfModifier *modifier;
} VignettingThreadData;

typedef struct DistortionCorrectionThreadData {
    int width, height;
    const float *distortion_coords;
    const uint8_t *data_in;
    uint8_t *data_out;
    int linesize_in, linesize_out;
    const float *interpolation;
    int mode;
    int interpolation_type;
} DistortionCorrectionThreadData;

typedef struct LensfunContext {
    const AVClass *class;
    const char *make, *model, *lens_model, *db_path;
    int mode;
    float focal_length;
    float aperture;
    float focus_distance;
    float scale;
    int target_geometry;
    int reverse;
    int interpolation_type;

    float *distortion_coords;
    float *interpolation;

    lfLens *lens;
    lfCamera *camera;
    lfModifier *modifier;
} LensfunContext;

#define OFFSET(x) offsetof(LensfunContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption lensfun_options[] = {
    { "make", "set camera maker", OFFSET(make), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "model", "set camera model", OFFSET(model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "lens_model", "set lens model", OFFSET(lens_model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "db_path", "set path to database", OFFSET(db_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=GEOMETRY_DISTORTION}, 0, VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION, FLAGS, .unit = "mode" },
        { "vignetting", "fix lens vignetting", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING}, 0, 0, FLAGS, .unit = "mode" },
        { "geometry", "correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
        { "subpixel", "fix chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=SUBPIXEL_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
        { "vig_geo", "fix lens vignetting and correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
        { "vig_subpixel", "fix lens vignetting and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
        { "distortion", "correct geometry distortion and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
        { "all", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, .unit = "mode" },
    { "focal_length", "focal length of video (zoom; constant for the duration of the use of this filter)", OFFSET(focal_length), AV_OPT_TYPE_FLOAT, {.dbl=18}, 0.0, DBL_MAX, FLAGS },
    { "aperture", "aperture (constant for the duration of the use of this filter)", OFFSET(aperture), AV_OPT_TYPE_FLOAT, {.dbl=3.5}, 0.0, DBL_MAX, FLAGS },
    { "focus_distance", "focus distance (constant for the duration of the use of this filter)", OFFSET(focus_distance), AV_OPT_TYPE_FLOAT, {.dbl=1000.0f}, 0.0, DBL_MAX, FLAGS },
    { "scale", "scale factor applied after corrections (0.0 means automatic scaling)", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, DBL_MAX, FLAGS },
    { "target_geometry", "target geometry of the lens correction (only when geometry correction is enabled)", OFFSET(target_geometry), AV_OPT_TYPE_INT, {.i64=LF_RECTILINEAR}, 0, INT_MAX, FLAGS, .unit = "lens_geometry" },
        { "rectilinear", "rectilinear lens (default)", 0, AV_OPT_TYPE_CONST, {.i64=LF_RECTILINEAR}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "fisheye", "fisheye lens", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "panoramic", "panoramic (cylindrical)", 0, AV_OPT_TYPE_CONST, {.i64=LF_PANORAMIC}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "equirectangular", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=LF_EQUIRECTANGULAR}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "fisheye_orthographic", "orthographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_ORTHOGRAPHIC}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "fisheye_stereographic", "stereographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_STEREOGRAPHIC}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "fisheye_equisolid", "equisolid fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_EQUISOLID}, 0, 0, FLAGS, .unit = "lens_geometry" },
        { "fisheye_thoby", "fisheye as measured by thoby", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_THOBY}, 0, 0, FLAGS, .unit = "lens_geometry" },
    { "reverse", "Does reverse correction (regular image to lens distorted)", OFFSET(reverse), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "interpolation", "Type of interpolation", OFFSET(interpolation_type), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, LANCZOS, FLAGS, .unit = "interpolation" },
        { "nearest", NULL, 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, .unit = "interpolation" },
        { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, .unit = "interpolation" },
        { "lanczos", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LANCZOS}, 0, 0, FLAGS, .unit = "interpolation" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(lensfun);

static av_cold int init(AVFilterContext *ctx)
{
    LensfunContext *lensfun = ctx->priv;
    lfDatabase *db;
    const lfCamera **cameras;
    const lfLens **lenses;

    db = lf_db_create();
    if ((lensfun->db_path ? lf_db_load_path(db, lensfun->db_path) : lf_db_load(db)) != LF_NO_ERROR) {
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to load lensfun database from %s path\n",
               lensfun->db_path ? lensfun->db_path : "default");
        return AVERROR_INVALIDDATA;
    }

    if (!lensfun->make || !lensfun->model) {
        const lfCamera *const *cameras = lf_db_get_cameras(db);

        av_log(ctx, AV_LOG_FATAL, "Option \"make\" or option \"model\" not specified\n");
        av_log(ctx, AV_LOG_INFO, "Available values for \"make\" and \"model\":\n");
        for (int i = 0; cameras && cameras[i]; i++)
            av_log(ctx, AV_LOG_INFO, "\t%s\t%s\n", cameras[i]->Maker, cameras[i]->Model);
        lf_db_destroy(db);
        return AVERROR(EINVAL);
    } else if (!lensfun->lens_model) {
        const lfLens *const *lenses = lf_db_get_lenses(db);

        av_log(ctx, AV_LOG_FATAL, "Option \"lens_model\" not specified\n");
        av_log(ctx, AV_LOG_INFO, "Available values for \"lens_model\":\n");
        for (int i = 0; lenses && lenses[i]; i++)
            av_log(ctx, AV_LOG_INFO, "\t%s\t(make %s)\n", lenses[i]->Model, lenses[i]->Maker);
        lf_db_destroy(db);
        return AVERROR(EINVAL);
    }

    lensfun->lens = lf_lens_create();
    lensfun->camera = lf_camera_create();

    cameras = lf_db_find_cameras(db, lensfun->make, lensfun->model);
    if (cameras && *cameras) {
        lf_camera_copy(lensfun->camera, *cameras);
        av_log(ctx, AV_LOG_INFO, "Using camera %s\n", lensfun->camera->Model);
    } else {
        lf_free(cameras);
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to find camera in lensfun database\n");
        return AVERROR_INVALIDDATA;
    }
    lf_free(cameras);

    lenses = lf_db_find_lenses(db, lensfun->camera, NULL, lensfun->lens_model, 0);
    if (lenses && *lenses) {
        lf_lens_copy(lensfun->lens, *lenses);
        av_log(ctx, AV_LOG_INFO, "Using lens %s\n", lensfun->lens->Model);
    } else {
        lf_free(lenses);
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to find lens in lensfun database\n");
        return AVERROR_INVALIDDATA;
    }
    lf_free(lenses);

    lf_db_destroy(db);
    return 0;
}

static float lanczos_kernel(float x)
{
    // Lanczos a=2 window: sinc(x) * sinc(x / 2) for |x| < 2, zero elsewhere
    if (x == 0.0f) {
        return 1.0f;
    } else if (x > -2.0f && x < 2.0f) {
        return (2.0f * sin(M_PI * x) * sin(M_PI / 2.0f * x)) / (M_PI * M_PI * x * x);
    } else {
        return 0.0f;
    }
}

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LensfunContext *lensfun = ctx->priv;
    int index;
    float a;

    if (!lensfun->modifier) {
        if (lensfun->camera && lensfun->lens) {
            lensfun->modifier = lf_modifier_create(lensfun->lens,
                                                   lensfun->focal_length,
                                                   lensfun->camera->CropFactor,
                                                   inlink->w,
                                                   inlink->h, LF_PF_U8, lensfun->reverse);
            if (lensfun->mode & VIGNETTING)
                lf_modifier_enable_vignetting_correction(lensfun->modifier, lensfun->aperture, lensfun->focus_distance);
            if (lensfun->mode & GEOMETRY_DISTORTION) {
                lf_modifier_enable_distortion_correction(lensfun->modifier);
                lf_modifier_enable_projection_transform(lensfun->modifier, lensfun->target_geometry);
                lf_modifier_enable_scaling(lensfun->modifier, lensfun->scale);
            }
            if (lensfun->mode & SUBPIXEL_DISTORTION)
                lf_modifier_enable_tca_correction(lensfun->modifier);
        } else {
            // lensfun->camera and lensfun->lens should have been initialized
            return AVERROR_BUG;
        }
    }

    if (!lensfun->distortion_coords) {
        if (lensfun->mode & SUBPIXEL_DISTORTION) {
            lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2 * 3);
            if (!lensfun->distortion_coords)
                return AVERROR(ENOMEM);
            if (lensfun->mode & GEOMETRY_DISTORTION) {
                // apply both geometry and subpixel distortion
                lf_modifier_apply_subpixel_geometry_distortion(lensfun->modifier,
                                                               0, 0,
                                                               inlink->w, inlink->h,
                                                               lensfun->distortion_coords);
            } else {
                // apply only subpixel distortion
                lf_modifier_apply_subpixel_distortion(lensfun->modifier,
                                                      0, 0,
                                                      inlink->w, inlink->h,
                                                      lensfun->distortion_coords);
            }
        } else if (lensfun->mode & GEOMETRY_DISTORTION) {
            lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2);
            if (!lensfun->distortion_coords)
                return AVERROR(ENOMEM);
            // apply only geometry distortion
            lf_modifier_apply_geometry_distortion(lensfun->modifier,
                                                  0, 0,
                                                  inlink->w, inlink->h,
                                                  lensfun->distortion_coords);
        }
    }

    if (!lensfun->interpolation)
        if (lensfun->interpolation_type == LANCZOS) {
            lensfun->interpolation = av_malloc_array(LANCZOS_RESOLUTION, sizeof(float) * 4);
            if (!lensfun->interpolation)
                return AVERROR(ENOMEM);
            for (index = 0; index < 4 * LANCZOS_RESOLUTION; ++index) {
                if (index == 0) {
                    lensfun->interpolation[index] = 1.0f;
                } else {
                    a = sqrtf((float)index / LANCZOS_RESOLUTION);
                    lensfun->interpolation[index] = lanczos_kernel(a);
                }
            }
        }

    return 0;
}

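/* Slice worker for vignetting correction: each job hands one horizontal band
 * of the frame to lensfun, which modifies the pixel colours in place. */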
static int vignetting_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const VignettingThreadData *thread_data = arg;
    const int slice_start = thread_data->height * jobnr / nb_jobs;
    const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;

    lf_modifier_apply_color_modification(thread_data->modifier,
                                         thread_data->data_in + slice_start * thread_data->linesize_in,
                                         0,
                                         slice_start,
                                         thread_data->width,
                                         slice_end - slice_start,
                                         thread_data->pixel_composition,
                                         thread_data->linesize_in);

    return 0;
}

static float square(float x)
{
    return x * x;
}

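/* Slice worker for geometry/TCA correction: for every output pixel in the
 * band, look up the source coordinates precomputed in distortion_coords and
 * resample the input with nearest-neighbour, bilinear or Lanczos
 * interpolation; samples that fall outside the frame become 0. */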
static int distortion_correction_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const DistortionCorrectionThreadData *thread_data = arg;
    const int slice_start = thread_data->height * jobnr / nb_jobs;
    const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;

    int x, y, i, j, rgb_index;
    float interpolated, new_x, new_y, d, norm;
    int new_x_int, new_y_int;
    for (y = slice_start; y < slice_end; ++y)
        for (x = 0; x < thread_data->width; ++x)
            for (rgb_index = 0; rgb_index < 3; ++rgb_index) {
                if (thread_data->mode & SUBPIXEL_DISTORTION) {
                    // subpixel (and possibly geometry) distortion correction was applied, correct distortion
                    switch (thread_data->interpolation_type) {
                    case NEAREST:
                        new_x_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2] + 0.5f;
                        new_y_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1] + 0.5f;
                        if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
                        }
                        break;
                    case LINEAR:
                        interpolated = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
                        new_y_int = new_y;
                        if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
                                  thread_data->data_in[ new_x_int      * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y_int + 1 - new_y)
                                + thread_data->data_in[ new_x_int      * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y - new_y_int);
                        }
                        break;
                    case LANCZOS:
                        interpolated = 0.0f;
                        norm = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
                        new_y_int = new_y;
                        for (j = 0; j < 4; ++j)
                            for (i = 0; i < 4; ++i) {
                                if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
                                    continue;
                                d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
                                if (d >= 4.0f)
                                    continue;
                                d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
                                norm += d;
                                interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
                            }
                        if (norm == 0.0f) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            interpolated /= norm;
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
                        }
                        break;
                    }
                } else if (thread_data->mode & GEOMETRY_DISTORTION) {
                    // geometry distortion correction was applied, correct distortion
                    switch (thread_data->interpolation_type) {
                    case NEAREST:
                        new_x_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2] + 0.5f;
                        new_y_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1] + 0.5f;
                        if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
                        }
                        break;
                    case LINEAR:
                        interpolated = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1];
                        new_y_int = new_y;
                        if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
                                  thread_data->data_in[ new_x_int      * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y_int + 1 - new_y)
                                + thread_data->data_in[ new_x_int      * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y - new_y_int);
                        }
                        break;
                    case LANCZOS:
                        interpolated = 0.0f;
                        norm = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 + 1 + y * thread_data->width * 2];
                        new_y_int = new_y;
                        for (j = 0; j < 4; ++j)
                            for (i = 0; i < 4; ++i) {
                                if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
                                    continue;
                                d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
                                if (d >= 4.0f)
                                    continue;
                                d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
                                norm += d;
                                interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
                            }
                        if (norm == 0.0f) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            interpolated /= norm;
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
                        }
                        break;
                    }
                } else {
                    // no distortion correction was applied
                    thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[x * 3 + rgb_index + y * thread_data->linesize_in];
                }
            }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LensfunContext *lensfun = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    VignettingThreadData vignetting_thread_data;
    DistortionCorrectionThreadData distortion_correction_thread_data;
    int ret;

    if (lensfun->mode & VIGNETTING) {
        ret = ff_inlink_make_frame_writable(inlink, &in);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }

        vignetting_thread_data = (VignettingThreadData) {
            .width = inlink->w,
            .height = inlink->h,
            .data_in = in->data[0],
            .linesize_in = in->linesize[0],
            .pixel_composition = LF_CR_3(RED, GREEN, BLUE),
            .modifier = lensfun->modifier
        };

        ff_filter_execute(ctx, vignetting_filter_slice,
                          &vignetting_thread_data, NULL,
                          FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
    }

    if (lensfun->mode & (GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION)) {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);

        distortion_correction_thread_data = (DistortionCorrectionThreadData) {
            .width = inlink->w,
            .height = inlink->h,
            .distortion_coords = lensfun->distortion_coords,
            .data_in = in->data[0],
            .data_out = out->data[0],
            .linesize_in = in->linesize[0],
            .linesize_out = out->linesize[0],
            .interpolation = lensfun->interpolation,
            .mode = lensfun->mode,
            .interpolation_type = lensfun->interpolation_type
        };

        ff_filter_execute(ctx, distortion_correction_filter_slice,
                          &distortion_correction_thread_data, NULL,
                          FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

        av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    } else {
        return ff_filter_frame(outlink, in);
    }
}

static av_cold void uninit(AVFilterContext *ctx)
{
    LensfunContext *lensfun = ctx->priv;

    if (lensfun->camera)
        lf_camera_destroy(lensfun->camera);
    if (lensfun->lens)
        lf_lens_destroy(lensfun->lens);
    if (lensfun->modifier)
        lf_modifier_destroy(lensfun->modifier);
    av_freep(&lensfun->distortion_coords);
    av_freep(&lensfun->interpolation);
}

static const AVFilterPad lensfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_vf_lensfun = {
    .name        = "lensfun",
    .description = NULL_IF_CONFIG_SMALL("Apply correction to an image based on info derived from the lensfun database."),
    .priv_size   = sizeof(LensfunContext),
    .init        = init,
    .uninit      = uninit,
    FILTER_INPUTS(lensfun_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_RGB24),
    .priv_class  = &lensfun_class,
    .flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};