FFmpeg: vf_deshake.c
1 /*
2  * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
3  * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * fast deshake / depan video filter
25  *
26  * SAD block-matching motion compensation to fix small changes in
27  * horizontal and/or vertical shift. This filter helps remove camera shake
28  * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
29  *
30  * Algorithm:
31  * - For each frame with one previous reference frame
32  * - For each block in the frame
33  * - If contrast > threshold then find likely motion vector
34  * - For all found motion vectors
35  * - Find most common, store as global motion vector
36  * - Find most likely rotation angle
37  * - Transform image along global motion
38  *
39  * TODO:
40  * - Fill frame edges based on previous/next reference frames
41  * - Fill frame edges by stretching image near the edges?
42  * - Can this be done quickly and look decent?
43  *
44  * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
45  * for an algorithm similar to what could be used here to get the gmv
46  * It requires only a couple diamond searches + fast downscaling
47  *
48  * Special thanks to Jason Kotenko for his help with the algorithm and my
49  * inability to see simple errors in C code.
50  */
51 
52 #include "avfilter.h"
53 #include "internal.h"
54 #include "video.h"
55 #include "libavutil/common.h"
56 #include "libavutil/emms.h"
57 #include "libavutil/file_open.h"
58 #include "libavutil/mem.h"
59 #include "libavutil/opt.h"
60 #include "libavutil/pixdesc.h"
61 #include "libavutil/qsort.h"
62 
63 #include "deshake.h"
64 
65 #define OFFSET(x) offsetof(DeshakeContext, x)
66 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
67 
68 static const AVOption deshake_options[] = {
69  { "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
70  { "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
71  { "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
72  { "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
73  { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
74  { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
75  { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
76  { "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
77  { "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
78  { "clamp", "extruded edge value at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_CLAMP}, INT_MIN, INT_MAX, FLAGS, "edge" },
79  { "mirror", "mirrored edge at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_MIRROR}, INT_MIN, INT_MAX, FLAGS, "edge" },
80  { "blocksize", "set motion search blocksize", OFFSET(blocksize), AV_OPT_TYPE_INT, {.i64=8}, 4, 128, .flags = FLAGS },
81  { "contrast", "set contrast threshold for blocks", OFFSET(contrast), AV_OPT_TYPE_INT, {.i64=125}, 1, 255, .flags = FLAGS },
82  { "search", "set search strategy", OFFSET(search), AV_OPT_TYPE_INT, {.i64=EXHAUSTIVE}, EXHAUSTIVE, SEARCH_COUNT-1, FLAGS, "smode" },
83  { "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
84  { "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
85  { "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
86  { "opencl", "ignored", OFFSET(opencl), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
87  { NULL }
88 };
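(Illustration, not part of the original file: these AVOption entries surface through the standard libavfilter option syntax, so a command line such as
    ffmpeg -i input.mp4 -vf "deshake=rx=32:ry=32:edge=mirror" output.mp4
would widen the block motion search to +/-32 pixels in each direction and fill the exposed borders by mirroring; omitted options keep the defaults listed above.)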
89 
90 AVFILTER_DEFINE_CLASS(deshake);
91 
92 static int cmp(const void *a, const void *b)
93 {
94  return FFDIFFSIGN(*(const double *)a, *(const double *)b);
95 }
96 
97 /**
98  * Cleaned mean (cuts off 20% of values to remove outliers and then averages)
99  */
100 static double clean_mean(double *values, int count)
101 {
102  double mean = 0;
103  int cut = count / 5;
104  int x;
105 
106  AV_QSORT(values, count, double, cmp);
107 
108  for (x = cut; x < count - cut; x++) {
109  mean += values[x];
110  }
111 
112  return mean / (count - cut * 2);
113 }
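(Worked example, not part of the original file: with count = 10 the code computes cut = 10 / 5 = 2, sorts the array, discards the two smallest and two largest values, and averages the remaining six, i.e. mean = (values[2] + ... + values[7]) / 6.)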
114 
115 /**
116  * Find the most likely shift in motion between two frames for a given
117  * macroblock. Test each block against several shifts given by the rx
118  * and ry attributes. Searches using a simple matrix of those shifts and
119  * chooses the most likely shift by the smallest difference in blocks.
120  */
121 static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
122  uint8_t *src2, int cx, int cy, int stride,
123  IntMotionVector *mv)
124 {
125  int x, y;
126  int diff;
127  int smallest = INT_MAX;
128  int tmp, tmp2;
129 
130  #define CMP(i, j) deshake->sad(src1 + cy * stride + cx, stride,\
131  src2 + (j) * stride + (i), stride)
132 
133  if (deshake->search == EXHAUSTIVE) {
134  // Compare every possible position - this is sloooow!
135  for (y = -deshake->ry; y <= deshake->ry; y++) {
136  for (x = -deshake->rx; x <= deshake->rx; x++) {
137  diff = CMP(cx - x, cy - y);
138  if (diff < smallest) {
139  smallest = diff;
140  mv->x = x;
141  mv->y = y;
142  }
143  }
144  }
145  } else if (deshake->search == SMART_EXHAUSTIVE) {
146  // Compare every other possible position and find the best match
147  for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
148  for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
149  diff = CMP(cx - x, cy - y);
150  if (diff < smallest) {
151  smallest = diff;
152  mv->x = x;
153  mv->y = y;
154  }
155  }
156  }
157 
158  // Hone in on the specific best match around the match we found above
159  tmp = mv->x;
160  tmp2 = mv->y;
161 
162  for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
163  for (x = tmp - 1; x <= tmp + 1; x++) {
164  if (x == tmp && y == tmp2)
165  continue;
166 
167  diff = CMP(cx - x, cy - y);
168  if (diff < smallest) {
169  smallest = diff;
170  mv->x = x;
171  mv->y = y;
172  }
173  }
174  }
175  }
176 
177  if (smallest > 512) {
178  mv->x = -1;
179  mv->y = -1;
180  }
181  emms_c();
182  //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
183  //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
184 }
185 
186 /**
187  * Find the contrast of a given block. When searching for global motion we
188  * really only care about the high contrast blocks, so using this method we
189  * can actually skip blocks we don't care much about.
190  */
191 static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
192 {
193  int highest = 0;
194  int lowest = 255;
195  int i, j, pos;
196 
197  for (i = 0; i <= blocksize * 2; i++) {
198  // We use a width of 16 here to match the sad function
199  for (j = 0; j <= 15; j++) {
200  pos = (y + i) * stride + (x + j);
201  if (src[pos] < lowest)
202  lowest = src[pos];
203  else if (src[pos] > highest) {
204  highest = src[pos];
205  }
206  }
207  }
208 
209  return highest - lowest;
210 }
211 
212 /**
213  * Find the rotation for a given block.
214  */
215 static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
216 {
217  double a1, a2, diff;
218 
219  a1 = atan2(y - cy, x - cx);
220  a2 = atan2(y - cy + shift->y, x - cx + shift->x);
221 
222  diff = a2 - a1;
223 
224  return (diff > M_PI) ? diff - 2 * M_PI :
225  (diff < -M_PI) ? diff + 2 * M_PI :
226  diff;
227 }
228 
229 /**
230  * Find the estimated global motion for a scene given the most likely shift
231  * for each block in the frame. The global motion is estimated to be the
232  * same as the motion from most blocks in the frame, so if most blocks
233  * move one pixel to the right and two pixels down, this would yield a
234  * motion vector (1, -2).
235  */
236 static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
237  int width, int height, int stride, Transform *t)
238 {
239  int x, y;
240  IntMotionVector mv = {0, 0};
241  int count_max_value = 0;
242  int contrast;
243 
244  int pos;
245  int center_x = 0, center_y = 0;
246  double p_x, p_y;
247 
248  av_fast_malloc(&deshake->angles, &deshake->angles_size, width * height / (16 * deshake->blocksize) * sizeof(*deshake->angles));
249 
250  // Reset counts to zero
251  for (x = 0; x < deshake->rx * 2 + 1; x++) {
252  for (y = 0; y < deshake->ry * 2 + 1; y++) {
253  deshake->counts[x][y] = 0;
254  }
255  }
256 
257  pos = 0;
258  // Find motion for every block and store the motion vector in the counts
259  for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
260  // We use a width of 16 here to match the sad function
261  for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
262  // If the contrast is too low, just skip this block as it probably
263  // won't be very useful to us.
264  contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
265  if (contrast > deshake->contrast) {
266  //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
267  find_block_motion(deshake, src1, src2, x, y, stride, &mv);
268  if (mv.x != -1 && mv.y != -1) {
269  deshake->counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
270  if (x > deshake->rx && y > deshake->ry)
271  deshake->angles[pos++] = block_angle(x, y, 0, 0, &mv);
272 
273  center_x += mv.x;
274  center_y += mv.y;
275  }
276  }
277  }
278  }
279 
280  if (pos) {
281  center_x /= pos;
282  center_y /= pos;
283  t->angle = clean_mean(deshake->angles, pos);
284  if (t->angle < 0.001)
285  t->angle = 0;
286  } else {
287  t->angle = 0;
288  }
289 
290  // Find the most common motion vector in the frame and use it as the gmv
291  for (y = deshake->ry * 2; y >= 0; y--) {
292  for (x = 0; x < deshake->rx * 2 + 1; x++) {
293  //av_log(NULL, AV_LOG_ERROR, "%5d ", deshake->counts[x][y]);
294  if (deshake->counts[x][y] > count_max_value) {
295  t->vec.x = x - deshake->rx;
296  t->vec.y = y - deshake->ry;
297  count_max_value = deshake->counts[x][y];
298  }
299  }
300  //av_log(NULL, AV_LOG_ERROR, "\n");
301  }
302 
303  p_x = (center_x - width / 2.0);
304  p_y = (center_y - height / 2.0);
305  t->vec.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
306  t->vec.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;
307 
308  // Clamp max shift & rotation?
309  t->vec.x = av_clipf(t->vec.x, -deshake->rx * 2, deshake->rx * 2);
310  t->vec.y = av_clipf(t->vec.y, -deshake->ry * 2, deshake->ry * 2);
311  t->angle = av_clipf(t->angle, -0.1, 0.1);
312 
313  //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
314 }
315 
316 static int deshake_transform_c(AVFilterContext *ctx,
317  int width, int height, int cw, int ch,
318  const float *matrix_y, const float *matrix_uv,
319  enum InterpolateMethod interpolate,
320  enum FillMethod fill, AVFrame *in, AVFrame *out)
321 {
322  int i = 0, ret = 0;
323  const float *matrixs[3];
324  int plane_w[3], plane_h[3];
325  matrixs[0] = matrix_y;
326  matrixs[1] = matrixs[2] = matrix_uv;
327  plane_w[0] = width;
328  plane_w[1] = plane_w[2] = cw;
329  plane_h[0] = height;
330  plane_h[1] = plane_h[2] = ch;
331 
332  for (i = 0; i < 3; i++) {
333  // Transform the luma and chroma planes
334  ret = ff_affine_transform(in->data[i], out->data[i], in->linesize[i],
335  out->linesize[i], plane_w[i], plane_h[i],
336  matrixs[i], interpolate, fill);
337  if (ret < 0)
338  return ret;
339  }
340  return ret;
341 }
342 
343 static av_cold int init(AVFilterContext *ctx)
344 {
345  DeshakeContext *deshake = ctx->priv;
346 
347  deshake->refcount = 20; // XXX: add to options?
348  deshake->blocksize /= 2;
349  deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
350 
351  if (deshake->rx % 16) {
352  av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
353  return AVERROR_PATCHWELCOME;
354  }
355 
356  if (deshake->filename)
357  deshake->fp = avpriv_fopen_utf8(deshake->filename, "w");
358  if (deshake->fp)
359  fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", 1, 104, deshake->fp);
360 
361  // Quadword align left edge of box for MMX code, adjust width if necessary
362  // to keep right margin
363  if (deshake->cx > 0) {
364  deshake->cw += deshake->cx - (deshake->cx & ~15);
365  deshake->cx &= ~15;
366  }
367  deshake->transform = deshake_transform_c;
368 
369  av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
370  deshake->cx, deshake->cy, deshake->cw, deshake->ch,
371  deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
372 
373  return 0;
374 }
375 
376 static const enum AVPixelFormat pix_fmts[] = {
377  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
378  AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
379  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
380 };
381 
382 static int config_props(AVFilterLink *link)
383 {
384  DeshakeContext *deshake = link->dst->priv;
385 
386  deshake->ref = NULL;
387  deshake->last.vec.x = 0;
388  deshake->last.vec.y = 0;
389  deshake->last.angle = 0;
390  deshake->last.zoom = 0;
391 
392  return 0;
393 }
394 
395 static av_cold void uninit(AVFilterContext *ctx)
396 {
397  DeshakeContext *deshake = ctx->priv;
398  av_frame_free(&deshake->ref);
399  av_freep(&deshake->angles);
400  deshake->angles_size = 0;
401  if (deshake->fp)
402  fclose(deshake->fp);
403 }
404 
405 static int filter_frame(AVFilterLink *link, AVFrame *in)
406 {
407  DeshakeContext *deshake = link->dst->priv;
408  AVFilterLink *outlink = link->dst->outputs[0];
409  AVFrame *out;
410  Transform t = {{0},0}, orig = {{0},0};
411  float matrix_y[9], matrix_uv[9];
412  float alpha = 2.0 / deshake->refcount;
413  char tmp[256];
414  int ret = 0;
415  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
416  const int chroma_width = AV_CEIL_RSHIFT(link->w, desc->log2_chroma_w);
417  const int chroma_height = AV_CEIL_RSHIFT(link->h, desc->log2_chroma_h);
418  int aligned;
419  float transform_zoom;
420 
421  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
422  if (!out) {
423  av_frame_free(&in);
424  return AVERROR(ENOMEM);
425  }
426  av_frame_copy_props(out, in);
427 
428  aligned = !((intptr_t)in->data[0] & 15 | in->linesize[0] & 15);
429  deshake->sad = av_pixelutils_get_sad_fn(4, 4, aligned, deshake); // 16x16, 2nd source unaligned
430  if (!deshake->sad)
431  return AVERROR(EINVAL);
432 
433  if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
434  // Find the most likely global motion for the current frame
435  find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
436  } else {
437  uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
438  uint8_t *src2 = in->data[0];
439 
440  deshake->cx = FFMIN(deshake->cx, link->w);
441  deshake->cy = FFMIN(deshake->cy, link->h);
442 
443  if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
444  if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
445 
446  // Quadword align right margin
447  deshake->cw &= ~15;
448 
449  src1 += deshake->cy * in->linesize[0] + deshake->cx;
450  src2 += deshake->cy * in->linesize[0] + deshake->cx;
451 
452  find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
453  }
454 
455 
456  // Copy transform so we can output it later to compare to the smoothed value
457  orig.vec.x = t.vec.x;
458  orig.vec.y = t.vec.y;
459  orig.angle = t.angle;
460  orig.zoom = t.zoom;
461 
462  // Generate a one-sided moving exponential average
463  deshake->avg.vec.x = alpha * t.vec.x + (1.0 - alpha) * deshake->avg.vec.x;
464  deshake->avg.vec.y = alpha * t.vec.y + (1.0 - alpha) * deshake->avg.vec.y;
465  deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
466  deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
467 
468  // Remove the average from the current motion to detect the motion that
469  // is not on purpose, just as jitter from bumping the camera
470  t.vec.x -= deshake->avg.vec.x;
471  t.vec.y -= deshake->avg.vec.y;
472  t.angle -= deshake->avg.angle;
473  t.zoom -= deshake->avg.zoom;
474 
475  // Invert the motion to undo it
476  t.vec.x *= -1;
477  t.vec.y *= -1;
478  t.angle *= -1;
479 
480  // Write statistics to file
481  if (deshake->fp) {
482  snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vec.x, deshake->avg.vec.x, t.vec.x, orig.vec.y, deshake->avg.vec.y, t.vec.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
483  fwrite(tmp, 1, strlen(tmp), deshake->fp);
484  }
485 
486  // Turn relative current frame motion into absolute by adding it to the
487  // last absolute motion
488  t.vec.x += deshake->last.vec.x;
489  t.vec.y += deshake->last.vec.y;
490  t.angle += deshake->last.angle;
491  t.zoom += deshake->last.zoom;
492 
493  // Shrink motion by 10% to keep things centered in the camera frame
494  t.vec.x *= 0.9;
495  t.vec.y *= 0.9;
496  t.angle *= 0.9;
497 
498  // Store the last absolute motion information
499  deshake->last.vec.x = t.vec.x;
500  deshake->last.vec.y = t.vec.y;
501  deshake->last.angle = t.angle;
502  deshake->last.zoom = t.zoom;
503 
504  transform_zoom = 1.0 + t.zoom / 100.0;
505 
506  // Generate a luma transformation matrix
507  ff_get_matrix(t.vec.x, t.vec.y, t.angle, transform_zoom, transform_zoom, matrix_y);
508  // Generate a chroma transformation matrix
509  ff_get_matrix(t.vec.x / (link->w / chroma_width), t.vec.y / (link->h / chroma_height), t.angle, transform_zoom, transform_zoom, matrix_uv);
510  // Transform the luma and chroma planes
511  ret = deshake->transform(link->dst, link->w, link->h, chroma_width, chroma_height,
512  matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
513 
514  // Cleanup the old reference frame
515  av_frame_free(&deshake->ref);
516 
517  if (ret < 0)
518  goto fail;
519 
520  // Store the current frame as the reference frame for calculating the
521  // motion of the next frame
522  deshake->ref = in;
523 
524  return ff_filter_frame(outlink, out);
525 fail:
526  av_frame_free(&out);
527  return ret;
528 }
529 
530 static const AVFilterPad deshake_inputs[] = {
531  {
532  .name = "default",
533  .type = AVMEDIA_TYPE_VIDEO,
534  .filter_frame = filter_frame,
535  .config_props = config_props,
536  },
537 };
538 
539 const AVFilter ff_vf_deshake = {
540  .name = "deshake",
541  .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
542  .priv_size = sizeof(DeshakeContext),
543  .init = init,
544  .uninit = uninit,
545  FILTER_INPUTS(deshake_inputs),
546  FILTER_OUTPUTS(ff_video_default_filterpad),
547  FILTER_PIXFMTS_ARRAY(pix_fmts),
548  .priv_class = &deshake_class,
549 };