vf_vibrance.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "internal.h"
#include "video.h"

#define R 0
#define G 1
#define B 2
#define A 3

typedef struct VibranceContext {
    const AVClass *class;

    float intensity;
    float balance[3];
    float lcoeffs[3];
    int alternate;

    int step;
    int depth;
    uint8_t rgba_map[4];

    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
} VibranceContext;

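/*
 * Linear interpolation: returns v0 for f == 0 and v1 for f == 1.
 * The slice functions call it with factors above 1 (or below 1) to push a
 * channel away from (or towards) the luma value.
 */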
static inline float lerpf(float v0, float v1, float f)
{
    return v0 + (v1 - v0) * f;
}

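/* Carries the input and output frames to the slice workers run by ff_filter_execute(). */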
typedef struct ThreadData {
    AVFrame *out, *in;
} ThreadData;

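/*
 * Planar 8-bit path (GBRP/GBRAP). Each pixel's RGB triplet is normalized to
 * [0,1]; its saturation (max - min component) and a weighted luma are
 * computed, and every channel is then interpolated between that luma and its
 * original value by a factor derived from the intensity, the per-channel
 * balance and the pixel's saturation. The alpha plane, if present, is copied
 * unchanged when the output is a separate frame.
 */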
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int width = frame->width;
    const int height = frame->height;
    const float scale = 1.f / 255.f;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t glinesize = frame->linesize[0];
    const ptrdiff_t blinesize = frame->linesize[1];
    const ptrdiff_t rlinesize = frame->linesize[2];
    const ptrdiff_t alinesize = frame->linesize[3];
    const ptrdiff_t gslinesize = in->linesize[0];
    const ptrdiff_t bslinesize = in->linesize[1];
    const ptrdiff_t rslinesize = in->linesize[2];
    const ptrdiff_t aslinesize = in->linesize[3];
    const uint8_t *gsrc = in->data[0] + slice_start * glinesize;
    const uint8_t *bsrc = in->data[1] + slice_start * blinesize;
    const uint8_t *rsrc = in->data[2] + slice_start * rlinesize;
    uint8_t *gptr = frame->data[0] + slice_start * glinesize;
    uint8_t *bptr = frame->data[1] + slice_start * blinesize;
    uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
    const uint8_t *asrc = in->data[3];
    uint8_t *aptr = frame->data[3];

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gsrc[x] * scale;
            float b = bsrc[x] * scale;
            float r = rsrc[x] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uint8(g * 255.f);
            bptr[x] = av_clip_uint8(b * 255.f);
            rptr[x] = av_clip_uint8(r * 255.f);
        }

        if (aptr && alinesize && frame != in)
            memcpy(aptr + alinesize * y, asrc + aslinesize * y, width);

        gsrc += gslinesize;
        bsrc += bslinesize;
        rsrc += rslinesize;
        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

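/*
 * Same algorithm as vibrance_slice8(), but for planar formats with 9 to 16
 * bits per component; AVFrame linesizes are in bytes, hence the division by 2
 * when stepping through the planes as uint16_t rows.
 */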
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float scale = 1.f / max;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t gslinesize = in->linesize[0] / 2;
    const ptrdiff_t bslinesize = in->linesize[1] / 2;
    const ptrdiff_t rslinesize = in->linesize[2] / 2;
    const ptrdiff_t aslinesize = in->linesize[3] / 2;
    const ptrdiff_t glinesize = frame->linesize[0] / 2;
    const ptrdiff_t blinesize = frame->linesize[1] / 2;
    const ptrdiff_t rlinesize = frame->linesize[2] / 2;
    const ptrdiff_t alinesize = frame->linesize[3] / 2;
    const uint16_t *gsrc = (const uint16_t *)in->data[0] + slice_start * gslinesize;
    const uint16_t *bsrc = (const uint16_t *)in->data[1] + slice_start * bslinesize;
    const uint16_t *rsrc = (const uint16_t *)in->data[2] + slice_start * rslinesize;
    uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
    uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
    uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
    const uint16_t *asrc = (const uint16_t *)in->data[3];
    uint16_t *aptr = (uint16_t *)frame->data[3];

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gsrc[x] * scale;
            float b = bsrc[x] * scale;
            float r = rsrc[x] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uintp2_c(g * max, depth);
            bptr[x] = av_clip_uintp2_c(b * max, depth);
            rptr[x] = av_clip_uintp2_c(r * max, depth);
        }

        if (aptr && alinesize && frame != in)
            memcpy(aptr + alinesize * y, asrc + aslinesize * y, width * 2);

        gsrc += gslinesize;
        bsrc += bslinesize;
        rsrc += rslinesize;
        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

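/*
 * Packed 8-bit path (RGB24, BGRA, ...): all components live interleaved in
 * plane 0 and are addressed through the rgba_map[] offsets filled in
 * config_input().
 */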
static int vibrance_slice8p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int step = s->step;
    const int width = frame->width;
    const int height = frame->height;
    const float scale = 1.f / 255.f;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t linesize = frame->linesize[0];
    const ptrdiff_t slinesize = in->linesize[0];
    const uint8_t *src = in->data[0] + slice_start * slinesize;
    uint8_t *ptr = frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = src[x * step + goffset] * scale;
            float b = src[x * step + boffset] * scale;
            float r = src[x * step + roffset] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            ptr[x * step + goffset] = av_clip_uint8(g * 255.f);
            ptr[x * step + boffset] = av_clip_uint8(b * 255.f);
            ptr[x * step + roffset] = av_clip_uint8(r * 255.f);

            if (frame != in)
                ptr[x * step + aoffset] = src[x * step + aoffset];
        }

        ptr += linesize;
        src += slinesize;
    }

    return 0;
}

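/*
 * Packed path for 9- to 16-bit formats (RGB48, RGBA64, ...), again addressing
 * the interleaved components through rgba_map[].
 */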
static int vibrance_slice16p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int step = s->step;
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float scale = 1.f / max;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t linesize = frame->linesize[0] / 2;
    const ptrdiff_t slinesize = in->linesize[0] / 2;
    const uint16_t *src = (const uint16_t *)in->data[0] + slice_start * slinesize;
    uint16_t *ptr = (uint16_t *)frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = src[x * step + goffset] * scale;
            float b = src[x * step + boffset] * scale;
            float r = src[x * step + roffset] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            ptr[x * step + goffset] = av_clip_uintp2_c(g * max, depth);
            ptr[x * step + boffset] = av_clip_uintp2_c(b * max, depth);
            ptr[x * step + roffset] = av_clip_uintp2_c(r * max, depth);
            if (frame != in)
                ptr[x * step + aoffset] = src[x * step + aoffset];
        }

        ptr += linesize;
        src += slinesize;
    }

    return 0;
}

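/*
 * Per-frame entry point: processes in place when the input frame is writable,
 * otherwise allocates an output buffer, then runs the selected slice function
 * across the available filter threads.
 */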
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *avctx = link->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    VibranceContext *s = avctx->priv;
    ThreadData td;
    AVFrame *out;
    int res;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.out = out;
    td.in = in;
    if (res = ff_filter_execute(avctx, s->do_slice, &td, NULL,
                                FFMIN(out->height, ff_filter_get_nb_threads(avctx))))
        return res;

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

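/* Supported formats: packed and planar RGB, 8 to 16 bits per component,
 * with or without alpha. */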
static const enum AVPixelFormat pixel_fmts[] = {
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
    AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
    AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
    AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
    AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
    AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
    AV_PIX_FMT_NONE
};

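/* Picks the slice function matching the negotiated format (planar vs. packed,
 * 8-bit vs. higher depth) and caches the component layout. */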
static av_cold int config_input(AVFilterLink *inlink)
{
    AVFilterContext *avctx = inlink->dst;
    VibranceContext *s = avctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;

    s->step = desc->nb_components;
    if (inlink->format == AV_PIX_FMT_RGB0 ||
        inlink->format == AV_PIX_FMT_0RGB ||
        inlink->format == AV_PIX_FMT_BGR0 ||
        inlink->format == AV_PIX_FMT_0BGR)
        s->step = 4;

    s->depth = desc->comp[0].depth;
    s->do_slice = s->depth <= 8 ? vibrance_slice8 : vibrance_slice16;
    if (!planar)
        s->do_slice = s->depth <= 8 ? vibrance_slice8p : vibrance_slice16p;

    ff_fill_rgba_map(s->rgba_map, inlink->format);

    return 0;
}

static const AVFilterPad vibrance_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

#define OFFSET(x) offsetof(VibranceContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

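/* All options carry AV_OPT_FLAG_RUNTIME_PARAM, so they can be changed while
 * the filter is running through ff_filter_process_command(). */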
static const AVOption vibrance_options[] = {
    { "intensity", "set the intensity value",        OFFSET(intensity),  AV_OPT_TYPE_FLOAT, {.dbl=0},         -2,  2, VF },
    { "rbal",      "set the red balance value",      OFFSET(balance[2]), AV_OPT_TYPE_FLOAT, {.dbl=1},        -10, 10, VF },
    { "gbal",      "set the green balance value",    OFFSET(balance[0]), AV_OPT_TYPE_FLOAT, {.dbl=1},        -10, 10, VF },
    { "bbal",      "set the blue balance value",     OFFSET(balance[1]), AV_OPT_TYPE_FLOAT, {.dbl=1},        -10, 10, VF },
    { "rlum",      "set the red luma coefficient",   OFFSET(lcoeffs[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.072186},   0,  1, VF },
    { "glum",      "set the green luma coefficient", OFFSET(lcoeffs[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.715158},   0,  1, VF },
    { "blum",      "set the blue luma coefficient",  OFFSET(lcoeffs[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.212656},   0,  1, VF },
    { "alternate", "use alternate colors",           OFFSET(alternate),  AV_OPT_TYPE_BOOL,  {.i64=0},          0,  1, VF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vibrance);

const AVFilter ff_vf_vibrance = {
    .name            = "vibrance",
    .description     = NULL_IF_CONFIG_SMALL("Boost or alter saturation."),
    .priv_size       = sizeof(VibranceContext),
    .priv_class      = &vibrance_class,
    FILTER_INPUTS(vibrance_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pixel_fmts),
    .flags           = AVFILTER_FLAG_SLICE_THREADS |
                       AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    .process_command = ff_filter_process_command,
};
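
/*
 * Example invocation (illustrative sketch, not part of the original source;
 * file names are placeholders):
 *
 *   ffmpeg -i input.mp4 -vf "vibrance=intensity=0.4:rbal=1.5" output.mp4
 *
 * A positive intensity boosts saturation (here with extra weight on red via
 * rbal); a negative intensity pulls the image towards its luma, desaturating it.
 */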