    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
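    /* [Added comment] do_slice is the per-slice worker, selected between the
     * 8-bit and 16-bit variants (both shown below) once the input bit depth
     * is known, and dispatched across the frame by the filter's threading
     * callback. The remaining VibranceContext members are omitted from this
     * excerpt. */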
static inline float lerpf(float v0, float v1, float f)
{
    return v0 + (v1 - v0) * f;
}
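/* [Added sketch, not part of this excerpt] vibrance_slice8() processes one
 * horizontal band of an 8-bit planar GBR frame. Its prologue is omitted from
 * the listing; presumably it unpacks the slice arguments and the luma
 * coefficients roughly as follows (only the signature is confirmed, the exact
 * local declarations are an assumption): */
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    AVFrame *frame = arg;
    const int width = frame->width;
    const int height = frame->height;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const float intensity = s->intensity;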
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int glinesize = frame->linesize[0];
    const int blinesize = frame->linesize[1];
    const int rlinesize = frame->linesize[2];
    uint8_t *gptr = frame->data[0] + slice_start * glinesize;
    uint8_t *bptr = frame->data[1] + slice_start * blinesize;
    uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
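    /* [Added comment] For the planar GBR(A)P formats this filter works on,
     * data[0] holds the G plane, data[1] the B plane and data[2] the R plane,
     * hence the g/b/r pointer naming above. */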
    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x] / 255.f;
            float b = bptr[x] / 255.f;
            float r = rptr[x] / 255.f;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - FFSIGN(gintensity) * color_saturation);
            const float cb = 1.f + bintensity * (1.f - FFSIGN(bintensity) * color_saturation);
            const float cr = 1.f + rintensity * (1.f - FFSIGN(rintensity) * color_saturation);
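            /* [Added comment] Each weight is 1 when the intensity is 0, and
             * lerpf(luma, v, c) = luma + (v - luma) * c, so a weight above 1
             * pushes a channel away from the luma value (more saturation)
             * while a weight below 1 pulls it toward luma. With a positive
             * intensity, already-saturated pixels (color_saturation near 1)
             * get almost no extra boost; with a negative intensity the most
             * saturated pixels are pulled toward luma the hardest. */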
            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uint8(g * 255.f);
            bptr[x] = av_clip_uint8(b * 255.f);
            rptr[x] = av_clip_uint8(r * 255.f);
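        }

        /* [Added sketch] The tail of the 8-bit worker is not part of this
         * excerpt; presumably it advances the plane pointers by one row and
         * returns 0: */
        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

/* [Added sketch, not part of this excerpt] vibrance_slice16() applies the same
 * algorithm to 9- to 16-bit planar GBR input; its opening presumably mirrors
 * the 8-bit worker: */
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    AVFrame *frame = arg;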
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
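    /* [Added sketch] width, height and intensity (omitted from this excerpt)
     * are presumably read exactly as in the 8-bit worker: */
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;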
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int glinesize = frame->linesize[0] / 2;
    const int blinesize = frame->linesize[1] / 2;
    const int rlinesize = frame->linesize[2] / 2;
    uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
    uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
    uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x] / max;
            float b = bptr[x] / max;
            float r = rptr[x] / max;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            const float cg = 1.f + gintensity * (1.f - FFSIGN(gintensity) * color_saturation);
            const float cb = 1.f + bintensity * (1.f - FFSIGN(bintensity) * color_saturation);
            const float cr = 1.f + rintensity * (1.f - FFSIGN(rintensity) * color_saturation);
            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);
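            /* [Added sketch] The store back to the 16-bit planes is not part
             * of this excerpt; presumably it rescales to the configured bit
             * depth and clips with av_clip_uintp2_c(), mirroring the 8-bit
             * path, and the loop then advances the row pointers: */
            gptr[x] = av_clip_uintp2_c(g * max, depth);
            bptr[x] = av_clip_uintp2_c(b * max, depth);
            rptr[x] = av_clip_uintp2_c(r * max, depth);
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}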
#define OFFSET(x) offsetof(VibranceContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
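/* [Added illustration] OFFSET() locates a field inside VibranceContext and VF
 * marks an option as a video filtering parameter; together they are used to
 * declare the filter's AVOption table. The entry below is a hypothetical
 * example (name, range and default are illustrative, not taken from the real
 * vibrance_options table): */
static const AVOption vibrance_options_example[] = {
    { "intensity", "set the intensity value", OFFSET(intensity),
      AV_OPT_TYPE_FLOAT, {.dbl = 0}, -2, 2, VF },
    { NULL }
};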
    .priv_class    = &vibrance_class,
    .inputs        = vibrance_inputs,
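/* [Added sketch, not part of this excerpt] The pieces above are driven by a
 * filter_frame() callback: it hands the frame to s->do_slice through
 * libavfilter's execute() mechanism (one job per slice, capped at
 * ff_filter_get_nb_threads()) and then forwards the modified frame downstream
 * with ff_filter_frame(). The body below is an approximation of that flow: */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *avctx = link->dst;
    VibranceContext *s = avctx->priv;
    int res;

    res = avctx->internal->execute(avctx, s->do_slice, frame, NULL,
                                   FFMIN(frame->height,
                                         ff_filter_get_nb_threads(avctx)));
    if (res < 0)
        return res;

    return ff_filter_frame(avctx->outputs[0], frame);
}

/* A typical command-line use of the filter, e.g.:
 *   ffmpeg -i in.mp4 -vf vibrance=intensity=0.4 out.mp4
 */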