/* Per-color selection flags for the colors mask (s->colors); REDS..MAGENTAS
 * are the corresponding bit indices. */
#define RED     (1 << REDS)
#define YELLOW  (1 << YELLOWS)
#define GREEN   (1 << GREENS)
#define CYAN    (1 << CYANS)
#define BLUE    (1 << BLUES)
#define MAGENTA (1 << MAGENTAS)
static void get_triplet(int64_t m[4][4], int *r, int *g, int *b)
{
    const int ir = *r, ig = *g, ib = *b;

    *r = (ir * m[0][0] + ig * m[1][0] + ib * m[2][0]) >> 16;
    *g = (ir * m[0][1] + ig * m[1][1] + ib * m[2][1]) >> 16;
    *b = (ir * m[0][2] + ig * m[1][2] + ib * m[2][2]) >> 16;
}
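get_triplet() applies the 3x3 part of an integer matrix stored in 16.16 fixed point (hence the final >> 16). As an illustration of that representation, a float matrix could be quantized as below; this is only a sketch, and the actual matrix2imatrix() further down may differ in detail.

/* Illustrative only: quantize a float 4x4 matrix to 16.16 fixed point
 * (requires <math.h> for llrintf). */
static void float_to_q16_matrix(const float m[4][4], int64_t im[4][4])
{
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            im[y][x] = llrintf(m[y][x] * 65536.f); /* scale by 2^16, round */
}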
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
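FAST_DIV255 is the usual multiply-by-reciprocal trick: for any product of two 8-bit values it matches division by 255 rounded to nearest. A quick illustrative self-check, not part of the filter:

#include <assert.h>

static void check_fast_div255(void)
{
    for (int x = 0; x <= 255 * 255; x++)
        assert(FAST_DIV255(x) == (x + 127) / 255); /* (x + 127) / 255 == round(x / 255) here */
}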
#define HUESATURATION(name, type, clip, xall) \
static int do_slice_##name##_##xall(AVFilterContext *ctx, \
                                    void *arg, \
                                    int jobnr, int nb_jobs) \
{ \
    HueSaturationContext *s = ctx->priv; \
    AVFrame *frame = arg; \
    const int imax = (1 << name) - 1; \
    const float strength = s->strength; \
    const int colors = s->colors; \
    const int step = s->step; \
    const int width = frame->width; \
    const int process_h = frame->height; \
    const int slice_start = (process_h *  jobnr   ) / nb_jobs; \
    const int slice_end   = (process_h * (jobnr+1)) / nb_jobs; \
    const ptrdiff_t linesize = frame->linesize[0] / sizeof(type); \
    type *row = (type *)frame->data[0] + linesize * slice_start; \
    const uint8_t offset_r = s->rgba_map[R]; \
    const uint8_t offset_g = s->rgba_map[G]; \
    const uint8_t offset_b = s->rgba_map[B]; \
    type *dst_r = row + offset_r; \
    type *dst_g = row + offset_g; \
    type *dst_b = row + offset_b; \
 \
    for (int y = slice_start; y < slice_end; y++) { \
        for (int x = 0; x < width * step; x += step) { \
            int ir, ig, ib, ro, go, bo; \
 \
            ir = ro = dst_r[x]; \
            ig = go = dst_g[x]; \
            ib = bo = dst_b[x]; \
 \
            if (xall) { \
                get_triplet(s->imatrix, &ir, &ig, &ib); \
            } else { \
                const int min = FFMIN3(ir, ig, ib); \
                const int max = FFMAX3(ir, ig, ib); \
                const int flags = (ir == max) << REDS \
                                | (ir == min) << CYANS \
                                | (ig == max) << GREENS \
                                | (ig == min) << MAGENTAS \
                                | (ib == max) << BLUES \
                                | (ib == min) << YELLOWS; \
                if (colors & flags) { \
                    int f = 0; \
 \
                    if (colors & RED) \
                        f = FFMAX(f, ir - FFMAX(ig, ib)); \
                    if (colors & YELLOW) \
                        f = FFMAX(f, FFMIN(ir, ig) - ib); \
                    if (colors & GREEN) \
                        f = FFMAX(f, ig - FFMAX(ir, ib)); \
                    if (colors & CYAN) \
                        f = FFMAX(f, FFMIN(ig, ib) - ir); \
                    if (colors & BLUE) \
                        f = FFMAX(f, ib - FFMAX(ir, ig)); \
                    if (colors & MAGENTA) \
                        f = FFMAX(f, FFMIN(ir, ib) - ig); \
                    f = FFMIN(f * strength, imax); \
                    get_triplet(s->imatrix, &ir, &ig, &ib); \
                    ir = lerpi##name(ro, ir, f, imax); \
                    ig = lerpi##name(go, ig, f, imax); \
                    ib = lerpi##name(bo, ib, f, imax); \
                } \
            } \
 \
            dst_r[x] = clip(ir); \
            dst_g[x] = clip(ig); \
            dst_b[x] = clip(ib); \
        } \
 \
        dst_r += linesize; \
        dst_g += linesize; \
        dst_b += linesize; \
    } \
 \
    return 0; \
}
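The lerpi8()/lerpi16() helpers used above are only declared in the symbol index at the end of this listing. A plausible shape for them, assuming a straightforward fixed-point interpolation between the original and matrix-transformed pixel (the real implementations may round differently):

/* Sketch only; the real lerpi8()/lerpi16() may differ in rounding details. */
static int lerpi8(int v0, int v1, int f, int max)
{
    return v0 + FAST_DIV255((v1 - v0) * f);              /* max == 255 */
}

static int lerpi16(int v0, int v1, int f, int max)
{
    return v0 + (int)(((int64_t)(v1 - v0) * f) / max);   /* widened to avoid overflow; max == 65535 */
}

The do_slice_{8,16}_{0,1} functions selected in config_input() below are presumably generated by expanding the macro once per bit depth and once per value of xall (the all-colors fast path), along these lines:

/* Likely instantiations; the exact clip macros are an assumption: */
HUESATURATION(8,  uint8_t,  av_clip_uint8,  0)
HUESATURATION(8,  uint8_t,  av_clip_uint8,  1)
HUESATURATION(16, uint16_t, av_clip_uint16, 0)
HUESATURATION(16, uint16_t, av_clip_uint16, 1)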
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
static void matrix_multiply(float a[4][4], float b[4][4], float c[4][4])
{
    float temp[4][4];

    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++) {
            temp[y][x] = b[y][0] * a[0][x]
                       + b[y][1] * a[1][x]
                       + b[y][2] * a[2][x]
                       + b[y][3] * a[3][x];
        }
    }

    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++)
            c[y][x] = temp[y][x];
    }
}
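Note the operand order: matrix_multiply(a, b, c) stores b times a in c, and the temporary buffer means the output may alias either input. A small illustrative use, folding one more step into an accumulated transform; the saturation_matrix() call and the Rec.601-style weights are example values only:

/* Illustrative only: append a saturation step to an accumulated transform. */
static void example_compose(float acc[4][4], float saturation)
{
    float sat[4][4];

    saturation_matrix(sat, saturation, 0.299f, 0.587f, 0.114f); /* example luma weights */
    matrix_multiply(sat, acc, acc); /* acc = acc * sat; safe despite aliasing */
}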
static void saturation_matrix(float m[4][4], float saturation,
                              float rlw, float glw, float blw)
{
    float s = 1.f - saturation;
    float a = s * rlw + saturation;
    float b = s * rlw;
    float c = s * rlw;
    float d = s * glw;
    float e = s * glw + saturation;
    float f = s * glw;
    float g = s * blw;
    float h = s * blw;
    float i = s * blw + saturation;

    m[0][0] = a;   m[0][1] = b;   m[0][2] = c;   m[0][3] = 0.f;
    m[1][0] = d;   m[1][1] = e;   m[1][2] = f;   m[1][3] = 0.f;
    m[2][0] = g;   m[2][1] = h;   m[2][2] = i;   m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}
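As a quick sanity check on this construction: with saturation = 0, s = 1 and every column of the upper-left 3x3 block becomes (rlw, glw, blw), so after get_triplet()'s column dot products R', G' and B' all collapse to rlw*R + glw*G + blw*B, i.e. pure luma. With saturation = 1, s = 0 and the block is the identity, so the pixel passes through unchanged; values in between fade between the two, and values above 1 push past the identity and boost saturation.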
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
    /* x_rotate_matrix(): rotation about the x axis */
    m[0][0] = 1.f; m[0][1] = 0.f; m[0][2] = 0.f; m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = rc;  m[1][2] = rs;  m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = -rs; m[2][2] = rc;  m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;

    /* y_rotate_matrix(): rotation about the y axis */
    m[0][0] = rc;  m[0][1] = 0.f; m[0][2] = -rs; m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = 1.f; m[1][2] = 0.f; m[1][3] = 0.f;
    m[2][0] = rs;  m[2][1] = 0.f; m[2][2] = rc;  m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;

    /* z_rotate_matrix(): rotation about the z axis */
    m[0][0] = rc;  m[0][1] = rs;  m[0][2] = 0.f; m[0][3] = 0.f;
    m[1][0] = -rs; m[1][1] = rc;  m[1][2] = 0.f; m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = 0.f; m[2][2] = 1.f; m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;

    /* z_shear_matrix(): shear z by dx*x + dy*y */
    m[0][0] = 1.f; m[0][1] = 0.f; m[0][2] = dx;  m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = 1.f; m[1][2] = dy;  m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = 0.f; m[2][2] = 1.f; m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
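hue_rotate_matrix() below most plausibly combines these primitives in the classic Haeberli luminance-preserving construction. The outline below is an assumption about how the pieces fit together, not a transcription of the actual function:

/*
 * 1. rotate the gray axis (1,1,1) onto the z axis:
 *      x_rotate_matrix(tmp, 1/sqrt(2), 1/sqrt(2));           matrix_multiply(tmp, m, m);
 *      y_rotate_matrix(tmp, -1/sqrt(3), sqrt(2)/sqrt(3));     matrix_multiply(tmp, m, m);
 * 2. see where the luminance weights land and level that plane:
 *      transform_point(m, rlw, glw, blw, &lx, &ly, &lz);
 *      z_shear_matrix(tmp, lx / lz, ly / lz);                 matrix_multiply(tmp, m, m);
 * 3. rotate about z by the requested hue angle:
 *      z_rotate_matrix(tmp, sinf(rotation), cosf(rotation));  matrix_multiply(tmp, m, m);
 * 4. undo the shear and the two axis rotations in reverse order.
 */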
static void transform_point(float matrix[4][4],
                            float x, float y, float z,
                            float *tx, float *ty, float *tz)
static void hue_rotate_matrix(float matrix[4][4], float rotation,
                              float rlw, float glw, float blw)
{
    float mag, lx, ly, lz;
    float xrs, xrc, yrs, yrc, zrs, zrc, mag;
    float i = 1.f + s->intensity;
    float saturation = 1.f + s->saturation;
                      s->rlw, s->glw, s->blw);

                      s->rlw, s->glw, s->blw);
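Only fragments of init_matrix() survive in this listing: the intensity and saturation locals above, and the trailing rlw/glw/blw arguments of two helper calls. Judging from the helpers earlier in the file, it presumably builds a float matrix by chaining intensity, saturation and hue-rotation steps and then quantizes it for get_triplet(); the sketch below is a guess, and every name other than s->imatrix, s->rlw/glw/blw and the locals shown above is assumed:

/*
 * identity_matrix(fmatrix);
 * colorscale_matrix(tmp, i, i, i);                              matrix_multiply(tmp, fmatrix, fmatrix);
 * saturation_matrix(tmp, saturation, s->rlw, s->glw, s->blw);   matrix_multiply(tmp, fmatrix, fmatrix);
 * hue_rotate_matrix(tmp, rotation, s->rlw, s->glw, s->blw);     matrix_multiply(tmp, fmatrix, fmatrix);
 * matrix2imatrix(fmatrix, s->imatrix);
 */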
    s->depth = desc->comp[0].depth;
    s->bpp = s->depth >> 3;

    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->do_slice[0] = s->depth <= 8 ? do_slice_8_0 : do_slice_16_0;
    s->do_slice[1] = s->depth <= 8 ? do_slice_8_1 : do_slice_16_1;
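The two do_slice entries line up with the xall = 0/1 variants of the macro above. A hypothetical sketch of how filter_frame() might dispatch them across threads; the all-colors test and the helper name are assumptions:

static int filter_frame_sketch(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    HueSaturationContext *s = ctx->priv;
    const int all = s->colors == 0x3F; /* assumption: all six color ranges selected */

    ff_filter_execute(ctx, s->do_slice[all], frame, NULL,
                      FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(ctx->outputs[0], frame);
}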
#define OFFSET(x) offsetof(HueSaturationContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
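The entries of huesaturation_options[] (declared in the index below) use these two helpers in the standard AVOption pattern. A representative entry, with the option name, default and range shown purely as an illustration:

{ "saturation", "set the saturation shift", OFFSET(saturation),
  AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, VF },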
    .name          = "huesaturation",
    .priv_class    = &huesaturation_class,
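Only those two members survive in this fragment. Given the symbols in the index below (huesaturation_inputs, ff_video_default_filterpad, pixel_fmts, ff_filter_process_command, the slice-threads and timeline flags), the full definition plausibly looks something like the following; the description string and the exact field set are assumptions:

const AVFilter ff_vf_huesaturation = {
    .name            = "huesaturation",
    .description     = NULL_IF_CONFIG_SMALL("Apply hue-saturation-intensity adjustments."),
    .priv_size       = sizeof(HueSaturationContext),
    .priv_class      = &huesaturation_class,
    FILTER_INPUTS(huesaturation_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pixel_fmts),
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};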
static void x_rotate_matrix(float matrix[4][4], float rs, float rc)
static void get_triplet(int64_t m[4][4], int *r, int *g, int *b)
static int lerpi16(int v0, int v1, int f, int max)
AVPixelFormat
Pixel format.
#define FILTER_PIXFMTS_ARRAY(array)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define FILTER_INPUTS(array)
AVFrame
This structure describes decoded (raw) audio or video data.
static void saturation_matrix(float matrix[4][4], float saturation, float rlw, float glw, float blw)
static int lerpi8(int v0, int v1, int f, int max)
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
const char * name
Filter name.
static void init_matrix(HueSaturationContext *s)
A link between two filters.
static void y_rotate_matrix(float matrix[4][4], float rs, float rc)
static void matrix2imatrix(float matrix[4][4], int64_t imatrix[4][4])
A filter pad used for either input or output.
int(* do_slice[2])(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
AVFILTER_DEFINE_CLASS(huesaturation)
#define AV_CEIL_RSHIFT(a, b)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
#define FILTER_OUTPUTS(array)
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static void z_shear_matrix(float matrix[4][4], float dx, float dy)
#define AV_PIX_FMT_RGBA64
Describe the class of an AVClass context structure.
static void transform_point(float matrix[4][4], float x, float y, float z, float *tx, float *ty, float *tz)
static __device__ float sqrtf(float a)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
static void matrix_multiply(float a[4][4], float b[4][4], float c[4][4])
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
#define AVFILTERPAD_FLAG_NEEDS_WRITABLE
The filter expects writable frames from its input link, duplicating data buffers if needed.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel for the pixel format described by pixdesc, including any padding ...
const AVFilter ff_vf_huesaturation
static const AVFilterPad huesaturation_inputs[]
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
static void identity_matrix(float matrix[4][4])
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
@ AV_OPT_TYPE_FLOAT
Underlying C type is float.
#define AV_PIX_FMT_BGRA64
static const AVOption huesaturation_options[]
#define i(width, name, range_min, range_max)
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
static void hue_rotate_matrix(float matrix[4][4], float rotation, float rlw, float glw, float blw)
const char * name
Pad name.
static void colorscale_matrix(float matrix[4][4], float r, float g, float b)
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
static void z_rotate_matrix(float matrix[4][4], float rs, float rc)
#define HUESATURATION(name, type, clip, xall)
int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
static void shue_rotate_matrix(float m[4][4], float rotation)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static enum AVPixelFormat pixel_fmts[]
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
static av_cold int config_input(AVFilterLink *inlink)
@ AV_OPT_TYPE_FLAGS
Underlying C type is unsigned int.
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.