libavfilter/vf_convolution.c: source listing for the FFmpeg convolution, prewitt, sobel and roberts video filters.
#define OFFSET(x) offsetof(ConvolutionContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* identity ("same") kernels: a single centre tap of 1 */
static const int same5x5[25] = {0, 0, 0, 0, 0,  0, 0, 0, 0, 0,  0, 0, 1, 0, 0,
                                0, 0, 0, 0, 0,  0, 0, 0, 0, 0};

static const int same7x7[49] = {0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 1, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0};
static void filter16_prewitt(uint8_t *dstp, int width,
                             float scale, float delta, const int *const matrix,
                             const uint8_t *c[], int peak, int radius,
                             int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        float suma = AV_RN16A(&c[0][2 * x]) * -1 + AV_RN16A(&c[1][2 * x]) * -1 + AV_RN16A(&c[2][2 * x]) * -1 +
                     AV_RN16A(&c[6][2 * x]) *  1 + AV_RN16A(&c[7][2 * x]) *  1 + AV_RN16A(&c[8][2 * x]) *  1;
        float sumb = AV_RN16A(&c[0][2 * x]) * -1 + AV_RN16A(&c[2][2 * x]) *  1 + AV_RN16A(&c[3][2 * x]) * -1 +
                     AV_RN16A(&c[5][2 * x]) *  1 + AV_RN16A(&c[6][2 * x]) * -1 + AV_RN16A(&c[8][2 * x]) *  1;

        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter16_roberts(uint8_t *dstp, int width,
                             float scale, float delta, const int *const matrix,
                             const uint8_t *c[], int peak, int radius,
                             int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        float suma = AV_RN16A(&c[0][2 * x]) *  1 + AV_RN16A(&c[1][2 * x]) * -1;
        float sumb = AV_RN16A(&c[4][2 * x]) *  1 + AV_RN16A(&c[3][2 * x]) * -1;

        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter16_sobel(uint8_t *dstp, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        float suma = AV_RN16A(&c[0][2 * x]) * -1 + AV_RN16A(&c[1][2 * x]) * -2 + AV_RN16A(&c[2][2 * x]) * -1 +
                     AV_RN16A(&c[6][2 * x]) *  1 + AV_RN16A(&c[7][2 * x]) *  2 + AV_RN16A(&c[8][2 * x]) *  1;
        float sumb = AV_RN16A(&c[0][2 * x]) * -1 + AV_RN16A(&c[2][2 * x]) *  1 + AV_RN16A(&c[3][2 * x]) * -2 +
                     AV_RN16A(&c[5][2 * x]) *  2 + AV_RN16A(&c[6][2 * x]) * -1 + AV_RN16A(&c[8][2 * x]) *  1;

        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter_prewitt(uint8_t *dst, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride)
{
    const uint8_t *c0 = c[0], *c1 = c[1], *c2 = c[2];
    const uint8_t *c3 = c[3], *c5 = c[5];
    const uint8_t *c6 = c[6], *c7 = c[7], *c8 = c[8];
    int x;

    for (x = 0; x < width; x++) {
        float suma = c0[x] * -1 + c1[x] * -1 + c2[x] * -1 +
                     c6[x] *  1 + c7[x] *  1 + c8[x] *  1;
        float sumb = c0[x] * -1 + c2[x] *  1 + c3[x] * -1 +
                     c5[x] *  1 + c6[x] * -1 + c8[x] *  1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
static void filter_roberts(uint8_t *dst, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride)
{
    int x;

    for (x = 0; x < width; x++) {
        float suma = c[0][x] *  1 + c[1][x] * -1;
        float sumb = c[4][x] *  1 + c[3][x] * -1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
static void filter_sobel(uint8_t *dst, int width,
                         float scale, float delta, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride)
{
    const uint8_t *c0 = c[0], *c1 = c[1], *c2 = c[2];
    const uint8_t *c3 = c[3], *c5 = c[5];
    const uint8_t *c6 = c[6], *c7 = c[7], *c8 = c[8];
    int x;

    for (x = 0; x < width; x++) {
        float suma = c0[x] * -1 + c1[x] * -2 + c2[x] * -1 +
                     c6[x] *  1 + c7[x] *  2 + c8[x] *  1;
        float sumb = c0[x] * -1 + c2[x] *  1 + c3[x] * -2 +
                     c5[x] *  2 + c6[x] * -1 + c8[x] *  1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
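For reference, the suma/sumb taps above are the classic Sobel pair of kernels. The following standalone sketch (not part of vf_convolution.c; the function name and sample values are illustrative) applies the same taps to one 3x3 neighbourhood and clips the magnitude the way av_clip_uint8() does:

/* Standalone illustration (not from vf_convolution.c): one Sobel output sample. */
#include <math.h>
#include <stdio.h>

static unsigned char sobel_one_pixel(const unsigned char n[9], float scale, float delta)
{
    /* horizontal and vertical responses, same coefficients as suma/sumb above */
    float suma = n[0] * -1 + n[1] * -2 + n[2] * -1 +
                 n[6] *  1 + n[7] *  2 + n[8] *  1;
    float sumb = n[0] * -1 + n[2] *  1 + n[3] * -2 +
                 n[5] *  2 + n[6] * -1 + n[8] *  1;
    float v = sqrtf(suma * suma + sumb * sumb) * scale + delta;

    return v < 0 ? 0 : v > 255 ? 255 : (unsigned char)v;   /* av_clip_uint8() equivalent */
}

int main(void)
{
    /* a vertical edge: dark left columns, bright right column */
    const unsigned char n[9] = { 10, 10, 200,
                                 10, 10, 200,
                                 10, 10, 200 };

    printf("%d\n", sobel_one_pixel(n, 1.0f, 0.0f));        /* prints 255 (magnitude 760, clipped) */
    return 0;
}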
static void filter16_3x3(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        int sum = AV_RN16A(&c[0][2 * x]) * matrix[0] +
                  AV_RN16A(&c[1][2 * x]) * matrix[1] +
                  AV_RN16A(&c[2][2 * x]) * matrix[2] +
                  AV_RN16A(&c[3][2 * x]) * matrix[3] +
                  AV_RN16A(&c[4][2 * x]) * matrix[4] +
                  AV_RN16A(&c[5][2 * x]) * matrix[5] +
                  AV_RN16A(&c[6][2 * x]) * matrix[6] +
                  AV_RN16A(&c[7][2 * x]) * matrix[7] +
                  AV_RN16A(&c[8][2 * x]) * matrix[8];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_5x5(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 25; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_7x7(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 49; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_row(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_column(uint8_t *dstp, int height,
                            float rdiv, float bias, const int *const matrix,
                            const uint8_t *c[], int peak, int radius,
                            int dstride, int stride)
{
    uint16_t *dst = (uint16_t *)dstp;
    int y;

    for (y = 0; y < height; y++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += AV_RN16A(&c[i][2 * y]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[0] = av_clip(sum, 0, peak);

        dst += dstride / 2;
    }
}
static void filter_7x7(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride)
{
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 49; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_5x5(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride)
{
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 25; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_3x3(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride)
{
    const uint8_t *c0 = c[0], *c1 = c[1], *c2 = c[2];
    const uint8_t *c3 = c[3], *c4 = c[4], *c5 = c[5];
    const uint8_t *c6 = c[6], *c7 = c[7], *c8 = c[8];
    int x;

    for (x = 0; x < width; x++) {
        int sum = c0[x] * matrix[0] + c1[x] * matrix[1] + c2[x] * matrix[2] +
                  c3[x] * matrix[3] + c4[x] * matrix[4] + c5[x] * matrix[5] +
                  c6[x] * matrix[6] + c7[x] * matrix[7] + c8[x] * matrix[8];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
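filter_3x3() weights the nine neighbours by the user matrix, scales the sum by rdiv, adds bias and rounds before clipping. A minimal standalone sketch of that arithmetic with an illustrative sharpen kernel (the kernel, names and sample values are not from the file):

/* Standalone illustration (not from vf_convolution.c): one output sample of a
 * 3x3 convolution using the same sum * rdiv + bias + 0.5f rounding as filter_3x3(). */
#include <stdio.h>

static int convolve3x3_one(const int px[9], const int matrix[9], float rdiv, float bias)
{
    int i, sum = 0;

    for (i = 0; i < 9; i++)
        sum += px[i] * matrix[i];

    sum = (int)(sum * rdiv + bias + 0.5f);
    return sum < 0 ? 0 : sum > 255 ? 255 : sum;   /* av_clip_uint8() equivalent */
}

int main(void)
{
    /* illustrative sharpen kernel; its taps sum to 1, so rdiv = 1 leaves flat areas unchanged */
    static const int sharpen[9] = { 0, -1,  0,
                                   -1,  5, -1,
                                    0, -1,  0 };
    static const int flat[9]  = { 100, 100, 100, 100, 100, 100, 100, 100, 100 };
    static const int spike[9] = { 100, 100, 100, 100, 180, 100, 100, 100, 100 };

    printf("%d %d\n", convolve3x3_one(flat,  sharpen, 1.0f, 0.0f),   /* 100: flat area unchanged */
                      convolve3x3_one(spike, sharpen, 1.0f, 0.0f));  /* 255: 180*5 - 4*100 = 500, clipped */
    return 0;
}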
static void filter_row(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride)
{
    int x;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_column(uint8_t *dst, int height,
                          float rdiv, float bias, const int *const matrix,
                          const uint8_t *c[], int peak, int radius,
                          int dstride, int stride)
{
    int y;

    for (y = 0; y < height; y++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += c[i][y] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[0] = av_clip_uint8(sum);

        dst += dstride;
    }
}
static void setup_3x3(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 9; i++) {
        int xoff = FFABS(x + ((i % 3) - 1));
        int yoff = FFABS(y + (i / 3) - 1);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
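The setup_* helpers mirror out-of-frame taps: FFABS() reflects negative coordinates around zero, and xoff >= w ? 2 * w - 1 - xoff : xoff reflects coordinates past the last sample. A standalone sketch of just that mapping (function name and values are illustrative):

/* Standalone illustration (not from vf_convolution.c): the mirrored index used by
 * the setup_* helpers, shown for a row of width w = 5. */
#include <stdio.h>
#include <stdlib.h>

static int mirror(int pos, int w)
{
    int off = abs(pos);                      /* FFABS(): reflect around 0 */
    return off >= w ? 2 * w - 1 - off : off; /* reflect past the last sample */
}

int main(void)
{
    /* positions -2..6 map to 2 1 0 1 2 3 4 4 3 for w = 5 */
    for (int pos = -2; pos <= 6; pos++)
        printf("%d -> %d\n", pos, mirror(pos, 5));
    return 0;
}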
static void setup_5x5(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 25; i++) {
        int xoff = FFABS(x + ((i % 5) - 2));
        int yoff = FFABS(y + (i / 5) - 2);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
static void setup_7x7(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 49; i++) {
        int xoff = FFABS(x + ((i % 7) - 3));
        int yoff = FFABS(y + (i / 7) - 3);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
static void setup_row(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < radius * 2 + 1; i++) {
        int xoff = FFABS(x + i - radius);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;

        c[i] = src + xoff * bpc + y * stride;
    }
}
static void setup_column(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                         int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < radius * 2 + 1; i++) {
        int xoff = FFABS(x + i - radius);

        xoff = xoff >= h ? 2 * h - 1 - xoff : xoff;

        c[i] = src + y * bpc + xoff * stride;
    }
}
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    int plane;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int mode = s->mode[plane];
        const int bpc = s->bpc;
        const int radius = s->size[plane] / 2;
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int stride = in->linesize[plane];
        const int dstride = out->linesize[plane];
        /* sizeh/sizew swap rows and columns in column mode (declarations elided) */
        const int slice_start = (sizeh * jobnr) / nb_jobs;
        const int slice_end = (sizeh * (jobnr+1)) / nb_jobs;
        const float rdiv = s->rdiv[plane];
        const float bias = s->bias[plane];
        const int *matrix = s->matrix[plane];
        const uint8_t *c[49];
        const uint8_t *src = in->data[plane];
        uint8_t *dst;   /* output pointer for this slice (initialisation elided) */
        int y, x;

        if (s->copy[plane]) {
            /* identity matrix with neutral rdiv/bias: copy this slice of the plane
             * with av_image_copy_plane() and skip filtering (body elided) */
            continue;
        }
        for (y = slice_start; y < slice_end; y++) {
            /* per-pixel xoff/yoff computation and s->setup[plane]() calls elided */

            /* left border: one pixel at a time, with mirrored taps */
            for (x = 0; x < radius; x++) {
                s->filter[plane](dst + yoff + xoff, 1, rdiv,
                                 bias, matrix, c, s->max, radius,
                                 dstride, stride);
            }
            /* interior: a run of sizew - 2 * radius pixels in one call */
            s->filter[plane](dst + yoff + xoff, sizew - 2 * radius,
                             rdiv, bias, matrix, c, s->max, radius,
                             dstride, stride);
            /* right border */
            for (x = sizew - radius; x < sizew; x++) {
                s->filter[plane](dst + yoff + xoff, 1, rdiv,
                                 bias, matrix, c, s->max, radius,
                                 dstride, stride);
            }
        }
    }

    return 0;
}
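filter_slice() splits each plane into contiguous, non-overlapping bands with slice_start = (sizeh * jobnr) / nb_jobs. A standalone check of that arithmetic with illustrative numbers:

/* Standalone illustration (not from vf_convolution.c): how the slice bounds in
 * filter_slice() partition 1080 rows across 4 jobs. */
#include <stdio.h>

int main(void)
{
    const int sizeh = 1080, nb_jobs = 4;

    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        const int slice_start = (sizeh * jobnr) / nb_jobs;
        const int slice_end   = (sizeh * (jobnr + 1)) / nb_jobs;

        printf("job %d: rows [%d, %d)\n", jobnr, slice_start, slice_end);
    }
    /* prints 0-270, 270-540, 540-810, 810-1080: contiguous and non-overlapping */
    return 0;
}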
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;

    /* chroma plane sizes (AV_CEIL_RSHIFT by the log2 subsampling factors) elided */
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->bpc = (s->depth + 7) / 8;

    if (!strcmp(ctx->filter->name, "convolution")) {
        if (s->depth > 8) {
            for (p = 0; p < s->nb_planes; p++) {
                /* row/column modes select filter16_row / filter16_column (elided) */
                if (s->size[p] == 3)
                    s->filter[p] = filter16_3x3;
                else if (s->size[p] == 5)
                    s->filter[p] = filter16_5x5;
                else if (s->size[p] == 7)
                    s->filter[p] = filter16_7x7;
            }
        }
#if CONFIG_CONVOLUTION_FILTER && ARCH_X86_64
        ff_convolution_init_x86(s);
#endif
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_prewitt;
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_roberts;
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_sobel;
    }

    return 0;
}
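config_input() derives the sample ceiling (max, passed to the filter callbacks as peak) and the byte size per component (bpc) from the pixel format depth. A standalone sketch for a few common depths (illustrative only):

/* Standalone illustration (not from vf_convolution.c): depth-derived values as
 * computed in config_input(). */
#include <stdio.h>

int main(void)
{
    const int depths[] = { 8, 10, 12, 16 };

    for (int i = 0; i < 4; i++) {
        const int depth = depths[i];
        const int max = (1 << depth) - 1;   /* clip ceiling passed as "peak" */
        const int bpc = (depth + 7) / 8;    /* bytes per component */

        printf("depth %2d: max %5d, bpc %d\n", depth, max, bpc);
    }
    /* 8 -> 255/1, 10 -> 1023/2, 12 -> 4095/2, 16 -> 65535/2 */
    return 0;
}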
static av_cold int init(AVFilterContext *ctx)
{
    ConvolutionContext *s = ctx->priv;
    int i;

    if (!strcmp(ctx->filter->name, "convolution")) {
        for (i = 0; i < 4; i++) {
            int *matrix = (int *)s->matrix[i];
            char *p, *arg, *saveptr = NULL;
            float sum = 0;

            p = s->matrix_str[i];
            while (s->matrix_length[i] < 49) {
                if (!(arg = av_strtok(p, " ", &saveptr)))
                    break;

                p = NULL;
                sscanf(arg, "%d", &matrix[s->matrix_length[i]]);
                sum += matrix[s->matrix_length[i]];
                s->matrix_length[i]++;
            }

            if (!(s->matrix_length[i] & 1)) {
                /* an even element count is rejected with AV_LOG_ERROR (elided) */
                return AVERROR(EINVAL);
            }
            if (s->mode[i] == MATRIX_ROW) {
                s->filter[i] = filter_row;
                s->setup[i] = setup_row;
                s->size[i] = s->matrix_length[i];
            } else if (s->mode[i] == MATRIX_COLUMN) {
                s->filter[i] = filter_column;
                s->setup[i] = setup_column;
                s->size[i] = s->matrix_length[i];
            } else if (s->matrix_length[i] == 9) {
                s->size[i] = 3;
                /* a matrix equal to same3x3 enables the plain copy path instead
                 * (same for the 5x5 and 7x7 cases below; checks elided) */
                s->filter[i] = filter_3x3;
                s->setup[i] = setup_3x3;
            } else if (s->matrix_length[i] == 25) {
                s->size[i] = 5;
                s->filter[i] = filter_5x5;
                s->setup[i] = setup_5x5;
            } else if (s->matrix_length[i] == 49) {
                s->size[i] = 7;
                s->filter[i] = filter_7x7;
                s->setup[i] = setup_7x7;
            }

            /* default rdiv derived from the matrix sum (surrounding guards elided) */
            s->rdiv[i] = 1. / sum;

            if (s->copy[i] && (s->rdiv[i] != 1. || s->bias[i] != 0.))
                s->copy[i] = 0;
        }
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        for (i = 0; i < 4; i++) {
            if ((1 << i) & s->planes)
                s->filter[i] = filter_prewitt;
            else
                s->copy[i] = 1;
            s->rdiv[i] = s->scale;
            s->bias[i] = s->delta;
        }
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        for (i = 0; i < 4; i++) {
            if ((1 << i) & s->planes)
                s->filter[i] = filter_roberts;
            else
                s->copy[i] = 1;
            s->rdiv[i] = s->scale;
            s->bias[i] = s->delta;
        }
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        for (i = 0; i < 4; i++) {
            if ((1 << i) & s->planes)
                s->filter[i] = filter_sobel;
            else
                s->copy[i] = 1;
            s->rdiv[i] = s->scale;
            s->bias[i] = s->delta;
        }
    }

    return 0;
}
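init() tokenises each plane's matrix string and, by default, sets rdiv to the reciprocal of the tap sum. A standalone sketch of the same parsing, with POSIX strtok_r() standing in for av_strtok() so it builds without libavutil (names and the sample matrix are illustrative):

/* Standalone illustration (not from vf_convolution.c): parse a space-separated
 * matrix string and derive the default rdiv = 1 / sum, as init() does. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char str[] = "0 -1 0 -1 5 -1 0 -1 0";   /* a 3x3 sharpen matrix: 9 elements, sum 1 */
    char *p = str, *arg, *saveptr = NULL;
    int matrix[49], length = 0;
    float sum = 0;

    while (length < 49 && (arg = strtok_r(p, " ", &saveptr))) {
        p = NULL;
        sscanf(arg, "%d", &matrix[length]);
        sum += matrix[length];
        length++;
    }

    printf("%d elements, sum %g, default rdiv %g\n", length, sum, 1. / sum);
    /* prints: 9 elements, sum 1, default rdiv 1 */
    return 0;
}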
#if CONFIG_CONVOLUTION_FILTER
AVFilter ff_vf_convolution = {
    .name       = "convolution",
    /* description, priv_size, init, query_formats, inputs, outputs and flags elided */
    .priv_class = &convolution_class,
};
#endif /* CONFIG_CONVOLUTION_FILTER */

#if CONFIG_PREWITT_FILTER
static const AVOption prewitt_options[] = { /* planes/scale/delta options elided */ };
/* ff_vf_prewitt definition elided, with .priv_class = &prewitt_class */
#endif /* CONFIG_PREWITT_FILTER */

#if CONFIG_SOBEL_FILTER
static const AVOption sobel_options[] = { /* planes/scale/delta options elided */ };
/* ff_vf_sobel definition elided, with .priv_class = &sobel_class */
#endif /* CONFIG_SOBEL_FILTER */

#if CONFIG_ROBERTS_FILTER
static const AVOption roberts_options[] = { /* planes/scale/delta options elided */ };
/* ff_vf_roberts definition elided, with .priv_class = &roberts_class */
#endif /* CONFIG_ROBERTS_FILTER */