Go to the documentation of this file.
28 float rdiv,
float bias,
const int *
const matrix,
29 const uint8_t *
c[],
int peak,
int radius,
34 const uint8_t *
c[],
int peak,
int radius,
42 for (
i = 0;
i < 4;
i++) {
44 if (
s->matrix_length[
i] == 9 &&
s->depth == 8) {
57 for (
int i = 0;
i < nb_planes;
i++) {
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
static atomic_int cpu_flags
void ff_filter_sobel_avx512icl(uint8_t *dst, int width, float scale, float delta, const int *const matrix, const uint8_t *c[], int peak, int radius, int dstride, int stride, int size)
static int bias(int x, int c)
av_cold void ff_sobel_init_x86(ConvolutionContext *s, int depth, int nb_planes)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not care about signed integer overflow.
av_cold void ff_convolution_init_x86(ConvolutionContext *s)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
#define i(width, name, range_min, range_max)
#define EXTERNAL_SSE4(flags)
#define EXTERNAL_AVX512ICL(flags)
static void scale(int *out, const int *in, const int w, const int h, const int shift)
void ff_filter_3x3_sse4(uint8_t *dst, int width, float rdiv, float bias, const int *const matrix, const uint8_t *c[], int peak, int radius, int dstride, int stride, int size)