#define OFFSET(x) offsetof(ConvolutionContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const int same5x5[25] = {0, 0, 0, 0, 0,
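/*
 * same3x3/same5x5 hold pass-through (identity-style) kernels that the filter
 * can compare a user matrix against to decide when a plane needs no filtering.
 * line_copy8()/line_copy16() below copy one image row into a padded scratch
 * line and mirror up to `mergin` pixels past the row ends, so the kernels
 * never read outside the image.
 */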
    memcpy(line, srcp, width);

    for (i = mergin; i > 0; i--) {
        line[width - 1 + i] = line[width - 1 - i];

    memcpy(line, srcp, width * 2);

    for (i = mergin; i > 0; i--) {
        line[width - 1 + i] = line[width - 1 - i];
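/*
 * Illustrative sketch (not part of vf_convolution.c): a self-contained version
 * of the mirrored-edge copy that the line_copy helpers perform, assuming the
 * destination line has at least `mergin` writable pixels on each side.
 */
static void mirror_pad_row(uint8_t *line, const uint8_t *srcp, int width, int mergin)
{
    int i;

    memcpy(line, srcp, width);                        /* copy the row itself */

    for (i = mergin; i > 0; i--) {
        line[-i]            = line[i];                /* mirror left border  */
        line[width - 1 + i] = line[width - 1 - i];    /* mirror right border */
    }
}

/*
 * filter16_prewitt(): per-slice worker for the 16-bit Prewitt edge detector.
 * Each job processes rows [slice_start, slice_end); p0/p1/p2 are padded
 * scratch lines holding the previous, current and next row, and suma/sumb are
 * the two Prewitt gradient components combined as
 * sqrt(suma^2 + sumb^2) * scale + delta.
 */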
    const int peak = (1 << s->depth) - 1;
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    const float scale = s->scale;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] * -1 +
                       p0[x    ] * -1 +
                       p0[x + 1] * -1 +
                       p2[x - 1] *  1 +
                       p2[x    ] *  1 +
                       p2[x + 1] *  1;
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] *  1 +
                       p1[x - 1] * -1 +
                       p1[x + 1] *  1 +
                       p2[x - 1] * -1 +
                       p2[x + 1] *  1;

            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
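/*
 * filter16_roberts(): same slice layout as above, but suma/sumb are the two
 * diagonal differences of the Roberts cross operator, so only a 2x2
 * neighbourhood contributes to each output pixel.
 */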
    const int peak = (1 << s->depth) - 1;
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    const float scale = s->scale;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] *  1 +
                       p1[x    ] * -1;
            int sumb = p0[x    ] *  1 +
                       p1[x - 1] * -1;

            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
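/*
 * filter16_sobel(): identical structure to the Prewitt worker; suma/sumb are
 * the horizontal and vertical Sobel gradients (the Sobel kernel weights the
 * centre taps by 2), combined into the same magnitude/scale/delta expression.
 */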
    const int peak = (1 << s->depth) - 1;
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    const float scale = s->scale;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] * -1 +
                       p0[x + 1] *  1 +
                       p1[x - 1] * -2 +
                       p1[x + 1] *  2 +
                       p2[x - 1] * -1 +
                       p2[x + 1] *  1;
            int sumb = p0[x - 1] * -1 +
                       p0[x    ] * -2 +
                       p0[x + 1] * -1 +
                       p2[x - 1] *  1 +
                       p2[x    ] *  2 +
                       p2[x + 1] *  1;

            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
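/*
 * filter_prewitt(): 8-bit counterpart of filter16_prewitt().  The scratch
 * lines are filled with line_copy8() instead of line_copy16(), and the result
 * is clipped with av_clip_uint8() rather than against a depth-derived peak.
 */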
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const float scale = s->scale;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] * -1 +
                       p0[x    ] * -1 +
                       p0[x + 1] * -1 +
                       p2[x - 1] *  1 +
                       p2[x    ] *  1 +
                       p2[x + 1] *  1;
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] *  1 +
                       p1[x - 1] * -1 +
                       p1[x + 1] *  1 +
                       p2[x - 1] * -1 +
                       p2[x + 1] *  1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
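/* filter_roberts(): 8-bit Roberts cross worker, mirroring filter16_roberts(). */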
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const float scale = s->scale;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] *  1 +
                       p1[x    ] * -1;
            int sumb = p0[x    ] *  1 +
                       p1[x - 1] * -1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
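/* filter_sobel(): 8-bit Sobel worker, mirroring filter16_sobel(). */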
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const float scale = s->scale;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int suma = p0[x - 1] * -1 +
                       p0[x + 1] *  1 +
                       p1[x - 1] * -2 +
                       p1[x + 1] *  2 +
                       p2[x - 1] * -1 +
                       p2[x + 1] *  1;
            int sumb = p0[x - 1] * -1 +
                       p0[x    ] * -2 +
                       p0[x + 1] * -1 +
                       p2[x - 1] *  1 +
                       p2[x    ] *  2 +
                       p2[x + 1] *  1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
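/*
 * filter16_3x3(): generic 3x3 convolution for 16-bit planes using the
 * per-plane matrix, rdiv and bias: each output pixel is the weighted sum of
 * its 3x3 neighbourhood, scaled by rdiv, offset by bias, rounded and clipped
 * to [0, peak].
 */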
    const int peak = (1 << s->depth) - 1;
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x    ] * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x    ] * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x    ] * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip(sum, 0, peak);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
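/*
 * Illustrative sketch (not part of vf_convolution.c): the per-pixel arithmetic
 * shared by the generic 3x3/5x5 paths, written as a standalone helper.  `rows`
 * is assumed to point at n padded scratch lines centred on the current row,
 * matching the p0..p4 layout used by the workers above and below.
 */
static int convolve_one_px(const uint16_t **rows, int x, const int *matrix,
                           int n, float rdiv, float bias, int peak)
{
    int sum = 0, i, j;

    for (i = 0; i < n; i++)                     /* kernel rows    */
        for (j = 0; j < n; j++)                 /* kernel columns */
            sum += rows[i][x + j - n / 2] * matrix[i * n + j];

    sum = (int)(sum * rdiv + bias + 0.5f);      /* normalise, bias, round */
    return av_clip(sum, 0, peak);
}

/*
 * filter16_5x5(): generic 5x5 convolution for 16-bit planes.  Five scratch
 * lines p0..p4 are kept, and array[] flattens the 5x5 neighbourhood into 25
 * pointers so the inner loop is a plain 25-tap dot product with matrix[i].
 */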
    const int peak = (1 << s->depth) - 1;
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *p3 = p2 + bstride;
    uint16_t *p4 = p3 + bstride;
    uint16_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];

    line_copy16(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);

    for (y = slice_start; y < slice_end; y++) {
        uint16_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);

        for (x = 0; x < width; x++) {

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip(sum, 0, peak);
        }

        p4 = (p4 == end) ? orig: p4 + bstride;
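/* filter_3x3(): 8-bit generic 3x3 convolution; same arithmetic as
 * filter16_3x3() but clipped with av_clip_uint8(). */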
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;

    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x    ] * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x    ] * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x    ] * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p2 = (p2 == end) ? orig: p2 + bstride;
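/* filter_5x5(): 8-bit generic 5x5 convolution, the byte counterpart of
 * filter16_5x5(); the top scratch line is seeded two rows back (mirrored near
 * the frame edge) so the 5-row window is valid from the first slice row. */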
    const int bstride = s->bstride;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;

    line_copy8(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
    line_copy8(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);

    for (y = slice_start; y < slice_end; y++) {
        uint8_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);

        for (x = 0; x < width; x++) {

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p4 = (p4 == end) ? orig: p4 + bstride;
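/*
 * From config_input(): the per-plane filter function is chosen by filter name
 * and, for the generic convolution filter, by the parsed matrix size (3x3 or
 * 5x5), so each plane dispatches to the matching 8-bit or 16-bit worker.
 */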
    if (!strcmp(ctx->filter->name, "convolution")) {
            else if (s->size[p] == 5)
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
    } else if (!strcmp(ctx->filter->name, "roberts")) {
    } else if (!strcmp(ctx->filter->name, "sobel")) {
    for (plane = 0; plane < s->nb_planes; plane++) {
        if (s->copy[plane]) {
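/*
 * From init(): for the "convolution" filter each of the four plane matrices is
 * parsed from its option string with av_strtok() into s->matrix[i]; the
 * prewitt/roberts/sobel branches only need a per-plane setup loop, since their
 * kernels are fixed.
 */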
    if (!strcmp(ctx->filter->name, "convolution")) {
        for (i = 0; i < 4; i++) {
            int *matrix = (int *)s->matrix[i];
            char *p, *arg, *saveptr = NULL;
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        for (i = 0; i < 4; i++) {
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        for (i = 0; i < 4; i++) {
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        for (i = 0; i < 4; i++) {
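/*
 * Filter definitions: each variant is compiled in behind its own
 * CONFIG_*_FILTER guard.  The prewitt, sobel and roberts filters reuse the
 * convolution_inputs/convolution_outputs pads and carry their own AVOption
 * tables and priv_class.
 */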
#if CONFIG_CONVOLUTION_FILTER

    .name        = "convolution",
    .priv_class  = &convolution_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,

#if CONFIG_PREWITT_FILTER

static const AVOption prewitt_options[] = {

    .priv_class  = &prewitt_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,

#if CONFIG_SOBEL_FILTER

static const AVOption sobel_options[] = {

    .priv_class  = &sobel_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,

#if CONFIG_ROBERTS_FILTER

static const AVOption roberts_options[] = {

    .priv_class  = &roberts_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
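/*
 * Hedged usage note (illustrative, not from this file): the convolution filter
 * takes one space-separated coefficient string per plane, so a 3x3 sharpen on
 * all four planes might be written as
 *
 *   convolution="0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0"
 *
 * while prewitt/sobel/roberts expose essentially only plane selection plus
 * scale and delta.
 */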