static const uint16_t NNEDI_NNS[] = { 16, 32, 64, 128, 256 };
    void (*read)(const uint8_t *src, float *dst,
                 int src_stride, int dst_stride,
                 int width, int height, float scale);
    void (*write)(const float *src, uint8_t *dst,
                  int src_stride, int dst_stride,
                  int width, int height, int depth, float scale);
    void (*prescreen[2])(AVFilterContext *ctx,
                         const void *src, ptrdiff_t src_stride,
                         uint8_t *prescreen, int N,
                         const PrescreenerCoefficients *const coeffs);
#define OFFSET(x) offsetof(NNEDIContext, x)
#define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
    { "interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1},  0, 0, RFLAGS, "deint" },
    { "af", "use frame flags, both fields",  0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, RFLAGS, "field" },
    { "a",  "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, RFLAGS, "field" },
    { "tf", "use both fields, top first",    0, AV_OPT_TYPE_CONST, {.i64=2},  0, 0, RFLAGS, "field" },
    { "bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3},  0, 0, RFLAGS, "field" },
    { "nsize", "set size of local neighborhood around each pixel, used by the predictor neural network",
               OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, RFLAGS, "nsize" },
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;
static float dot_dsp(const NNEDIContext *const s, const float *kernel,
                     const float *input, int n, float scale, float bias)
{
    float sum, y;

    sum = s->fdsp->scalarproduct_float(kernel, input, n);

    y = sum * scale + bias + 1e-20f;

    return y;
}

static float elliott(float x)
{
    return x / (1.0f + fabsf(x));
}
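/* A minimal standalone sketch (hypothetical demo, not part of the filter)
 * contrasting the elliott() curve above with tanhf(): both saturate toward
 * +/-1, but elliott() needs one division and no transcendental call, which
 * is why it serves as the cheap activation here. Compile with -lm. */
#include <math.h>
#include <stdio.h>

static float elliott_demo(float x)
{
    return x / (1.0f + fabsf(x));
}

int main(void)
{
    for (float x = -4.0f; x <= 4.0f; x += 2.0f)
        printf("elliott(%+.1f) = %+.4f   tanh(%+.1f) = %+.4f\n",
               x, elliott_demo(x), x, tanhf(x));
    return 0;
}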
static void process_old(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    const NNEDIContext *const s = ctx->priv;
    const float *src_p = src;

    /* Adjust the source pointer to the top-left of the 12x4 filter window. */
    const float *window = src_p - 2 * src_stride - 5;

    for (int j = 0; j < N; j++) {
        LOCAL_ALIGNED_32(float, input, [48]);
        float state[12];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 12, window + i * src_stride + j, 12 * sizeof(float));

        /* Layer 0: 48 inputs -> 4 neurons. */
        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 48, 1.f, m_data->bias_l0[n]);
        transform_elliott(state + 1, 3);

        /* Layer 1: 4 -> 4. */
        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.f, m_data->bias_l1[n]);
        transform_elliott(state + 5, 3);

        /* Layer 2: 8 -> 4, then threshold into the prescreen mask. */
        for (int n = 0; n < 4; n++)
            state[n + 8] = dot_dsp(s, m_data->kernel_l2[n], state, 8, 1.f, m_data->bias_l2[n]);

        prescreen[j] = FFMAX(state[10], state[11]) <= FFMAX(state[8], state[9]) ? 255 : 0;
    }
}
static void process_new(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    const NNEDIContext *const s = ctx->priv;
    const float *src_p = src;

    /* Adjust the source pointer to the top-left of the 16x4 filter window. */
    const float *window = src_p - 2 * src_stride - 6;

    for (int j = 0; j < N; j += 4) {
        LOCAL_ALIGNED_32(float, input, [64]);
        float state[8];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 16, window + i * src_stride + j, 16 * sizeof(float));

        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 64, 1.f, m_data->bias_l0[n]);
        transform_elliott(state, 4);

        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.f, m_data->bias_l1[n]);

        /* Four output neurons decide four pixels at once. */
        for (int n = 0; n < 4; n++)
            prescreen[j + n] = state[n + 4] > 0.f;
    }
}

static int filter_offset(int nn, const PredictorCoefficients *const model)
{
    return nn * model->nsize;
}
static void gather_input(const float *src, ptrdiff_t src_stride,
                         float *buf, float mstd[4],
                         const PredictorCoefficients *const model)
{
    const float scale = 1.f / model->nsize;
    float sum = 0.f;
    float sum_sq = 0.f;
    float tmp;

    for (int i = 0; i < model->ydim; i++) {
        memcpy(buf, src, model->xdim * sizeof(float));

        for (int j = 0; j < model->xdim; j++) {
            const float val = src[j];

            sum += val;
            sum_sq += val * val;
        }

        src += src_stride;
        buf += model->xdim;
    }

    mstd[0] = sum * scale;
    mstd[3] = 0.f;

    tmp = sum_sq * scale - mstd[0] * mstd[0];
    if (tmp < FLT_EPSILON) {
        mstd[1] = 0.0f;
        mstd[2] = 0.0f;
    } else {
        mstd[1] = sqrtf(tmp);
        mstd[2] = 1.0f / mstd[1];
    }
}
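/* Sketch of the one-pass moment computation gather_input() performs: mean
 * from the running sum, variance from E[x^2] - mean^2, and the FLT_EPSILON
 * guard that zeroes 1/std for (near-)flat windows. Hypothetical helper name;
 * the mstd[] slots mirror the layout used above. */
#include <float.h>
#include <math.h>

static void window_moments(const float *x, int n, float mstd[4])
{
    const float scale = 1.f / n;
    float sum = 0.f, sum_sq = 0.f, tmp;

    for (int i = 0; i < n; i++) {
        sum    += x[i];
        sum_sq += x[i] * x[i];
    }

    mstd[0] = sum * scale;                    /* mean */
    mstd[3] = 0.f;                            /* wae5() accumulator */

    tmp = sum_sq * scale - mstd[0] * mstd[0]; /* biased variance */
    if (tmp < FLT_EPSILON) {
        mstd[1] = 0.0f; /* std */
        mstd[2] = 0.0f; /* 1/std: zero disables scaling on flat windows */
    } else {
        mstd[1] = sqrtf(tmp);
        mstd[2] = 1.0f / mstd[1];
    }
}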
static void wae5(const float *softmax, const float *el,
                 int n, float mstd[4])
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        vsum += softmax[i] * elliott(el[i]);
        wsum += softmax[i];
    }

    if (wsum > 1e-10f)
        mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
    else
        mstd[3] += mstd[0];
}
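/* Sketch of the arithmetic wae5() performs, written out on plain arrays: a
 * softmax-weighted average of elliott-transformed outputs, rescaled from the
 * normalized domain back to pixel values with the window mean/std. The
 * helper name is illustrative; the fixed 5.0f factor mirrors the constant
 * in wae5() above. */
#include <math.h>

static float weighted_prediction(const float *softmax, const float *el,
                                 int n, float mean, float std)
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        vsum += softmax[i] * (el[i] / (1.0f + fabsf(el[i]))); /* elliott */
        wsum += softmax[i];
    }

    return (5.0f * vsum) / wsum * std + mean;
}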
static void predictor(AVFilterContext *ctx,
                      const void *src, ptrdiff_t src_stride,
                      void *dst, const uint8_t *prescreen, int N,
                      const PredictorCoefficients *const model, int use_q2)
{
    const NNEDIContext *const s = ctx->priv;
    const float *src_p = src;
    float *dst_p = dst;

    /* Adjust the source pointer to the top-left of the filter window. */
    const float *window = src_p - (model->ydim / 2) * src_stride - (model->xdim / 2 - 1);
    const int filter_size = model->nsize;
    const int nns = model->nns;

    for (int i = 0; i < N; i++) {
        LOCAL_ALIGNED_32(float, input, [48 * 6]);
        float activation[256 * 2];
        float mstd[4];
        float scale;

        if (prescreen[i])
            continue;

        gather_input(window + i, src_stride, input, mstd, model);
        scale = mstd[2];

        for (int nn = 0; nn < nns; nn++)
            activation[nn] = dot_dsp(s, softmax_q1_filter(nn, model), input, filter_size, scale, model->softmax_bias_q1[nn]);

        for (int nn = 0; nn < nns; nn++)
            activation[nns + nn] = dot_dsp(s, elliott_q1_filter(nn, model), input, filter_size, scale, model->elliott_bias_q1[nn]);

        transform_softmax_exp(activation, nns);
        wae5(activation, activation + nns, nns, mstd);

        if (use_q2) {
            for (int nn = 0; nn < nns; nn++)
                activation[nn] = dot_dsp(s, softmax_q2_filter(nn, model), input, filter_size, scale, model->softmax_bias_q2[nn]);

            for (int nn = 0; nn < nns; nn++)
                activation[nns + nn] = dot_dsp(s, elliott_q2_filter(nn, model), input, filter_size, scale, model->elliott_bias_q2[nn]);

            transform_softmax_exp(activation, nns);
            wae5(activation, activation + nns, nns, mstd);
        }

        dst_p[i] = mstd[3] * (use_q2 ? 0.5f : 1.f);
    }
}
static void read_bytes(const uint8_t *src, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    for (int y = 0; y < height; y++) {
        /* Mirror 32 samples of padding on each side. */
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];

        for (int x = 0; x < width; x++)
            dst[x] = src[x];

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1];

        dst += dst_stride;
        src += src_stride;
    }
}
static void read_words(const uint8_t *srcp, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    const uint16_t *src = (const uint16_t *)srcp;

    src_stride /= 2;

    for (int y = 0; y < height; y++) {
        /* Mirror 32 samples of padding on each side. */
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x] * scale;

        for (int x = 0; x < width; x++)
            dst[x] = src[x] * scale;

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1] * scale;

        dst += dst_stride;
        src += src_stride;
    }
}
static void write_bytes(const float *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uint8(src[x]);

        dst += dst_stride;
        src += src_stride;
    }
}
static void write_words(const float *src, uint8_t *dstp,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    uint16_t *dst = (uint16_t *)dstp;

    dst_stride /= 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uintp2_c(src[x] * scale, depth);

        dst += dst_stride;
        src += src_stride;
    }
}
static void interpolation(const void *src, ptrdiff_t src_stride,
                          void *dst, const uint8_t *prescreen, int n)
{
    const float *src_p = src;
    float *dst_p = dst;

    /* Adjust the source pointer to the top of the 4-tap column window. */
    const float *window = src_p - 2 * src_stride;

    for (int i = 0; i < n; i++) {
        float accum = 0.0f;

        if (!prescreen[i])
            continue;

        accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
        accum += (19.0f / 32.0f) * window[1 * src_stride + i];
        accum += (19.0f / 32.0f) * window[2 * src_stride + i];
        accum += (-3.0f / 32.0f) * window[3 * src_stride + i];

        dst_p[i] = accum;
    }
}
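/* Hypothetical self-check, not filter code: the 4-tap kernel used by
 * interpolation() above, (-3/32, 19/32, 19/32, -3/32), sums to exactly 1,
 * so flat areas pass through unchanged while the small negative lobes keep
 * edges from blurring. */
#include <stdio.h>

int main(void)
{
    static const float taps[4] = { -3.0f / 32.0f, 19.0f / 32.0f,
                                    19.0f / 32.0f, -3.0f / 32.0f };
    /* Four vertically adjacent field samples; the missing line sits between
     * the two middle taps. */
    const float column[4] = { 100.0f, 100.0f, 100.0f, 100.0f };
    float accum = 0.0f;

    for (int i = 0; i < 4; i++)
        accum += taps[i] * column[i];

    printf("interpolated = %f\n", accum); /* 100.000000: DC is preserved */
    return 0;
}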
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const NNEDIContext *const s = ctx->priv;
    AVFrame *out = arg;
    AVFrame *in = s->prev;
    const float in_scale = s->in_scale;
    const float out_scale = s->out_scale;
    const int depth = s->depth;
    const int interlaced = in->interlaced_frame;
    const int tff = s->field_n == (s->field < 0 ? interlaced ? in->top_field_first : 1 :
                                   (s->field & 1) ^ 1);

    for (int p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
        const int slice_end = 2 * ((height / 2 * (jobnr + 1)) / nb_jobs);
        const uint8_t *src_data = in->data[p];
        uint8_t *dst_data = out->data[p];
        uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
        const int src_linesize = in->linesize[p];
        const int dst_linesize = out->linesize[p];
        uint8_t *prescreen_buf = s->prescreen_buf[jobnr];
        float *srcbuf = s->input_buf[jobnr];
        const int srcbuf_stride = width + 64;
        float *dstbuf = s->output_buf[jobnr];
        const int dstbuf_stride = width;
        const int slice_height = (slice_end - slice_start) / 2;
        const int last_slice = slice_end == height;
        const uint8_t *in_line;
        uint8_t *out_line;
        int y_out;

        if (!(s->process_plane & (1 << p))) {
            av_image_copy_plane(dst, out->linesize[p],
                                in->data[p] + slice_start * in->linesize[p],
                                in->linesize[p],
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        /* Copy the kept field straight through. */
        y_out = slice_start + (tff ^ (slice_start & 1));
        in_line = src_data + (y_out * src_linesize);
        out_line = dst_data + (y_out * dst_linesize);

        while (y_out < slice_end) {
            memcpy(out_line, in_line, s->linesize[p]);
            y_out += 2;
            in_line += src_linesize * 2;
            out_line += dst_linesize * 2;
        }

        /* Import the field to be synthesized into the padded float buffer,
         * mirroring three context rows at the top edge. */
        y_out = slice_start + ((!tff) ^ (slice_start & 1));

        s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        in_line = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
        out_line = dst_data + (y_out * dst_linesize);

        s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
                width, slice_height - last_slice, in_scale);

        y_out += (slice_height - last_slice) * 2;

        /* Mirror three context rows at the bottom edge. */
        s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        /* Prescreen, run the predictor network on rejected pixels, then
         * cheaply interpolate the pixels the prescreener accepted. */
        for (int y = 0; y < slice_end - slice_start; y += 2) {
            if (s->pscrn > 0)
                s->prescreen[s->pscrn > 1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                                           srcbuf_stride, prescreen_buf, width,
                                           &s->prescreener[s->pscrn - 1]);

            predictor(ctx,
                      srcbuf + (y / 2) * srcbuf_stride + 32,
                      srcbuf_stride,
                      dstbuf + (y / 2) * dstbuf_stride,
                      prescreen_buf, width,
                      &s->coeffs[s->etype][s->nnsparam][s->nsize], s->qual == 2);

            if (s->pscrn > 0)
                interpolation(srcbuf + (y / 2) * srcbuf_stride + 32,
                              srcbuf_stride,
                              dstbuf + (y / 2) * dstbuf_stride,
                              prescreen_buf, width);
        }

        s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
                 width, slice_height, depth, out_scale);
    }

    return 0;
}
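/* Hypothetical demo of the slice partitioning used above: jobs are split on
 * even-line boundaries so every slice starts on a full field pair, and
 * slice_end of job n equals slice_start of job n+1, tiling the plane with
 * no gap or overlap (for even heights). */
#include <stdio.h>

int main(void)
{
    const int height = 1080, nb_jobs = 8;

    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
        const int slice_end   = 2 * ((height / 2 * (jobnr + 1)) / nb_jobs);

        printf("job %d: lines [%4d, %4d)\n", jobnr, slice_start, slice_end);
    }
    return 0;
}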
    /* get_frame(): flip field parity after emitting a field when both
       fields are being output. */
    if (s->field == -2 || s->field > 1)
        s->field_n = !s->field_n;

    /* ... */

    /* filter_frame(): pass the frame through untouched when it is not
       marked interlaced (with deint=interlaced) or the filter is disabled. */
    if ((s->deint && !in->interlaced_frame) || ctx->is_disabled) {
        /* ... */
    }

    s->pts = s->prev->pts * 2;

    ret = get_frame(ctx, 0);
    if (ret < 0 || (s->field > -2 && s->field < 2)) {
        /* ... */
    }

    s->pts = s->prev->pts + in->pts;

    /* ... */

    /* request_frame(): at EOF, clone the last frame and extrapolate its
       timestamp by one frame duration. */
        next->pts = s->prev->pts + av_rescale_q(1, av_inv_q(ctx->outputs[0]->frame_rate),
                                                ctx->outputs[0]->time_base);
    /* ... */
    } else if (ret < 0) {
        return ret;
    }
static void copy_weights(float *dst, int n, const float **data)
{
    memcpy(dst, *data, n * sizeof(float));
    *data += n;
}

static int allocate_model(PredictorCoefficients *coeffs, int xdim, int ydim, int nns)
{
    int filter_size = nns * xdim * ydim;
    int bias_size = nns;
    float *data;

    /* One block holds the q1/q2 softmax and elliott filters plus biases. */
    data = av_calloc(filter_size + bias_size, 4 * sizeof(float));
    if (!data)
        return AVERROR(ENOMEM);

    coeffs->data = data;
    coeffs->xdim = xdim;
    coeffs->ydim = ydim;
    coeffs->nsize = xdim * ydim;
    coeffs->nns = nns;

    /* ... carve the individual filter and bias arrays out of the block ... */

    return 0;
}
    /* The first prescreener (pscrn=1) uses the weights in file order. */
    copy_weights(&s->prescreener[0].kernel_l0[0][0], 4 * 48, &bdata);
    copy_weights(&s->prescreener[0].kernel_l1[0][0], 4 * 4, &bdata);
    copy_weights(&s->prescreener[0].kernel_l2[0][0], 4 * 8, &bdata);

    /* The newer prescreeners (pscrn=2..4) store their level-0/1 kernels
     * shuffled; undo the shuffle while loading. */
    for (int i = 0; i < 3; i++) {
        PrescreenerCoefficients *data = &s->prescreener[i + 1];
        float kernel_l0_shuffled[4 * 64];
        float kernel_l1_shuffled[4 * 4];

        copy_weights(kernel_l0_shuffled, 4 * 64, &bdata);
        copy_weights(kernel_l1_shuffled, 4 * 4, &bdata);
        /* ... copy the bias vectors ... */

        for (int n = 0; n < 4; n++) {
            for (int k = 0; k < 64; k++)
                data->kernel_l0[n][k] = kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
            for (int k = 0; k < 4; k++)
                data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
        }
    }
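/* Hypothetical self-check, not filter code: the level-0 un-shuffle above is
 * a pure permutation. Every (n, k) pair maps to a distinct index in 0..255,
 * so no coefficient is lost or written twice. */
#include <assert.h>

int main(void)
{
    unsigned char seen[4 * 64] = { 0 };

    for (int n = 0; n < 4; n++) {
        for (int k = 0; k < 64; k++) {
            const int idx = (k / 8) * 32 + n * 8 + k % 8;

            assert(idx >= 0 && idx < 4 * 64 && !seen[idx]);
            seen[idx] = 1;
        }
    }
    return 0; /* reaching here proves the mapping is a bijection */
}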
    /* Load the predictor models: 2 etypes x 5 neuron counts x 7 window sizes. */
    for (int m = 0; m < 2; m++) {
        for (int i = 0; i < 5; i++) {
            const int nns = NNEDI_NNS[i];

            for (int j = 0; j < 7; j++) {
                PredictorCoefficients *model = &s->coeffs[m][i][j];
                const int xdim = NNEDI_XDIM[j];
                const int ydim = NNEDI_YDIM[j];
                const int filter_size = xdim * ydim;

                /* ... copy softmax/elliott weights and biases for q1 and q2 ... */
            }
        }
    }

/* ... */

static void subtract_mean_old(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 48);

        transform(coeffs->kernel_l0[n], 48, m, half);
    }
}

static void subtract_mean_new(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 64);

        transform(coeffs->kernel_l0[n], 64, m, half);
    }
}
static void subtract_mean_predictor(PredictorCoefficients *model)
{
    const int filter_size = model->nsize;
    const int nns = model->nns;
    const float scale = 1.f / nns;

    double softmax_means[256]; /* nns is at most 256 */
    double elliott_means[256];
    double mean_filter[48 * 6] = { 0 };
    double mean_bias;

    /* Quality 1 weights. */
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++)
            mean_filter[k] += model->softmax_q1[nn * filter_size + k] - softmax_means[nn];
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q1, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];
        }
        model->softmax_bias_q1[nn] -= mean_bias;
    }

    memset(mean_filter, 0, sizeof(mean_filter));

    /* Quality 2 weights. */
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++) {
            mean_filter[k] += model->softmax_q2[nn * filter_size + k] - softmax_means[nn];
        }
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q2, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
        }
        model->softmax_bias_q2[nn] -= mean_bias;
    }
}
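/* Sketch of the normalization idea behind subtract_mean_predictor(), on toy
 * dimensions with a hypothetical helper name: first remove each neuron's own
 * mean, then remove the per-tap mean shared across neurons, leaving
 * zero-mean weights for the dot products in dot_dsp(). */
static void subtract_means(float *weights, int neurons, int taps)
{
    double mean_filter[64] = { 0 }; /* assumes taps <= 64 in this sketch */

    for (int n = 0; n < neurons; n++) {
        double m = 0.0;

        for (int k = 0; k < taps; k++)
            m += weights[n * taps + k];
        m /= taps;

        for (int k = 0; k < taps; k++) {
            weights[n * taps + k] -= (float)m;
            mean_filter[k] += weights[n * taps + k];
        }
    }

    for (int k = 0; k < taps; k++)
        mean_filter[k] /= neurons;

    for (int n = 0; n < neurons; n++)
        for (int k = 0; k < taps; k++)
            weights[n * taps + k] -= (float)mean_filter[k];
}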
    FILE *weights_file = NULL;
    int64_t weights_size;

    /* ... open the file named by the weights option ... */

    if (fseek(weights_file, 0, SEEK_END)) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the end of weights file.\n");
        fclose(weights_file);
        return AVERROR(EINVAL);
    }

    weights_size = ftell(weights_file);

    if (weights_size == -1) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't get size of weights file.\n");
        return AVERROR(EINVAL);
    } else if (weights_size != NNEDI_WEIGHTS_SIZE) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Unexpected weights file size.\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_SET)) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the start of weights file.\n");
        return AVERROR(EINVAL);
    }

    /* ... read NNEDI_WEIGHTS_SIZE bytes, then fclose(weights_file) ... */
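/* Generic sketch of the fseek()/ftell() size probe used above, as a
 * hypothetical standalone helper. ftell() returns long and -1 on failure,
 * which must be checked before rewinding and reading. */
#include <stdio.h>

static long file_size(FILE *f)
{
    long size;

    if (fseek(f, 0, SEEK_END))
        return -1;

    size = ftell(f);
    if (size == -1)
        return -1;

    if (fseek(f, 0, SEEK_SET)) /* rewind so the caller reads from the start */
        return -1;

    return size;
}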
    s->depth = desc->comp[0].depth;
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->half = ((1 << 8) - 1) / 2.f;
    s->out_scale = 1 << (s->depth - 8);
    s->in_scale = 1.f / s->out_scale;
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++)
                subtract_mean_predictor(&s->coeffs[i][j][k]);
        }
    }
    s->input_size = (s->planewidth[0] + 64) * (s->planeheight[0] + 6);
    s->input_buf = av_calloc(s->nb_threads, sizeof(*s->input_buf));
    if (!s->input_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->input_buf[i] = av_calloc(s->input_size, sizeof(**s->input_buf));
        if (!s->input_buf[i])
            return AVERROR(ENOMEM);
    }

    s->output_buf = av_calloc(s->nb_threads, sizeof(*s->output_buf));
    if (!s->output_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->output_buf[i] = av_calloc(s->input_size, sizeof(**s->output_buf));
        if (!s->output_buf[i])
            return AVERROR(ENOMEM);
    }

    s->prescreen_buf = av_calloc(s->nb_threads, sizeof(*s->prescreen_buf));
    if (!s->prescreen_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->prescreen_buf[i] = av_calloc(s->planewidth[0], sizeof(**s->prescreen_buf));
        if (!s->prescreen_buf[i])
            return AVERROR(ENOMEM);
    }
    for (int i = 0; i < s->nb_threads && s->prescreen_buf; i++)
        av_freep(&s->prescreen_buf[i]);

    av_freep(&s->prescreen_buf);

    for (int i = 0; i < s->nb_threads && s->input_buf; i++)
        av_freep(&s->input_buf[i]);

    av_freep(&s->input_buf);

    for (int i = 0; i < s->nb_threads && s->output_buf; i++)
        av_freep(&s->output_buf[i]);

    av_freep(&s->output_buf);
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++) {
                av_freep(&s->coeffs[i][j][k].data);
            }
        }
    }
    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
    .priv_class    = &nnedi_class,