FFmpeg
f_select.c
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * filter for selecting which frames pass through the filterchain
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/avstring.h"
29 #include "libavutil/eval.h"
30 #include "libavutil/fifo.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "avfilter.h"
36 #include "audio.h"
37 #include "filters.h"
38 #include "formats.h"
39 #include "video.h"
40 #include "scene_sad.h"
41 
42 static const char *const var_names[] = {
43  "TB", ///< timebase
44 
45  "pts", ///< original pts in the file of the frame
46  "start_pts", ///< first PTS in the stream, expressed in TB units
47  "prev_pts", ///< previous frame PTS
48  "prev_selected_pts", ///< previous selected frame PTS
49 
50  "t", ///< timestamp expressed in seconds
51  "start_t", ///< first PTS in the stream, expressed in seconds
52  "prev_t", ///< previous frame time
53  "prev_selected_t", ///< previously selected time
54 
55  "pict_type", ///< the type of picture in the movie
56  "I",
57  "P",
58  "B",
59  "S",
60  "SI",
61  "SP",
62  "BI",
63  "PICT_TYPE_I",
64  "PICT_TYPE_P",
65  "PICT_TYPE_B",
66  "PICT_TYPE_S",
67  "PICT_TYPE_SI",
68  "PICT_TYPE_SP",
69  "PICT_TYPE_BI",
70 
71  "interlace_type", ///< the frame interlace type
72  "PROGRESSIVE",
73  "TOPFIRST",
74  "BOTTOMFIRST",
75 
76  "consumed_samples_n",///< number of samples consumed by the filter (only audio)
77  "samples_n", ///< number of samples in the current frame (only audio)
78  "sample_rate", ///< sample rate (only audio)
79 
80  "n", ///< frame number (starting from zero)
81  "selected_n", ///< selected frame number (starting from zero)
82  "prev_selected_n", ///< number of the last selected frame
83 
84  "key", ///< tell if the frame is a key frame
85 
86  "scene",
87 
88  "concatdec_select", ///< frame is within the interval set by the concat demuxer
89 
90  "ih", ///< ih: Represents the height of the input video frame.
91  "iw", ///< iw: Represents the width of the input video frame.
92 
93  "view",
94 
95  NULL
96 };
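/* For reference, the names above are the constants and variables understood by
 * the filter's expression option. Typical expressions documented for the
 * select filter include:
 *   select='eq(pict_type,I)'   keep only intra frames
 *   select='gt(scene,0.4)'     keep frames with a high scene-change score
 *   select='not(mod(n,10))'    keep one frame out of every ten
 */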
97 
98 enum var_name {
99  VAR_TB,
100 
101  VAR_PTS,
102  VAR_START_PTS,
103  VAR_PREV_PTS,
104  VAR_PREV_SELECTED_PTS,
105 
106  VAR_T,
107  VAR_START_T,
108  VAR_PREV_T,
109  VAR_PREV_SELECTED_T,
110 
111  VAR_PICT_TYPE,
112  VAR_I,
113  VAR_P,
114  VAR_B,
115  VAR_S,
116  VAR_SI,
117  VAR_SP,
118  VAR_BI,
119  VAR_PICT_TYPE_I,
120  VAR_PICT_TYPE_P,
121  VAR_PICT_TYPE_B,
122  VAR_PICT_TYPE_S,
123  VAR_PICT_TYPE_SI,
124  VAR_PICT_TYPE_SP,
125  VAR_PICT_TYPE_BI,
126 
127  VAR_INTERLACE_TYPE,
128  VAR_INTERLACE_TYPE_P,
129  VAR_INTERLACE_TYPE_T,
130  VAR_INTERLACE_TYPE_B,
131 
132  VAR_CONSUMED_SAMPLES_N,
133  VAR_SAMPLES_N,
134  VAR_SAMPLE_RATE,
135 
136  VAR_N,
137  VAR_SELECTED_N,
138  VAR_PREV_SELECTED_N,
139 
140  VAR_KEY,
141 
142  VAR_SCENE,
143 
144  VAR_CONCATDEC_SELECT,
145 
146  VAR_IH,
147  VAR_IW,
148 
149  VAR_VIEW,
150 
151  VAR_VARS_NB
152 };
153 
154 typedef struct SelectContext {
155  const AVClass *class;
156  char *expr_str;
157  AVExpr *expr;
158  double var_values[VAR_VARS_NB];
159  int bitdepth;
160  int nb_planes;
161  ptrdiff_t width[4];
162  ptrdiff_t height[4];
163  int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
164  ff_scene_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
165  double prev_mafd; ///< previous MAFD (scene detect only)
166  AVFrame *prev_picref; ///< previous frame (scene detect only)
167  double select;
168  int select_out; ///< mark the selected output pad index
169  int nb_outputs;
170 } SelectContext;
171 
172 #define OFFSET(x) offsetof(SelectContext, x)
173 #define DEFINE_OPTIONS(filt_name, FLAGS) \
174 static const AVOption filt_name##_options[] = { \
175  { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
176  { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
177  { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
178  { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
179  { NULL } \
180 }
181 
182 static int request_frame(AVFilterLink *outlink);
183 
184 static av_cold int init(AVFilterContext *ctx)
185 {
186  SelectContext *select = ctx->priv;
187  int i, ret;
188 
189  if ((ret = av_expr_parse(&select->expr, select->expr_str,
190  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
191  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
192  select->expr_str);
193  return ret;
194  }
195  select->do_scene_detect = !!strstr(select->expr_str, "scene");
196 
197  for (i = 0; i < select->nb_outputs; i++) {
198  AVFilterPad pad = { 0 };
199 
200  pad.name = av_asprintf("output%d", i);
201  if (!pad.name)
202  return AVERROR(ENOMEM);
203  pad.type = ctx->filter->inputs[0].type;
204  pad.request_frame = request_frame;
205  if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
206  return ret;
207  }
208 
209  return 0;
210 }
211 
212 #define INTERLACE_TYPE_P 0
213 #define INTERLACE_TYPE_T 1
214 #define INTERLACE_TYPE_B 2
215 
216 static int config_input(AVFilterLink *inlink)
217 {
218  SelectContext *select = inlink->dst->priv;
219  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
220  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
221  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
222  desc->nb_components >= 3;
223 
224  select->bitdepth = desc->comp[0].depth;
225  select->nb_planes = is_yuv ? 1 : av_pix_fmt_count_planes(inlink->format);
226 
227  for (int plane = 0; plane < select->nb_planes; plane++) {
228  ptrdiff_t line_size = av_image_get_linesize(inlink->format, inlink->w, plane);
229  int vsub = desc->log2_chroma_h;
230 
231  select->width[plane] = line_size >> (select->bitdepth > 8);
232  select->height[plane] = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
233  }
234 
235  select->var_values[VAR_N] = 0.0;
236  select->var_values[VAR_SELECTED_N] = 0.0;
237 
238  select->var_values[VAR_TB] = av_q2d(inlink->time_base);
239 
240  select->var_values[VAR_PREV_SELECTED_N] = NAN;
241  select->var_values[VAR_PREV_PTS] = NAN;
242  select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
243  select->var_values[VAR_PREV_SELECTED_T] = NAN;
244  select->var_values[VAR_PREV_T] = NAN;
245  select->var_values[VAR_START_PTS] = NAN;
246  select->var_values[VAR_START_T] = NAN;
247 
248  select->var_values[VAR_I]  = AV_PICTURE_TYPE_I;
249  select->var_values[VAR_P]  = AV_PICTURE_TYPE_P;
250  select->var_values[VAR_B]  = AV_PICTURE_TYPE_B;
251  select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
252  select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
253  select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
254  select->var_values[VAR_PICT_TYPE_I]  = AV_PICTURE_TYPE_I;
255  select->var_values[VAR_PICT_TYPE_P]  = AV_PICTURE_TYPE_P;
256  select->var_values[VAR_PICT_TYPE_B]  = AV_PICTURE_TYPE_B;
257  select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
258  select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
259  select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
260 
261  select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
262  select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
263  select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
264 
265  select->var_values[VAR_PICT_TYPE] = NAN;
266  select->var_values[VAR_INTERLACE_TYPE] = NAN;
267  select->var_values[VAR_SCENE] = NAN;
268  select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
269  select->var_values[VAR_SAMPLES_N] = NAN;
270 
271  select->var_values[VAR_IH] = NAN;
272  select->var_values[VAR_IW] = NAN;
273 
274  select->var_values[VAR_SAMPLE_RATE] =
275  inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
276 
277  if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
278  select->sad = ff_scene_sad_get_fn(select->bitdepth == 8 ? 8 : 16);
279  if (!select->sad)
280  return AVERROR(EINVAL);
281  }
282  return 0;
283 }
284 
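/* Scene score, as computed below: the per-plane SAD between the current and
 * previous frame is summed, divided by the pixel count and by 2^(bitdepth-8)
 * to give a mean absolute frame difference (MAFD) on an 8-bit scale, and the
 * result is av_clipf(FFMIN(mafd, |mafd - prev_mafd|) / 100, 0, 1), so values
 * close to 1 indicate an abrupt change of content between frames. */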
285 static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
286 {
287  double ret = 0;
288  SelectContext *select = ctx->priv;
289  AVFrame *prev_picref = select->prev_picref;
290 
291  if (prev_picref &&
292  frame->height == prev_picref->height &&
293  frame->width == prev_picref->width) {
294  uint64_t sad = 0;
295  double mafd, diff;
296  uint64_t count = 0;
297 
298  for (int plane = 0; plane < select->nb_planes; plane++) {
299  uint64_t plane_sad;
300  select->sad(prev_picref->data[plane], prev_picref->linesize[plane],
301  frame->data[plane], frame->linesize[plane],
302  select->width[plane], select->height[plane], &plane_sad);
303  sad += plane_sad;
304  count += select->width[plane] * select->height[plane];
305  }
306 
307  mafd = (double)sad / count / (1ULL << (select->bitdepth - 8));
308  diff = fabs(mafd - select->prev_mafd);
309  ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
310  select->prev_mafd = mafd;
311  av_frame_free(&prev_picref);
312  }
313  select->prev_picref = av_frame_clone(frame);
314  return ret;
315 }
316 
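/* Maps the concat demuxer's lavf.concatdec.start_time / lavf.concatdec.duration
 * frame metadata to the concatdec_select variable: -1 (true) when the frame's
 * pts lies inside the configured segment, 0 when it lies outside, and NAN when
 * the metadata is not present. */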
317 static double get_concatdec_select(AVFrame *frame, int64_t pts)
318 {
319  AVDictionary *metadata = frame->metadata;
320  AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
321  AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
322  if (start_time_entry) {
323  int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
324  if (pts >= start_time) {
325  if (duration_entry) {
326  int64_t duration = strtoll(duration_entry->value, NULL, 10);
327  if (pts < start_time + duration)
328  return -1;
329  else
330  return 0;
331  }
332  return -1;
333  }
334  return 0;
335  }
336  return NAN;
337 }
338 
339 static void select_frame(AVFilterContext *ctx, AVFrame *frame)
340 {
341  SelectContext *select = ctx->priv;
342  AVFilterLink *inlink = ctx->inputs[0];
343  FilterLink *inl = ff_filter_link(inlink);
344  const AVFrameSideData *sd;
345  double res;
346 
347  if (isnan(select->var_values[VAR_START_PTS]))
348  select->var_values[VAR_START_PTS] = TS2D(frame->pts);
349  if (isnan(select->var_values[VAR_START_T]))
350  select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
351 
352  select->var_values[VAR_N ] = inl->frame_count_out;
353  select->var_values[VAR_PTS] = TS2D(frame->pts);
354  select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
355  select->var_values[VAR_KEY] = !!(frame->flags & AV_FRAME_FLAG_KEY);
356  select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
357 
358  switch (inlink->type) {
359  case AVMEDIA_TYPE_AUDIO:
360  select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
361  break;
362 
363  case AVMEDIA_TYPE_VIDEO:
364  select->var_values[VAR_IH] = frame->height;
365  select->var_values[VAR_IW] = frame->width;
366 
367  select->var_values[VAR_INTERLACE_TYPE] =
368  !(frame->flags & AV_FRAME_FLAG_INTERLACED) ? INTERLACE_TYPE_P :
369  (frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
370  select->var_values[VAR_PICT_TYPE] = frame->pict_type;
371  if (select->do_scene_detect) {
372  char buf[32];
373  select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
374  // TODO: document metadata
375  snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
376  av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
377  }
378 
379  sd = av_frame_side_data_get(frame->side_data, frame->nb_side_data,
380  AV_FRAME_DATA_VIEW_ID);
381  select->var_values[VAR_VIEW] = sd ? *(int*)sd->data : NAN;
382  break;
383  }
384 
385  select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
386  av_log(inlink->dst, AV_LOG_DEBUG,
387  "n:%f pts:%f t:%f key:%d",
388  select->var_values[VAR_N],
389  select->var_values[VAR_PTS],
390  select->var_values[VAR_T],
391  !!(frame->flags & AV_FRAME_FLAG_KEY));
392 
393  switch (inlink->type) {
394  case AVMEDIA_TYPE_VIDEO:
395  av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
396  !(frame->flags & AV_FRAME_FLAG_INTERLACED) ? 'P' :
397  (frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) ? 'T' : 'B',
398  av_get_picture_type_char(frame->pict_type),
399  select->var_values[VAR_SCENE]);
400  break;
401  case AVMEDIA_TYPE_AUDIO:
402  av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
403  frame->nb_samples,
404  select->var_values[VAR_CONSUMED_SAMPLES_N]);
405  break;
406  }
407 
408  if (res == 0) {
409  select->select_out = -1; /* drop */
410  } else if (isnan(res) || res < 0) {
411  select->select_out = 0; /* first output */
412  } else {
413  select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
414  }
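 /* Example of the mapping above with outputs=2: res=0 drops the frame, a
  * negative or NaN result goes to output 0, res=1 maps to output 0
  * (ceil(1)-1 = 0), and any res >= 2 is clamped to output 1 (nb_outputs-1).
  * This is what lets expressions such as 'mod(n,2)+1' split odd and even
  * frames across two outputs. */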
415 
416  av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
417 
418  if (res) {
419  select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
420  select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
421  select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
422  select->var_values[VAR_SELECTED_N] += 1.0;
423  if (inlink->type == AVMEDIA_TYPE_AUDIO)
424  select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
425  }
426 
427  select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
428  select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
429 }
430 
431 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
432 {
433  AVFilterContext *ctx = inlink->dst;
434  SelectContext *select = ctx->priv;
435 
436  select_frame(ctx, frame);
437  if (select->select)
438  return ff_filter_frame(ctx->outputs[select->select_out], frame);
439 
440  av_frame_free(&frame);
441  return 0;
442 }
443 
444 static int request_frame(AVFilterLink *outlink)
445 {
446  AVFilterLink *inlink = outlink->src->inputs[0];
447  int ret = ff_request_frame(inlink);
448  return ret;
449 }
450 
451 static av_cold void uninit(AVFilterContext *ctx)
452 {
453  SelectContext *select = ctx->priv;
454 
455  av_expr_free(select->expr);
456  select->expr = NULL;
457 
458  if (select->do_scene_detect) {
459  av_frame_free(&select->prev_picref);
460  }
461 }
462 
463 #if CONFIG_ASELECT_FILTER
464 
465 DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
466 AVFILTER_DEFINE_CLASS(aselect);
467 
468 static av_cold int aselect_init(AVFilterContext *ctx)
469 {
470  SelectContext *select = ctx->priv;
471  int ret;
472 
473  if ((ret = init(ctx)) < 0)
474  return ret;
475 
476  if (select->do_scene_detect) {
477  av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
478  return AVERROR(EINVAL);
479  }
480 
481  return 0;
482 }
483 
484 static const AVFilterPad avfilter_af_aselect_inputs[] = {
485  {
486  .name = "default",
487  .type = AVMEDIA_TYPE_AUDIO,
488  .config_props = config_input,
489  .filter_frame = filter_frame,
490  },
491 };
492 
493 const FFFilter ff_af_aselect = {
494  .p.name = "aselect",
495  .p.description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
496  .p.priv_class = &aselect_class,
497  .p.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
498  .init = aselect_init,
499  .uninit = uninit,
500  .priv_size = sizeof(SelectContext),
501  FILTER_INPUTS(avfilter_af_aselect_inputs),
502 };
503 #endif /* CONFIG_ASELECT_FILTER */
504 
505 #if CONFIG_SELECT_FILTER
506 
507 static int query_formats(const AVFilterContext *ctx,
508  AVFilterFormatsConfig **cfg_in,
509  AVFilterFormatsConfig **cfg_out)
510 {
511  const SelectContext *select = ctx->priv;
512 
513  if (select->do_scene_detect) {
514  static const enum AVPixelFormat pix_fmts[] = {
515  AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
516  AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8,
517  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
518  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
519  AV_PIX_FMT_YUV420P10,
520  AV_PIX_FMT_NONE
521  };
522  return ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, pix_fmts);
523  }
524  return 0;
525 }
526 
527 DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
528 AVFILTER_DEFINE_CLASS(select);
529 
530 static av_cold int select_init(AVFilterContext *ctx)
531 {
532  int ret;
533 
534  if ((ret = init(ctx)) < 0)
535  return ret;
536 
537  return 0;
538 }
539 
540 static const AVFilterPad avfilter_vf_select_inputs[] = {
541  {
542  .name = "default",
543  .type = AVMEDIA_TYPE_VIDEO,
544  .config_props = config_input,
545  .filter_frame = filter_frame,
546  },
547 };
548 
549 const FFFilter ff_vf_select = {
550  .p.name = "select",
551  .p.description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
552  .p.priv_class = &select_class,
553  .p.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
554  .init = select_init,
555  .uninit = uninit,
556  .priv_size = sizeof(SelectContext),
557  FILTER_INPUTS(avfilter_vf_select_inputs),
558  FILTER_QUERY_FUNC2(query_formats),
559 };
560 #endif /* CONFIG_SELECT_FILTER */
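
The filter above is normally driven from a filtergraph string (for example -vf "select='gt(scene,0.4)'" on the ffmpeg command line), but it can also be wired up directly through the public libavfilter API. The sketch below is illustrative only and not part of f_select.c: build_select_graph() and the hard-coded buffer-source parameters are hypothetical placeholders, and a real caller would take the frame size, pixel format and time base from its decoder.

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

/* Hypothetical helper: builds an in -> select -> out chain that keeps only
 * key frames ("key" is one of the variables in var_names[] above). */
static int build_select_graph(AVFilterGraph **out_graph,
                              AVFilterContext **out_src,
                              AVFilterContext **out_sink)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *src = NULL, *sel = NULL, *sink = NULL;
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);

    /* Placeholder input description; a real caller derives these values
     * from the stream being filtered. */
    ret = avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"), "in",
                                       "video_size=320x240:pix_fmt=yuv420p:"
                                       "time_base=1/25:pixel_aspect=1/1",
                                       NULL, graph);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&sel, avfilter_get_by_name("select"), "sel",
                                       "expr=key", NULL, graph);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"), "out",
                                       NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(src, 0, sel, 0)) < 0 ||
        (ret = avfilter_link(sel, 0, sink, 0)) < 0 ||
        (ret = avfilter_graph_config(graph, NULL)) < 0)
        goto fail;

    *out_graph = graph;
    *out_src   = src;
    *out_sink  = sink;
    return 0;

fail:
    avfilter_graph_free(&graph);
    return ret;
}

Frames would then be pushed into the graph with av_buffersrc_add_frame() and pulled from the sink with av_buffersink_get_frame(); frames the expression rejects simply never reach the sink.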