FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/display.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/timestamp.h"
41 
42 // FIXME private header, used for mid_pred()
43 #include "libavcodec/mathops.h"
44 
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
63  // frame for temporarily holding output from the filtergraph
65  // frame for sending output to the encoder
67 
69  unsigned sch_idx;
71 
73 {
74  return (FilterGraphPriv*)fg;
75 }
76 
77 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
78 {
79  return (const FilterGraphPriv*)fg;
80 }
81 
82 // data that is local to the filter thread and not visible outside of it
83 typedef struct FilterGraphThread {
85 
87 
88  // Temporary buffer for output frames, since on filtergraph reset
89  // we cannot send them to encoders immediately.
90  // The output index is stored in frame opaque.
92 
93  // index of the next input to request from the scheduler
94  unsigned next_in;
95  // set to 1 after at least one frame passed through this output
96  int got_frame;
97 
98  // EOF status of each input/output, as received by the thread
99  uint8_t *eof_in;
100  uint8_t *eof_out;
102 
103 typedef struct InputFilterPriv {
105 
107 
108  int index;
109 
111 
112  // used to hold submitted input
114 
115  /* for filters that are not yet bound to an input stream,
116  * this stores the input linklabel, if any */
117  uint8_t *linklabel;
118 
119  // filter data type
121  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
122  // same as type otherwise
124 
125  int eof;
126  int bound;
127 
128  // parameters configured for this input
129  int format;
130 
131  int width, height;
135 
138 
140 
142 
144 
147 
148  struct {
149  AVFrame *frame;
150 
153 
154  ///< marks if sub2video_update should force an initialization
155  unsigned int initialize;
156  } sub2video;
158 
160 {
161  return (InputFilterPriv*)ifilter;
162 }
163 
164 typedef struct FPSConvContext {
166  /* number of frames emitted by the video-encoding sync code */
168  /* history of nb_frames_prev, i.e. the number of times the
169  * previous frame was duplicated by vsync code in recent
170  * do_video_out() calls */
172 
173  uint64_t dup_warning;
174 
177 
183 
184 typedef struct OutputFilterPriv {
186 
187  int index;
188 
190 
191  /* desired output stream properties */
192  int format;
193  int width, height;
196 
197  // time base in which the output is sent to our downstream
198  // does not need to match the filtersink's timebase
200  // at least one frame with the above timebase was sent
201  // to our downstream, so it cannot change anymore
203 
205 
206  // those are only set if no format is specified and the encoder gives us multiple options
207  // They point directly to the relevant lists of the encoder.
208  const int *formats;
210  const int *sample_rates;
211 
213  // offset for output timestamps, in AV_TIME_BASE_Q
218 
220 {
221  return (OutputFilterPriv*)ofilter;
222 }
223 
// A command to be injected into the filtergraph: "command" with argument
// "arg" is addressed to filter(s) matching "target" at time "time".
// The three strings are owned by this struct (released in
// filter_command_free()).
// NOTE(review): presumably dispatched via avfilter_graph_send_command()
// elsewhere — the sending side is not visible in this chunk.
// (doc-extraction gap: original line 230 is absent here)
224 typedef struct FilterCommand {
225  char *target;
226  char *command;
227  char *arg;
228 
229  double time;
231 } FilterCommand;
232 
// Free callback for a buffer carrying a FilterCommand: releases the owned
// strings, then the FilterCommand allocation itself. "opaque" is unused.
// The (void *opaque, uint8_t *data) signature matches av_buffer_create()'s
// free callback — presumably used that way; confirm at the call site.
// NOTE(review): original line 235 (presumably
// "FilterCommand *fc = (FilterCommand *)data;") was dropped by the
// doc extraction — "fc" is otherwise undeclared below.
233 static void filter_command_free(void *opaque, uint8_t *data)
234 {
236 
237  av_freep(&fc->target);
238  av_freep(&fc->command);
239  av_freep(&fc->arg);
240 
241  av_free(data);
242 }
243 
245 {
246  AVFrame *frame = ifp->sub2video.frame;
247  int ret;
248 
250 
251  frame->width = ifp->width;
252  frame->height = ifp->height;
253  frame->format = ifp->format;
254  frame->colorspace = ifp->color_space;
255  frame->color_range = ifp->color_range;
256 
258  if (ret < 0)
259  return ret;
260 
261  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
262 
263  return 0;
264 }
265 
266 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
267  AVSubtitleRect *r)
268 {
269  uint32_t *pal, *dst2;
270  uint8_t *src, *src2;
271  int x, y;
272 
273  if (r->type != SUBTITLE_BITMAP) {
274  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
275  return;
276  }
277  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
278  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
279  r->x, r->y, r->w, r->h, w, h
280  );
281  return;
282  }
283 
284  dst += r->y * dst_linesize + r->x * 4;
285  src = r->data[0];
286  pal = (uint32_t *)r->data[1];
287  for (y = 0; y < r->h; y++) {
288  dst2 = (uint32_t *)dst;
289  src2 = src;
290  for (x = 0; x < r->w; x++)
291  *(dst2++) = pal[*(src2++)];
292  dst += dst_linesize;
293  src += r->linesize[0];
294  }
295 }
296 
298 {
299  AVFrame *frame = ifp->sub2video.frame;
300  int ret;
301 
302  av_assert1(frame->data[0]);
303  ifp->sub2video.last_pts = frame->pts = pts;
307  if (ret != AVERROR_EOF && ret < 0)
308  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
309  av_err2str(ret));
310 }
311 
// Render subtitle "sub" onto the input's sub2video canvas and push the
// resulting frame into the buffer source via sub2video_push_ref(). With
// sub == NULL a blank "heartbeat" frame is emitted instead: its pts is
// heartbeat_pts on the very first call (sub2video.initialize set),
// otherwise the previous subtitle's end time.
// NOTE(review): original line 338 (the av_log() call opening that pairs
// with the "Impossible to get a blank canvas" string below) is missing
// from this extraction.
// NOTE(review): "int8_t *dst" looks like a typo for "uint8_t *dst"
// (frame->data[0] is uint8_t*) — confirm against upstream.
312 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
313  const AVSubtitle *sub)
314 {
315  AVFrame *frame = ifp->sub2video.frame;
316  int8_t *dst;
317  int dst_linesize;
318  int num_rects;
319  int64_t pts, end_pts;
320 
321  if (sub) {
322  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
323  AV_TIME_BASE_Q, ifp->time_base);
324  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
325  AV_TIME_BASE_Q, ifp->time_base);
326  num_rects = sub->num_rects;
327  } else {
328  /* If we are initializing the system, utilize current heartbeat
329  PTS as the start time, and show until the following subpicture
330  is received. Otherwise, utilize the previous subpicture's end time
331  as the fall-back value. */
332  pts = ifp->sub2video.initialize ?
333  heartbeat_pts : ifp->sub2video.end_pts;
334  end_pts = INT64_MAX;
335  num_rects = 0;
336  }
337  if (sub2video_get_blank_frame(ifp) < 0) {
339  "Impossible to get a blank canvas.\n");
340  return;
341  }
342  dst = frame->data [0];
343  dst_linesize = frame->linesize[0];
344  for (int i = 0; i < num_rects; i++)
345  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
346  sub2video_push_ref(ifp, pts);
347  ifp->sub2video.end_pts = end_pts;
348  ifp->sub2video.initialize = 0;
349 }
350 
351 /* *dst may return be set to NULL (no pixel format found), a static string or a
352  * string backed by the bprint. Nothing has been written to the AVBPrint in case
353  * NULL is returned. The AVBPrint provided should be clean. */
// See the block comment above: on return, *dst is NULL, a static pixel
// format name, or a '|'-separated list of format names backed by bprint.
// NOTE(review): original line 364 is missing from this extraction —
// presumably "av_get_pix_fmt_name(ofp->format);", completing the
// ternary on the previous line. Confirm against upstream.
354 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
355  const char **dst)
356 {
357  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
358  OutputStream *ost = ofilter->ost;
359 
360  *dst = NULL;
361 
362  if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
363  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
365  } else if (ofp->formats) {
366  const enum AVPixelFormat *p = ofp->formats;
367 
368  for (; *p != AV_PIX_FMT_NONE; p++) {
369  const char *name = av_get_pix_fmt_name(*p);
370  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
371  }
372  if (!av_bprint_is_complete(bprint))
373  return AVERROR(ENOMEM);
374 
375  *dst = bprint->str;
376  }
377 
378  return 0;
379 }
380 
381 /* Define a function for appending a list of allowed formats
382  * to an AVBPrint. If nonempty, the list will have a header. */
// Generated function choose_<name>() appends "name=v1|v2|...:" to bprint:
//   name           - option key written into the string ("sample_fmts", ...)
//   type           - element type of the supported list
//   var            - OutputFilterPriv field holding the configured value
//   supported_list - OutputFilterPriv field with the encoder's choices
//   none           - sentinel meaning "unset", also the list terminator
//   printf_format / get_name - how to stringify a single value
// An explicitly configured value wins over the supported list; if neither
// is set, nothing is appended at all (no trailing ':').
383 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
384 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
385 { \
386  if (ofp->var == none && !ofp->supported_list) \
387  return; \
388  av_bprintf(bprint, #name "="); \
389  if (ofp->var != none) { \
390  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
391  } else { \
392  const type *p; \
393  \
394  for (p = ofp->supported_list; *p != none; p++) { \
395  av_bprintf(bprint, printf_format "|", get_name(*p)); \
396  } \
397  if (bprint->len > 0) \
398  bprint->str[--bprint->len] = '\0'; \
399  } \
400  av_bprint_chars(bprint, ':', 1); \
401 }
402 
403 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
404 // GET_PIX_FMT_NAME)
405 
408 
410  "%d", )
411 
// Append "channel_layouts=<list>:" to bprint: the configured layout if
// valid, otherwise the '|'-separated encoder-supported list; appends
// nothing when neither is available. Hand-written (not via
// DEF_CHOOSE_FORMAT) because AVChannelLayout needs describe_bprint.
// NOTE(review): original line 422 is missing from this extraction —
// presumably "av_channel_layout_describe_bprint(p, bprint);" inside
// the loop, before the '|' separator. Confirm against upstream.
412 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
413 {
414  if (av_channel_layout_check(&ofp->ch_layout)) {
415  av_bprintf(bprint, "channel_layouts=");
416  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
417  } else if (ofp->ch_layouts) {
418  const AVChannelLayout *p;
419 
420  av_bprintf(bprint, "channel_layouts=");
421  for (p = ofp->ch_layouts; p->nb_channels; p++) {
423  av_bprintf(bprint, "|");
424  }
425  if (bprint->len > 0)
426  bprint->str[--bprint->len] = '\0';
427  } else
428  return;
429  av_bprint_chars(bprint, ':', 1);
430 }
431 
432 static int read_binary(const char *path, uint8_t **data, int *len)
433 {
434  AVIOContext *io = NULL;
435  int64_t fsize;
436  int ret;
437 
438  *data = NULL;
439  *len = 0;
440 
441  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
442  if (ret < 0) {
443  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
444  path, av_err2str(ret));
445  return ret;
446  }
447 
448  fsize = avio_size(io);
449  if (fsize < 0 || fsize > INT_MAX) {
450  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
451  ret = AVERROR(EIO);
452  goto fail;
453  }
454 
455  *data = av_malloc(fsize);
456  if (!*data) {
457  ret = AVERROR(ENOMEM);
458  goto fail;
459  }
460 
461  ret = avio_read(io, *data, fsize);
462  if (ret != fsize) {
463  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
464  ret = ret < 0 ? ret : AVERROR(EIO);
465  goto fail;
466  }
467 
468  *len = fsize;
469 
470  ret = 0;
471 fail:
472  avio_close(io);
473  if (ret < 0) {
474  av_freep(data);
475  *len = 0;
476  }
477  return ret;
478 }
479 
// Apply one key=value option to filter context f. A key that starts with
// '/' means "load the option value from the file named by val": binary
// options receive the raw file bytes, all others the file's text content
// (via file_read()). Returns 0 on success, a negative error code on
// failure (logged under err_apply/err_load).
// NOTE(review): this extraction dropped several lines — presumably:
//   485: the initial option-set call whose result is tested below,
//   490: the av_opt_find() lookup of key+1 assigning "o",
//   506/515: the calls applying the loaded data (binary vs. string),
//   524/529: the av_log() openings for the two error labels.
// These are guesses from context — confirm against upstream ffmpeg_filter.c.
480 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
481 {
482  const AVOption *o = NULL;
483  int ret;
484 
486  if (ret >= 0)
487  return 0;
488 
489  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
491  if (!o)
492  goto err_apply;
493 
494  // key is a valid option name prefixed with '/'
495  // interpret value as a path from which to load the actual option value
496  key++;
497 
498  if (o->type == AV_OPT_TYPE_BINARY) {
499  uint8_t *data;
500  int len;
501 
502  ret = read_binary(val, &data, &len);
503  if (ret < 0)
504  goto err_load;
505 
507  av_freep(&data);
508  } else {
509  char *data = file_read(val);
510  if (!data) {
511  ret = AVERROR(EIO);
512  goto err_load;
513  }
514 
516  av_freep(&data);
517  }
518  if (ret < 0)
519  goto err_apply;
520 
521  return 0;
522 
523 err_apply:
525  "Error applying option '%s' to filter '%s': %s\n",
526  key, f->filter->name, av_err2str(ret));
527  return ret;
528 err_load:
530  "Error loading value for option '%s' from file '%s'\n",
531  key, val);
532  return ret;
533 }
534 
536 {
537  for (size_t i = 0; i < seg->nb_chains; i++) {
538  AVFilterChain *ch = seg->chains[i];
539 
540  for (size_t j = 0; j < ch->nb_filters; j++) {
541  AVFilterParams *p = ch->filters[j];
542  const AVDictionaryEntry *e = NULL;
543 
544  av_assert0(p->filter);
545 
546  while ((e = av_dict_iterate(p->opts, e))) {
547  int ret = filter_opt_apply(p->filter, e->key, e->value);
548  if (ret < 0)
549  return ret;
550  }
551 
552  av_dict_free(&p->opts);
553  }
554  }
555 
556  return 0;
557 }
558 
// Parse the textual graph description "desc" into "graph" using the
// segment API, attach hw_device (if any) to every filter flagged
// AVFILTER_FLAG_HWDEVICE, then apply per-filter options collected in the
// segment via graph_opts_apply().
// NOTE(review): this extraction dropped several lines — presumably:
//   560: the "AVFilterInOut **inputs, AVFilterInOut **outputs" parameters
//        (they are assigned below),
//   563: the local AVFilterGraphSegment *seg declaration,
//   573: avfilter_graph_segment_create_filters(),
//   595: the final segment apply / inout extraction before "fail",
//   598: avfilter_graph_segment_free(&seg).
// Confirm against upstream ffmpeg_filter.c.
559 static int graph_parse(AVFilterGraph *graph, const char *desc,
561  AVBufferRef *hw_device)
562 {
564  int ret;
565 
566  *inputs = NULL;
567  *outputs = NULL;
568 
569  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
570  if (ret < 0)
571  return ret;
572 
574  if (ret < 0)
575  goto fail;
576 
577  if (hw_device) {
578  for (int i = 0; i < graph->nb_filters; i++) {
579  AVFilterContext *f = graph->filters[i];
580 
581  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
582  continue;
583  f->hw_device_ctx = av_buffer_ref(hw_device);
584  if (!f->hw_device_ctx) {
585  ret = AVERROR(ENOMEM);
586  goto fail;
587  }
588  }
589  }
590 
591  ret = graph_opts_apply(seg);
592  if (ret < 0)
593  goto fail;
594 
596 
597 fail:
599  return ret;
600 }
601 
602 // Filters can be configured only if the formats of all inputs are known.
604 {
605  for (int i = 0; i < fg->nb_inputs; i++) {
607  if (ifp->format < 0)
608  return 0;
609  }
610  return 1;
611 }
612 
613 static int filter_thread(void *arg);
614 
615 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
616 {
617  AVFilterContext *ctx = inout->filter_ctx;
618  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
619  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
620 
621  if (nb_pads > 1)
622  return av_strdup(ctx->filter->name);
623  return av_asprintf("%s:%s", ctx->filter->name,
624  avfilter_pad_get_name(pads, inout->pad_idx));
625 }
626 
628 {
629  OutputFilterPriv *ofp;
630  OutputFilter *ofilter;
631 
632  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
633  if (!ofp)
634  return NULL;
635 
636  ofilter = &ofp->ofilter;
637  ofilter->graph = fg;
638  ofp->format = -1;
639  ofp->index = fg->nb_outputs - 1;
640 
641  return ofilter;
642 }
643 
// Bind filtergraph input "ifilter" to demuxed input stream "ist":
// registers the filter with the stream's decoder (ist_filter_add) and
// connects decoder output to this filtergraph input in the scheduler.
// For subtitle sources, additionally sets up the sub2video canvas
// (RGB32, sized from opts, AV_TIME_BASE_Q timebase).
// NOTE(review): this extraction dropped lines — presumably:
//   654: the second half of the type-mismatch condition (the sub2video
//        subtitle->video exception) ending with "{",
//   656: the av_media_type_string() arguments of the error log.
// Confirm against upstream ffmpeg_filter.c.
644 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
645 {
646  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
647  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
648  int ret, dec_idx;
649 
650  av_assert0(!ifp->bound);
651  ifp->bound = 1;
652 
653  if (ifp->type != ist->par->codec_type &&
655  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
657  return AVERROR(EINVAL);
658  }
659 
660  ifp->type_src = ist->st->codecpar->codec_type;
661 
662  ifp->opts.fallback = av_frame_alloc();
663  if (!ifp->opts.fallback)
664  return AVERROR(ENOMEM);
665 
666  dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
667  &ifp->opts);
668  if (dec_idx < 0)
669  return dec_idx;
670 
671  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
672  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
673  if (ret < 0)
674  return ret;
675 
676  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
677  ifp->sub2video.frame = av_frame_alloc();
678  if (!ifp->sub2video.frame)
679  return AVERROR(ENOMEM);
680 
681  ifp->width = ifp->opts.sub2video_width;
682  ifp->height = ifp->opts.sub2video_height;
683 
684  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
685  palettes for all rectangles are identical or compatible */
686  ifp->format = AV_PIX_FMT_RGB32;
687 
688  ifp->time_base = AV_TIME_BASE_Q;
689 
690  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
691  ifp->width, ifp->height);
692  }
693 
694  return 0;
695 }
696 
698 {
700  int ret, dec_idx;
701 
702  av_assert0(!ifp->bound);
703  ifp->bound = 1;
704 
705  if (ifp->type != dec->type) {
706  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
708  return AVERROR(EINVAL);
709  }
710 
711  ifp->type_src = ifp->type;
712 
713  dec_idx = dec_filter_add(dec, &ifp->ifilter, &ifp->opts);
714  if (dec_idx < 0)
715  return dec_idx;
716 
717  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
718  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
719  if (ret < 0)
720  return ret;
721 
722  return 0;
723 }
724 
726 {
727  const AVCodec *c = ost->enc_ctx->codec;
728  int i, err;
729 
730  if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
731  /* Pass the layout through for all orders but UNSPEC */
732  err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
733  if (err < 0)
734  return err;
735  return 0;
736  }
737 
738  /* Requested layout is of order UNSPEC */
739  if (!c->ch_layouts) {
740  /* Use the default native layout for the requested amount of channels when the
741  encoder doesn't have a list of supported layouts */
742  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
743  return 0;
744  }
745  /* Encoder has a list of supported layouts. Pick the first layout in it with the
746  same amount of channels as the requested layout */
747  for (i = 0; c->ch_layouts[i].nb_channels; i++) {
748  if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
749  break;
750  }
751  if (c->ch_layouts[i].nb_channels) {
752  /* Use it if one is found */
753  err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
754  if (err < 0)
755  return err;
756  return 0;
757  }
758  /* If no layout for the amount of channels requested was found, use the default
759  native layout for it. */
760  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
761 
762  return 0;
763 }
764 
766  unsigned sched_idx_enc)
767 {
768  const OutputFile *of = ost->file;
769  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
770  FilterGraph *fg = ofilter->graph;
771  FilterGraphPriv *fgp = fgp_from_fg(fg);
772  const AVCodec *c = ost->enc_ctx->codec;
773  int ret;
774 
775  av_assert0(!ofilter->ost);
776 
777  ofilter->ost = ost;
778  av_freep(&ofilter->linklabel);
779 
780  ofp->ts_offset = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
781  ofp->enc_timebase = ost->enc_timebase;
782 
783  switch (ost->enc_ctx->codec_type) {
784  case AVMEDIA_TYPE_VIDEO:
785  ofp->width = ost->enc_ctx->width;
786  ofp->height = ost->enc_ctx->height;
787  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
788  ofp->format = ost->enc_ctx->pix_fmt;
789  } else {
790  ofp->formats = c->pix_fmts;
791 
792  // MJPEG encoder exports a full list of supported pixel formats,
793  // but the full-range ones are experimental-only.
794  // Restrict the auto-conversion list unless -strict experimental
795  // has been specified.
796  if (!strcmp(c->name, "mjpeg")) {
797  // FIXME: YUV420P etc. are actually supported with full color range,
798  // yet the latter information isn't available here.
799  static const enum AVPixelFormat mjpeg_formats[] =
801  AV_PIX_FMT_NONE };
802 
803  const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
804  int strict_val = ost->enc_ctx->strict_std_compliance;
805 
806  if (strict) {
807  const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
808  av_assert0(o);
809  av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
810  }
811 
812  if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
813  ofp->formats = mjpeg_formats;
814  }
815  }
816 
817  fgp->disable_conversions |= ost->keep_pix_fmt;
818 
819  ofp->fps.last_frame = av_frame_alloc();
820  if (!ofp->fps.last_frame)
821  return AVERROR(ENOMEM);
822 
823  ofp->fps.framerate = ost->frame_rate;
824  ofp->fps.framerate_max = ost->max_frame_rate;
825  ofp->fps.framerate_supported = ost->force_fps ?
826  NULL : c->supported_framerates;
827 
828  // reduce frame rate for mpeg4 to be within the spec limits
829  if (c->id == AV_CODEC_ID_MPEG4)
830  ofp->fps.framerate_clip = 65535;
831 
832  ofp->fps.dup_warning = 1000;
833 
834  break;
835  case AVMEDIA_TYPE_AUDIO:
836  if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
837  ofp->format = ost->enc_ctx->sample_fmt;
838  } else {
839  ofp->formats = c->sample_fmts;
840  }
841  if (ost->enc_ctx->sample_rate) {
842  ofp->sample_rate = ost->enc_ctx->sample_rate;
843  } else {
844  ofp->sample_rates = c->supported_samplerates;
845  }
846  if (ost->enc_ctx->ch_layout.nb_channels) {
847  int ret = set_channel_layout(ofp, ost);
848  if (ret < 0)
849  return ret;
850  } else if (c->ch_layouts) {
851  ofp->ch_layouts = c->ch_layouts;
852  }
853  break;
854  }
855 
856  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
857  SCH_ENC(sched_idx_enc));
858  if (ret < 0)
859  return ret;
860 
861  return 0;
862 }
863 
865 {
866  InputFilterPriv *ifp;
867  InputFilter *ifilter;
868 
869  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
870  if (!ifp)
871  return NULL;
872 
873  ifilter = &ifp->ifilter;
874  ifilter->graph = fg;
875 
876  ifp->frame = av_frame_alloc();
877  if (!ifp->frame)
878  return NULL;
879 
880  ifp->index = fg->nb_inputs - 1;
881  ifp->format = -1;
884 
886  if (!ifp->frame_queue)
887  return NULL;
888 
889  return ifilter;
890 }
891 
// Free a FilterGraph and everything it owns: all input filters (queued
// frames, frames, labels, names), all output filters, the graph
// description string, and the two scratch frames; finally the
// FilterGraphPriv itself. NULL-safe; *pfg is set to NULL.
// NOTE(review): this extraction dropped interior lines — presumably the
// av_frame_free() inside the fifo-drain loop (908-909), the
// av_fifo_freep2() of frame_queue (911), and per-output frees around
// 927/931 (e.g. fps.last_frame, ch_layout uninit). Confirm against
// upstream ffmpeg_filter.c.
892 void fg_free(FilterGraph **pfg)
893 {
894  FilterGraph *fg = *pfg;
895  FilterGraphPriv *fgp;
896 
897  if (!fg)
898  return;
899  fgp = fgp_from_fg(fg);
900 
901  for (int j = 0; j < fg->nb_inputs; j++) {
902  InputFilter *ifilter = fg->inputs[j];
903  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
904 
905  if (ifp->frame_queue) {
906  AVFrame *frame;
907  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
910  }
912 
913  av_frame_free(&ifp->frame);
914  av_frame_free(&ifp->opts.fallback);
915 
917  av_freep(&ifp->linklabel);
918  av_freep(&ifp->opts.name);
919  av_freep(&ifilter->name);
920  av_freep(&fg->inputs[j]);
921  }
922  av_freep(&fg->inputs);
923  for (int j = 0; j < fg->nb_outputs; j++) {
924  OutputFilter *ofilter = fg->outputs[j];
925  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
926 
928 
929  av_freep(&ofilter->linklabel);
930  av_freep(&ofilter->name);
932  av_freep(&fg->outputs[j]);
933  }
934  av_freep(&fg->outputs);
935  av_freep(&fgp->graph_desc);
936 
937  av_frame_free(&fgp->frame);
938  av_frame_free(&fgp->frame_enc);
939 
940  av_freep(pfg);
941 }
942 
943 static const char *fg_item_name(void *obj)
944 {
945  const FilterGraphPriv *fgp = obj;
946 
947  return fgp->log_name;
948 }
949 
950 static const AVClass fg_class = {
951  .class_name = "FilterGraph",
952  .version = LIBAVUTIL_VERSION_INT,
953  .item_name = fg_item_name,
954  .category = AV_CLASS_CATEGORY_FILTER,
955 };
956 
// Create a FilterGraph from the textual description "graph_desc" (whose
// ownership is taken — it is freed in fg_free()), append it to the global
// filtergraphs array, and register it with the scheduler. A throwaway
// AVFilterGraph is parsed only to discover the graph's unconnected inputs
// and outputs, which become InputFilter/OutputFilter entries; it is freed
// before returning. Returns 0 on success, negative error code on failure.
// NOTE(review): this extraction dropped lines — presumably:
//   962: "AVFilterInOut *inputs, *outputs;" (used below),
//   977: one more fgp field initialization,
//   1070-1071: avfilter_inout_free(&inputs)/(&outputs) under "fail".
// Confirm against upstream ffmpeg_filter.c.
957 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
958 {
959  FilterGraphPriv *fgp;
960  FilterGraph *fg;
961 
963  AVFilterGraph *graph;
964  int ret = 0;
965 
966  fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
967  if (!fgp)
968  return AVERROR(ENOMEM);
969  fg = &fgp->fg;
970 
971  if (pfg)
972  *pfg = fg;
973 
974  fg->class = &fg_class;
975  fg->index = nb_filtergraphs - 1;
976  fgp->graph_desc = graph_desc;
978  fgp->sch = sch;
979 
980  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
981 
982  fgp->frame = av_frame_alloc();
983  fgp->frame_enc = av_frame_alloc();
984  if (!fgp->frame || !fgp->frame_enc)
985  return AVERROR(ENOMEM);
986 
987  /* this graph is only used for determining the kinds of inputs
988  * and outputs we have, and is discarded on exit from this function */
989  graph = avfilter_graph_alloc();
990  if (!graph)
// NOTE(review): stray double semicolon below — harmless, but a typo.
991  return AVERROR(ENOMEM);;
992  graph->nb_threads = 1;
993 
994  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
995  if (ret < 0)
996  goto fail;
997 
// A "source" is a filter with zero input pads and no dynamic inputs.
998  for (unsigned i = 0; i < graph->nb_filters; i++) {
999  const AVFilter *f = graph->filters[i]->filter;
1000  if (!avfilter_filter_pad_count(f, 0) &&
1001  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
1002  fgp->have_sources = 1;
1003  break;
1004  }
1005  }
1006 
1007  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1008  InputFilter *const ifilter = ifilter_alloc(fg);
1009  InputFilterPriv *ifp;
1010 
1011  if (!ifilter) {
1012  ret = AVERROR(ENOMEM);
1013  goto fail;
1014  }
1015 
1016  ifp = ifp_from_ifilter(ifilter);
// Steal the link label from the AVFilterInOut rather than copying it.
1017  ifp->linklabel = cur->name;
1018  cur->name = NULL;
1019 
1020  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1021  cur->pad_idx);
1022 
1023  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1024  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1025  "currently.\n");
1026  ret = AVERROR(ENOSYS);
1027  goto fail;
1028  }
1029 
1030  ifilter->name = describe_filter_link(fg, cur, 1);
1031  if (!ifilter->name) {
1032  ret = AVERROR(ENOMEM);
1033  goto fail;
1034  }
1035  }
1036 
1037  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1038  OutputFilter *const ofilter = ofilter_alloc(fg);
1039 
1040  if (!ofilter) {
1041  ret = AVERROR(ENOMEM);
1042  goto fail;
1043  }
1044 
1045  ofilter->linklabel = cur->name;
1046  cur->name = NULL;
1047 
1048  ofilter->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1049  cur->pad_idx);
1050  ofilter->name = describe_filter_link(fg, cur, 0);
1051  if (!ofilter->name) {
1052  ret = AVERROR(ENOMEM);
1053  goto fail;
1054  }
1055  }
1056 
1057  if (!fg->nb_outputs) {
1058  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1059  ret = AVERROR(ENOSYS);
1060  goto fail;
1061  }
1062 
1063  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1064  filter_thread, fgp);
1065  if (ret < 0)
1066  goto fail;
1067  fgp->sch_idx = ret;
1068 
// Fallthrough on success: "fail" doubles as the common cleanup path.
1069 fail:
1072  avfilter_graph_free(&graph);
1073 
1074  if (ret < 0)
1075  return ret;
1076 
1077  return 0;
1078 }
1079 
1081  char *graph_desc,
1082  Scheduler *sch, unsigned sched_idx_enc)
1083 {
1084  FilterGraph *fg;
1085  FilterGraphPriv *fgp;
1086  int ret;
1087 
1088  ret = fg_create(&fg, graph_desc, sch);
1089  if (ret < 0)
1090  return ret;
1091  fgp = fgp_from_fg(fg);
1092 
1093  fgp->is_simple = 1;
1094 
1095  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
1096  av_get_media_type_string(ost->type)[0],
1097  ost->file->index, ost->index);
1098 
1099  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1100  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1101  "to have exactly 1 input and 1 output. "
1102  "However, it had %d input(s) and %d output(s). Please adjust, "
1103  "or use a complex filtergraph (-filter_complex) instead.\n",
1104  graph_desc, fg->nb_inputs, fg->nb_outputs);
1105  return AVERROR(EINVAL);
1106  }
1107 
1108  ost->filter = fg->outputs[0];
1109 
1110  ret = ifilter_bind_ist(fg->inputs[0], ist);
1111  if (ret < 0)
1112  return ret;
1113 
1114  ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc);
1115  if (ret < 0)
1116  return ret;
1117 
1118  return 0;
1119 }
1120 
1122 {
1123  FilterGraphPriv *fgp = fgp_from_fg(fg);
1124  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1125  InputStream *ist = NULL;
1126  enum AVMediaType type = ifp->type;
1127  int i, ret;
1128 
1129  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1130  // bind to a standalone decoder
1131  int dec_idx;
1132 
1133  dec_idx = strtol(ifp->linklabel + 4, NULL, 0);
1134  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1135  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1136  dec_idx, fgp->graph_desc);
1137  return AVERROR(EINVAL);
1138  }
1139 
1140  ret = ifilter_bind_dec(ifp, decoders[dec_idx]);
1141  if (ret < 0)
1142  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1143  ifilter->name);
1144  return ret;
1145  } else if (ifp->linklabel) {
1146  // bind to an explicitly specified demuxer stream
1147  AVFormatContext *s;
1148  AVStream *st = NULL;
1149  char *p;
1150  int file_idx = strtol(ifp->linklabel, &p, 0);
1151 
1152  if (file_idx < 0 || file_idx >= nb_input_files) {
1153  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1154  file_idx, fgp->graph_desc);
1155  return AVERROR(EINVAL);
1156  }
1157  s = input_files[file_idx]->ctx;
1158 
1159  for (i = 0; i < s->nb_streams; i++) {
1160  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1161  if (stream_type != type &&
1162  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1163  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1164  continue;
1165  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1166  st = s->streams[i];
1167  break;
1168  }
1169  }
1170  if (!st) {
1171  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1172  "matches no streams.\n", p, fgp->graph_desc);
1173  return AVERROR(EINVAL);
1174  }
1175  ist = input_files[file_idx]->streams[st->index];
1176 
1177  av_log(fg, AV_LOG_VERBOSE,
1178  "Binding input with label '%s' to input stream %d:%d\n",
1179  ifp->linklabel, ist->file->index, ist->index);
1180  } else {
1181  ist = ist_find_unused(type);
1182  if (!ist) {
1183  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1184  "unlabeled input pad %s\n", ifilter->name);
1185  return AVERROR(EINVAL);
1186  }
1187 
1188  av_log(fg, AV_LOG_VERBOSE,
1189  "Binding unlabeled input %d to input stream %d:%d\n",
1190  ifp->index, ist->file->index, ist->index);
1191  }
1192  av_assert0(ist);
1193 
1194  ret = ifilter_bind_ist(ifilter, ist);
1195  if (ret < 0) {
1196  av_log(fg, AV_LOG_ERROR,
1197  "Error binding an input stream to complex filtergraph input %s.\n",
1198  ifilter->name);
1199  return ret;
1200  }
1201 
1202  return 0;
1203 }
1204 
1206 {
1207  // bind filtergraph inputs to input streams
1208  for (int i = 0; i < fg->nb_inputs; i++) {
1210  int ret;
1211 
1212  if (ifp->bound)
1213  continue;
1214 
1215  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1216  if (ret < 0)
1217  return ret;
1218  }
1219 
1220  for (int i = 0; i < fg->nb_outputs; i++) {
1221  OutputFilter *output = fg->outputs[i];
1222  if (!output->ost) {
1224  "Filter %s has an unconnected output\n", output->name);
1225  return AVERROR(EINVAL);
1226  }
1227  }
1228  return 0;
1229 }
1230 
1232  AVFilterContext **last_filter, int *pad_idx,
1233  const char *filter_name)
1234 {
1235  AVFilterGraph *graph = (*last_filter)->graph;
1237  const AVFilter *trim;
1238  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1239  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1240  int ret = 0;
1241 
1242  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1243  return 0;
1244 
1245  trim = avfilter_get_by_name(name);
1246  if (!trim) {
1247  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1248  "recording time.\n", name);
1249  return AVERROR_FILTER_NOT_FOUND;
1250  }
1251 
1252  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1253  if (!ctx)
1254  return AVERROR(ENOMEM);
1255 
1256  if (duration != INT64_MAX) {
1257  ret = av_opt_set_int(ctx, "durationi", duration,
1259  }
1260  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1261  ret = av_opt_set_int(ctx, "starti", start_time,
1263  }
1264  if (ret < 0) {
1265  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1266  return ret;
1267  }
1268 
1270  if (ret < 0)
1271  return ret;
1272 
1273  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1274  if (ret < 0)
1275  return ret;
1276 
1277  *last_filter = ctx;
1278  *pad_idx = 0;
1279  return 0;
1280 }
1281 
1282 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1283  const char *filter_name, const char *args)
1284 {
1285  AVFilterGraph *graph = (*last_filter)->graph;
1287  int ret;
1288 
1290  avfilter_get_by_name(filter_name),
1291  filter_name, args, NULL, graph);
1292  if (ret < 0)
1293  return ret;
1294 
1295  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1296  if (ret < 0)
1297  return ret;
1298 
1299  *last_filter = ctx;
1300  *pad_idx = 0;
1301  return 0;
1302 }
1303 
1305  OutputFilter *ofilter, AVFilterInOut *out)
1306 {
1307  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1308  OutputStream *ost = ofilter->ost;
1309  OutputFile *of = ost->file;
1310  AVFilterContext *last_filter = out->filter_ctx;
1311  AVBPrint bprint;
1312  int pad_idx = out->pad_idx;
1313  int ret;
1314  const char *pix_fmts;
1315  char name[255];
1316 
1317  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1319  avfilter_get_by_name("buffersink"),
1320  name, NULL, NULL, graph);
1321 
1322  if (ret < 0)
1323  return ret;
1324 
1325  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1326  char args[255];
1328  const AVDictionaryEntry *e = NULL;
1329 
1330  snprintf(args, sizeof(args), "%d:%d",
1331  ofp->width, ofp->height);
1332 
1333  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1334  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1335  }
1336 
1337  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1338  ost->file->index, ost->index);
1340  name, args, NULL, graph)) < 0)
1341  return ret;
1342  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1343  return ret;
1344 
1345  last_filter = filter;
1346  pad_idx = 0;
1347  }
1348 
1350  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1351  if (ret < 0)
1352  return ret;
1353 
1354  if (pix_fmts) {
1356 
1358  avfilter_get_by_name("format"),
1359  "format", pix_fmts, NULL, graph);
1360  av_bprint_finalize(&bprint, NULL);
1361  if (ret < 0)
1362  return ret;
1363  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1364  return ret;
1365 
1366  last_filter = filter;
1367  pad_idx = 0;
1368  }
1369 
1370  snprintf(name, sizeof(name), "trim_out_%d_%d",
1371  ost->file->index, ost->index);
1373  &last_filter, &pad_idx, name);
1374  if (ret < 0)
1375  return ret;
1376 
1377 
1378  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1379  return ret;
1380 
1381  return 0;
1382 }
1383 
1385  OutputFilter *ofilter, AVFilterInOut *out)
1386 {
1387  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1388  OutputStream *ost = ofilter->ost;
1389  OutputFile *of = ost->file;
1390  AVFilterContext *last_filter = out->filter_ctx;
1391  int pad_idx = out->pad_idx;
1392  AVBPrint args;
1393  char name[255];
1394  int ret;
1395 
1396  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1398  avfilter_get_by_name("abuffersink"),
1399  name, NULL, NULL, graph);
1400  if (ret < 0)
1401  return ret;
1402  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1403  return ret;
1404 
1405 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1406  AVFilterContext *filt_ctx; \
1407  \
1408  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1409  "similarly to -af " filter_name "=%s.\n", arg); \
1410  \
1411  ret = avfilter_graph_create_filter(&filt_ctx, \
1412  avfilter_get_by_name(filter_name), \
1413  filter_name, arg, NULL, graph); \
1414  if (ret < 0) \
1415  goto fail; \
1416  \
1417  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1418  if (ret < 0) \
1419  goto fail; \
1420  \
1421  last_filter = filt_ctx; \
1422  pad_idx = 0; \
1423 } while (0)
1425 
1426  choose_sample_fmts(ofp, &args);
1427  choose_sample_rates(ofp, &args);
1428  choose_channel_layouts(ofp, &args);
1429  if (!av_bprint_is_complete(&args)) {
1430  ret = AVERROR(ENOMEM);
1431  goto fail;
1432  }
1433  if (args.len) {
1435 
1436  snprintf(name, sizeof(name), "format_out_%d_%d",
1437  ost->file->index, ost->index);
1439  avfilter_get_by_name("aformat"),
1440  name, args.str, NULL, graph);
1441  if (ret < 0)
1442  goto fail;
1443 
1444  ret = avfilter_link(last_filter, pad_idx, format, 0);
1445  if (ret < 0)
1446  goto fail;
1447 
1448  last_filter = format;
1449  pad_idx = 0;
1450  }
1451 
1452  if (ost->apad && of->shortest) {
1453  int i;
1454 
1455  for (i = 0; i < of->nb_streams; i++)
1457  break;
1458 
1459  if (i < of->nb_streams) {
1460  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1461  }
1462  }
1463 
1464  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1465  ost->file->index, ost->index);
1467  &last_filter, &pad_idx, name);
1468  if (ret < 0)
1469  goto fail;
1470 
1471  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1472  goto fail;
1473 fail:
1474  av_bprint_finalize(&args, NULL);
1475 
1476  return ret;
1477 }
1478 
1480  OutputFilter *ofilter, AVFilterInOut *out)
1481 {
1482  if (!ofilter->ost) {
1483  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1484  return AVERROR(EINVAL);
1485  }
1486 
1487  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1488  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1489  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1490  default: av_assert0(0); return 0;
1491  }
1492 }
1493 
1495 {
1496  ifp->sub2video.last_pts = INT64_MIN;
1497  ifp->sub2video.end_pts = INT64_MIN;
1498 
1499  /* sub2video structure has been (re-)initialized.
1500  Mark it as such so that the system will be
1501  initialized with the first received heartbeat. */
1502  ifp->sub2video.initialize = 1;
1503 }
1504 
1506  InputFilter *ifilter, AVFilterInOut *in)
1507 {
1508  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1509 
1510  AVFilterContext *last_filter;
1511  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1512  const AVPixFmtDescriptor *desc;
1513  AVRational fr = ifp->opts.framerate;
1514  AVRational sar;
1515  AVBPrint args;
1516  char name[255];
1517  int ret, pad_idx = 0;
1519  if (!par)
1520  return AVERROR(ENOMEM);
1521 
1522  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1523  sub2video_prepare(ifp);
1524 
1525  sar = ifp->sample_aspect_ratio;
1526  if(!sar.den)
1527  sar = (AVRational){0,1};
1529  av_bprintf(&args,
1530  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1531  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1532  ifp->width, ifp->height, ifp->format,
1533  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1534  ifp->color_space, ifp->color_range);
1535  if (fr.num && fr.den)
1536  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1537  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1538  ifp->opts.name);
1539 
1540 
1541  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1542  args.str, NULL, graph)) < 0)
1543  goto fail;
1544  par->hw_frames_ctx = ifp->hw_frames_ctx;
1545  ret = av_buffersrc_parameters_set(ifp->filter, par);
1546  if (ret < 0)
1547  goto fail;
1548  av_freep(&par);
1549  last_filter = ifp->filter;
1550 
1552  av_assert0(desc);
1553 
1554  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1555  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1556  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1557  int32_t *displaymatrix = ifp->displaymatrix;
1558  double theta;
1559 
1560  theta = get_rotation(displaymatrix);
1561 
1562  if (fabs(theta - 90) < 1.0) {
1563  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1564  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1565  } else if (fabs(theta - 180) < 1.0) {
1566  if (displaymatrix[0] < 0) {
1567  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1568  if (ret < 0)
1569  return ret;
1570  }
1571  if (displaymatrix[4] < 0) {
1572  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1573  }
1574  } else if (fabs(theta - 270) < 1.0) {
1575  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1576  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1577  } else if (fabs(theta) > 1.0) {
1578  char rotate_buf[64];
1579  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1580  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1581  } else if (fabs(theta) < 1.0) {
1582  if (displaymatrix && displaymatrix[4] < 0) {
1583  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1584  }
1585  }
1586  if (ret < 0)
1587  return ret;
1588  }
1589 
1590  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1592  &last_filter, &pad_idx, name);
1593  if (ret < 0)
1594  return ret;
1595 
1596  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1597  return ret;
1598  return 0;
1599 fail:
1600  av_freep(&par);
1601 
1602  return ret;
1603 }
1604 
1606  InputFilter *ifilter, AVFilterInOut *in)
1607 {
1608  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1609  AVFilterContext *last_filter;
1610  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1611  AVBPrint args;
1612  char name[255];
1613  int ret, pad_idx = 0;
1614 
1616  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1617  ifp->time_base.num, ifp->time_base.den,
1618  ifp->sample_rate,
1620  if (av_channel_layout_check(&ifp->ch_layout) &&
1622  av_bprintf(&args, ":channel_layout=");
1624  } else
1625  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1626  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1627 
1628  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1629  name, args.str, NULL,
1630  graph)) < 0)
1631  return ret;
1632  last_filter = ifp->filter;
1633 
1634  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1636  &last_filter, &pad_idx, name);
1637  if (ret < 0)
1638  return ret;
1639 
1640  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1641  return ret;
1642 
1643  return 0;
1644 }
1645 
1647  InputFilter *ifilter, AVFilterInOut *in)
1648 {
1649  switch (ifp_from_ifilter(ifilter)->type) {
1650  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1651  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1652  default: av_assert0(0); return 0;
1653  }
1654 }
1655 
1657 {
1658  for (int i = 0; i < fg->nb_outputs; i++)
1660  for (int i = 0; i < fg->nb_inputs; i++)
1661  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1662  avfilter_graph_free(&fgt->graph);
1663 }
1664 
1666 {
1667  return f->nb_inputs == 0 &&
1668  (!strcmp(f->filter->name, "buffer") ||
1669  !strcmp(f->filter->name, "abuffer"));
1670 }
1671 
1672 static int graph_is_meta(AVFilterGraph *graph)
1673 {
1674  for (unsigned i = 0; i < graph->nb_filters; i++) {
1675  const AVFilterContext *f = graph->filters[i];
1676 
1677  /* in addition to filters flagged as meta, also
1678  * disregard sinks and buffersources (but not other sources,
1679  * since they introduce data we are not aware of)
1680  */
1681  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1682  f->nb_outputs == 0 ||
1684  return 0;
1685  }
1686  return 1;
1687 }
1688 
1689 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1690 
1692 {
1693  FilterGraphPriv *fgp = fgp_from_fg(fg);
1694  AVBufferRef *hw_device;
1695  AVFilterInOut *inputs, *outputs, *cur;
1696  int ret, i, simple = filtergraph_is_simple(fg);
1697  int have_input_eof = 0;
1698  const char *graph_desc = fgp->graph_desc;
1699 
1700  cleanup_filtergraph(fg, fgt);
1701  fgt->graph = avfilter_graph_alloc();
1702  if (!fgt->graph)
1703  return AVERROR(ENOMEM);
1704 
1705  if (simple) {
1706  OutputStream *ost = fg->outputs[0]->ost;
1707 
1708  if (filter_nbthreads) {
1709  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1710  if (ret < 0)
1711  goto fail;
1712  } else {
1713  const AVDictionaryEntry *e = NULL;
1714  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1715  if (e)
1716  av_opt_set(fgt->graph, "threads", e->value, 0);
1717  }
1718 
1719  if (av_dict_count(ost->sws_dict)) {
1720  ret = av_dict_get_string(ost->sws_dict,
1721  &fgt->graph->scale_sws_opts,
1722  '=', ':');
1723  if (ret < 0)
1724  goto fail;
1725  }
1726 
1727  if (av_dict_count(ost->swr_opts)) {
1728  char *args;
1729  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1730  if (ret < 0)
1731  goto fail;
1732  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1733  av_free(args);
1734  }
1735  } else {
1737  }
1738 
1739  hw_device = hw_device_for_filter();
1740 
1741  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1742  goto fail;
1743 
1744  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1745  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1748  goto fail;
1749  }
1751 
1752  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1753  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1754  if (ret < 0) {
1756  goto fail;
1757  }
1758  }
1760 
1761  if (fgp->disable_conversions)
1763  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1764  goto fail;
1765 
1766  fgp->is_meta = graph_is_meta(fgt->graph);
1767 
1768  /* limit the lists of allowed formats to the ones selected, to
1769  * make sure they stay the same if the filtergraph is reconfigured later */
1770  for (int i = 0; i < fg->nb_outputs; i++) {
1771  OutputFilter *ofilter = fg->outputs[i];
1772  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1773  AVFilterContext *sink = ofp->filter;
1774 
1775  ofp->format = av_buffersink_get_format(sink);
1776 
1777  ofp->width = av_buffersink_get_w(sink);
1778  ofp->height = av_buffersink_get_h(sink);
1779 
1780  // If the timing parameters are not locked yet, get the tentative values
1781  // here but don't lock them. They will only be used if no output frames
1782  // are ever produced.
1783  if (!ofp->tb_out_locked) {
1785  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1786  fr.num > 0 && fr.den > 0)
1787  ofp->fps.framerate = fr;
1788  ofp->tb_out = av_buffersink_get_time_base(sink);
1789  }
1791 
1794  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1795  if (ret < 0)
1796  goto fail;
1797  }
1798 
1799  for (int i = 0; i < fg->nb_inputs; i++) {
1801  AVFrame *tmp;
1802  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1803  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1804  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1805  } else {
1807  }
1808  av_frame_free(&tmp);
1809  if (ret < 0)
1810  goto fail;
1811  }
1812  }
1813 
1814  /* send the EOFs for the finished inputs */
1815  for (int i = 0; i < fg->nb_inputs; i++) {
1817  if (fgt->eof_in[i]) {
1819  if (ret < 0)
1820  goto fail;
1821  have_input_eof = 1;
1822  }
1823  }
1824 
1825  if (have_input_eof) {
1826  // make sure the EOF propagates to the end of the graph
1828  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1829  goto fail;
1830  }
1831 
1832  return 0;
1833 fail:
1834  cleanup_filtergraph(fg, fgt);
1835  return ret;
1836 }
1837 
1839 {
1840  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1841  AVFrameSideData *sd;
1842  int ret;
1843 
1845  if (ret < 0)
1846  return ret;
1847 
1848  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
1849  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
1850  frame->time_base;
1851 
1852  ifp->format = frame->format;
1853 
1854  ifp->width = frame->width;
1855  ifp->height = frame->height;
1857  ifp->color_space = frame->colorspace;
1858  ifp->color_range = frame->color_range;
1859 
1860  ifp->sample_rate = frame->sample_rate;
1862  if (ret < 0)
1863  return ret;
1864 
1866  if (sd)
1867  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1868  ifp->displaymatrix_present = !!sd;
1869 
1870  return 0;
1871 }
1872 
1874 {
1875  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1876  return fgp->is_simple;
1877 }
1878 
1879 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
1880  double time, const char *target,
1881  const char *command, const char *arg, int all_filters)
1882 {
1883  int ret;
1884 
1885  if (!graph)
1886  return;
1887 
1888  if (time < 0) {
1889  char response[4096];
1890  ret = avfilter_graph_send_command(graph, target, command, arg,
1891  response, sizeof(response),
1892  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1893  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1894  fg->index, ret, response);
1895  } else if (!all_filters) {
1896  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1897  } else {
1898  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
1899  if (ret < 0)
1900  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1901  }
1902 }
1903 
1904 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
1905 {
1906  int nb_requests, nb_requests_max = -1;
1907  int best_input = -1;
1908 
1909  for (int i = 0; i < fg->nb_inputs; i++) {
1910  InputFilter *ifilter = fg->inputs[i];
1911  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1912 
1913  if (fgt->eof_in[i])
1914  continue;
1915 
1916  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
1917  if (nb_requests > nb_requests_max) {
1918  nb_requests_max = nb_requests;
1919  best_input = i;
1920  }
1921  }
1922 
1923  av_assert0(best_input >= 0);
1924 
1925  return best_input;
1926 }
1927 
1929 {
1930  OutputFilter *ofilter = &ofp->ofilter;
1931  FPSConvContext *fps = &ofp->fps;
1932  AVRational tb = (AVRational){ 0, 0 };
1933  AVRational fr;
1934  const FrameData *fd;
1935 
1936  fd = frame_data_c(frame);
1937 
1938  // apply -enc_time_base
1939  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
1940  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
1941  av_log(ofilter->ost, AV_LOG_ERROR,
1942  "Demuxing timebase not available - cannot use it for encoding\n");
1943  return AVERROR(EINVAL);
1944  }
1945 
1946  switch (ofp->enc_timebase.num) {
1947  case 0: break;
1948  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
1949  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
1950  default: tb = ofp->enc_timebase; break;
1951  }
1952 
1953  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
1954  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
1955  goto finish;
1956  }
1957 
1958  fr = fps->framerate;
1959  if (!fr.num) {
1961  if (fr_sink.num > 0 && fr_sink.den > 0)
1962  fr = fr_sink;
1963  }
1964 
1965  if (ofilter->ost->is_cfr) {
1966  if (!fr.num && !fps->framerate_max.num) {
1967  fr = (AVRational){25, 1};
1968  av_log(ofilter->ost, AV_LOG_WARNING,
1969  "No information "
1970  "about the input framerate is available. Falling "
1971  "back to a default value of 25fps. Use the -r option "
1972  "if you want a different framerate.\n");
1973  }
1974 
1975  if (fps->framerate_max.num &&
1976  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
1977  !fr.den))
1978  fr = fps->framerate_max;
1979  }
1980 
1981  if (fr.num > 0) {
1982  if (fps->framerate_supported) {
1983  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
1984  fr = fps->framerate_supported[idx];
1985  }
1986  if (fps->framerate_clip) {
1987  av_reduce(&fr.num, &fr.den,
1988  fr.num, fr.den, fps->framerate_clip);
1989  }
1990  }
1991 
1992  if (!(tb.num > 0 && tb.den > 0))
1993  tb = av_inv_q(fr);
1994  if (!(tb.num > 0 && tb.den > 0))
1995  tb = frame->time_base;
1996 
1997  fps->framerate = fr;
1998 finish:
1999  ofp->tb_out = tb;
2000  ofp->tb_out_locked = 1;
2001 
2002  return 0;
2003 }
2004 
2007 {
2008  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2009 
2010  AVRational tb = tb_dst;
2011  AVRational filter_tb = frame->time_base;
2012  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2013 
2014  if (frame->pts == AV_NOPTS_VALUE)
2015  goto early_exit;
2016 
2017  tb.den <<= extra_bits;
2018  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2020  float_pts /= 1 << extra_bits;
2021  // when float_pts is not exactly an integer,
2022  // avoid exact midpoints to reduce the chance of rounding differences, this
2023  // can be removed in case the fps code is changed to work with integers
2024  if (float_pts != llrint(float_pts))
2025  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2026 
2027  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2029  frame->time_base = tb_dst;
2030 
2031 early_exit:
2032 
2033  if (debug_ts) {
2034  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2035  frame ? av_ts2str(frame->pts) : "NULL",
2036  av_ts2timestr(frame->pts, &tb_dst),
2037  float_pts, tb_dst.num, tb_dst.den);
2038  }
2039 
2040  return float_pts;
2041 }
2042 
2043 /* Convert frame timestamps to the encoder timebase and decide how many times
2044  * should this (and possibly previous) frame be repeated in order to conform to
2045  * desired target framerate (if any).
2046  */
2048  int64_t *nb_frames, int64_t *nb_frames_prev)
2049 {
2050  OutputFilter *ofilter = &ofp->ofilter;
2051  OutputStream *ost = ofilter->ost;
2052  FPSConvContext *fps = &ofp->fps;
2053  double delta0, delta, sync_ipts, duration;
2054 
2055  if (!frame) {
2056  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2057  fps->frames_prev_hist[1],
2058  fps->frames_prev_hist[2]);
2059 
2060  if (!*nb_frames && fps->last_dropped) {
2061  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2062  fps->last_dropped++;
2063  }
2064 
2065  goto finish;
2066  }
2067 
2069 
2070  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2071  /* delta0 is the "drift" between the input frame and
2072  * where it would fall in the output. */
2073  delta0 = sync_ipts - ofp->next_pts;
2074  delta = delta0 + duration;
2075 
2076  // tracks the number of times the PREVIOUS frame should be duplicated,
2077  // mostly for variable framerate (VFR)
2078  *nb_frames_prev = 0;
2079  /* by default, we output a single frame */
2080  *nb_frames = 1;
2081 
2082  if (delta0 < 0 &&
2083  delta > 0 &&
2084  ost->vsync_method != VSYNC_PASSTHROUGH
2086  && ost->vsync_method != VSYNC_DROP
2087 #endif
2088  ) {
2089  if (delta0 < -0.6) {
2090  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2091  } else
2092  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2093  sync_ipts = ofp->next_pts;
2094  duration += delta0;
2095  delta0 = 0;
2096  }
2097 
2098  switch (ost->vsync_method) {
2099  case VSYNC_VSCFR:
2100  if (fps->frame_number == 0 && delta0 >= 0.5) {
2101  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2102  delta = duration;
2103  delta0 = 0;
2104  ofp->next_pts = llrint(sync_ipts);
2105  }
2106  case VSYNC_CFR:
2107  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2108  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2109  *nb_frames = 0;
2110  } else if (delta < -1.1)
2111  *nb_frames = 0;
2112  else if (delta > 1.1) {
2113  *nb_frames = llrintf(delta);
2114  if (delta0 > 1.1)
2115  *nb_frames_prev = llrintf(delta0 - 0.6);
2116  }
2117  frame->duration = 1;
2118  break;
2119  case VSYNC_VFR:
2120  if (delta <= -0.6)
2121  *nb_frames = 0;
2122  else if (delta > 0.6)
2123  ofp->next_pts = llrint(sync_ipts);
2125  break;
2126 #if FFMPEG_OPT_VSYNC_DROP
2127  case VSYNC_DROP:
2128 #endif
2129  case VSYNC_PASSTHROUGH:
2130  ofp->next_pts = llrint(sync_ipts);
2132  break;
2133  default:
2134  av_assert0(0);
2135  }
2136 
2137 finish:
2138  memmove(fps->frames_prev_hist + 1,
2139  fps->frames_prev_hist,
2140  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2141  fps->frames_prev_hist[0] = *nb_frames_prev;
2142 
2143  if (*nb_frames_prev == 0 && fps->last_dropped) {
2144  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2146  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2147  fps->frame_number, fps->last_frame->pts);
2148  }
2149  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2150  uint64_t nb_frames_dup;
2151  if (*nb_frames > dts_error_threshold * 30) {
2152  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2153  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2154  *nb_frames = 0;
2155  return;
2156  }
2157  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2158  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2159  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2160  if (nb_frames_dup > fps->dup_warning) {
2161  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2162  fps->dup_warning *= 10;
2163  }
2164  }
2165 
2166  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2168 }
2169 
2171 {
2173  int ret;
2174 
2175  // we are finished and no frames were ever seen at this output,
2176  // at least initialize the encoder with a dummy frame
2177  if (!fgt->got_frame) {
2178  AVFrame *frame = fgt->frame;
2179  FrameData *fd;
2180 
2181  frame->time_base = ofp->tb_out;
2182  frame->format = ofp->format;
2183 
2184  frame->width = ofp->width;
2185  frame->height = ofp->height;
2187 
2188  frame->sample_rate = ofp->sample_rate;
2189  if (ofp->ch_layout.nb_channels) {
2191  if (ret < 0)
2192  return ret;
2193  }
2194 
2195  fd = frame_data(frame);
2196  if (!fd)
2197  return AVERROR(ENOMEM);
2198 
2199  fd->frame_rate_filter = ofp->fps.framerate;
2200 
2201  av_assert0(!frame->buf[0]);
2202 
2204  "No filtered frames for output stream, trying to "
2205  "initialize anyway.\n");
2206 
2207  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2208  if (ret < 0) {
2210  return ret;
2211  }
2212  }
2213 
2214  fgt->eof_out[ofp->index] = 1;
2215 
2216  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2217  return (ret == AVERROR_EOF) ? 0 : ret;
2218 }
2219 
2221  AVFrame *frame)
2222 {
2224  AVFrame *frame_prev = ofp->fps.last_frame;
2225  enum AVMediaType type = ofp->ofilter.type;
2226 
2227  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2228 
2229  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2230  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2231 
2232  for (int64_t i = 0; i < nb_frames; i++) {
2233  AVFrame *frame_out;
2234  int ret;
2235 
2236  if (type == AVMEDIA_TYPE_VIDEO) {
2237  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2238  frame_prev : frame;
2239  if (!frame_in)
2240  break;
2241 
2242  frame_out = fgp->frame_enc;
2243  ret = av_frame_ref(frame_out, frame_in);
2244  if (ret < 0)
2245  return ret;
2246 
2247  frame_out->pts = ofp->next_pts;
2248 
2249  if (ofp->fps.dropped_keyframe) {
2250  frame_out->flags |= AV_FRAME_FLAG_KEY;
2251  ofp->fps.dropped_keyframe = 0;
2252  }
2253  } else {
2254  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2257 
2258  frame->time_base = ofp->tb_out;
2260  (AVRational){ 1, frame->sample_rate },
2261  ofp->tb_out);
2262 
2263  ofp->next_pts = frame->pts + frame->duration;
2264 
2265  frame_out = frame;
2266  }
2267 
2268  // send the frame to consumers
2269  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2270  if (ret < 0) {
2271  av_frame_unref(frame_out);
2272 
2273  if (!fgt->eof_out[ofp->index]) {
2274  fgt->eof_out[ofp->index] = 1;
2275  fgp->nb_outputs_done++;
2276  }
2277 
2278  return ret == AVERROR_EOF ? 0 : ret;
2279  }
2280 
2281  if (type == AVMEDIA_TYPE_VIDEO) {
2282  ofp->fps.frame_number++;
2283  ofp->next_pts++;
2284 
2285  if (i == nb_frames_prev && frame)
2287  }
2288 
2289  fgt->got_frame = 1;
2290  }
2291 
2292  if (frame && frame_prev) {
2293  av_frame_unref(frame_prev);
2294  av_frame_move_ref(frame_prev, frame);
2295  }
2296 
2297  if (!frame)
2298  return close_output(ofp, fgt);
2299 
2300  return 0;
2301 }
2302 
2304  AVFrame *frame)
2305 {
2307  OutputStream *ost = ofp->ofilter.ost;
2308  AVFilterContext *filter = ofp->filter;
2309  FrameData *fd;
2310  int ret;
2311 
2314  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2315  ret = fg_output_frame(ofp, fgt, NULL);
2316  return (ret < 0) ? ret : 1;
2317  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2318  return 1;
2319  } else if (ret < 0) {
2320  av_log(fgp, AV_LOG_WARNING,
2321  "Error in retrieving a frame from the filtergraph: %s\n",
2322  av_err2str(ret));
2323  return ret;
2324  }
2325 
2326  if (fgt->eof_out[ofp->index]) {
2328  return 0;
2329  }
2330 
2332 
2333  if (debug_ts)
2334  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2337 
2338  // Choose the output timebase the first time we get a frame.
2339  if (!ofp->tb_out_locked) {
2340  ret = choose_out_timebase(ofp, frame);
2341  if (ret < 0) {
2342  av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
2344  return ret;
2345  }
2346  }
2347 
2348  fd = frame_data(frame);
2349  if (!fd) {
2351  return AVERROR(ENOMEM);
2352  }
2353 
2355 
2356  // only use bits_per_raw_sample passed through from the decoder
2357  // if the filtergraph did not touch the frame data
2358  if (!fgp->is_meta)
2359  fd->bits_per_raw_sample = 0;
2360 
2361  if (ost->type == AVMEDIA_TYPE_VIDEO) {
2362  if (!frame->duration) {
2364  if (fr.num > 0 && fr.den > 0)
2366  }
2367 
2368  fd->frame_rate_filter = ofp->fps.framerate;
2369  }
2370 
2371  ret = fg_output_frame(ofp, fgt, frame);
2373  if (ret < 0)
2374  return ret;
2375 
2376  return 0;
2377 }
2378 
2379 /* retrieve all frames available at filtergraph outputs
2380  * and send them to consumers */
2382  AVFrame *frame)
2383 {
2384  FilterGraphPriv *fgp = fgp_from_fg(fg);
2385  int did_step = 0;
2386 
2387  // graph not configured, just select the input to request
2388  if (!fgt->graph) {
2389  for (int i = 0; i < fg->nb_inputs; i++) {
2391  if (ifp->format < 0 && !fgt->eof_in[i]) {
2392  fgt->next_in = i;
2393  return 0;
2394  }
2395  }
2396 
2397  // This state - graph is not configured, but all inputs are either
2398  // initialized or EOF - should be unreachable because sending EOF to a
2399  // filter without even a fallback format should fail
2400  av_assert0(0);
2401  return AVERROR_BUG;
2402  }
2403 
2404  while (fgp->nb_outputs_done < fg->nb_outputs) {
2405  int ret;
2406 
2408  if (ret == AVERROR(EAGAIN)) {
2409  fgt->next_in = choose_input(fg, fgt);
2410  break;
2411  } else if (ret < 0) {
2412  if (ret == AVERROR_EOF)
2413  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2414  else
2415  av_log(fg, AV_LOG_ERROR,
2416  "Error requesting a frame from the filtergraph: %s\n",
2417  av_err2str(ret));
2418  return ret;
2419  }
2420  fgt->next_in = fg->nb_inputs;
2421 
2422  // return after one iteration, so that scheduler can rate-control us
2423  if (did_step && fgp->have_sources)
2424  return 0;
2425 
2426  /* Reap all buffers present in the buffer sinks */
2427  for (int i = 0; i < fg->nb_outputs; i++) {
2429 
2430  ret = 0;
2431  while (!ret) {
2432  ret = fg_output_step(ofp, fgt, frame);
2433  if (ret < 0)
2434  return ret;
2435  }
2436  }
2437  did_step = 1;
2438  }
2439 
2440  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2441 }
2442 
/* Advance the sub2video state to the given timestamp: either extend the
 * currently shown subpicture or replace/clear it if its display period ended. */
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int64_t pts2;

    /* subtitles seem to be usually muxed ahead of other streams;
       if not, subtracting a larger time here is necessary */
    pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;

    /* do not send the heartbeat frame if the subtitle is already ahead */
    if (pts2 <= ifp->sub2video.last_pts)
        return;

    if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
        /* if we have hit the end of the current displayed subpicture,
           or if we need to initialize the system, update the
           overlayed subpicture and its start/end times */
        sub2video_update(ifp, pts2 + 1, NULL);
    else
        /* otherwise just re-send the current subpicture with the new timestamp */
        sub2video_push_ref(ifp, pts2);
}
2464 
/* Feed a subtitle frame (or heartbeat, or EOF) to a sub2video input.
 *
 * If `buffer` is nonzero (graph not configured yet) the frame is queued in
 * ifp->frame_queue for later; otherwise it is processed immediately.
 * A non-NULL frame carries an AVSubtitle in frame->buf[0]->data; a frame
 * without buf[0] is a heartbeat; NULL means end of stream. */
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    if (buffer) {
        AVFrame *tmp;

        // EOF needs no buffering
        if (!frame)
            return 0;

        tmp = av_frame_alloc();
        if (!tmp)
            return AVERROR(ENOMEM);


        ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
        if (ret < 0) {
            av_frame_free(&tmp);
            return ret;
        }

        return 0;
    }

    // heartbeat frame
    if (frame && !frame->buf[0]) {
        return 0;
    }

    if (!frame) {
        // flush: finalize the currently shown subpicture, then signal EOF
        if (ifp->sub2video.end_pts < INT64_MAX)
            sub2video_update(ifp, INT64_MAX, NULL);

        return av_buffersrc_add_frame(ifp->filter, NULL);
    }

    // pick up possible canvas size changes from the incoming frame
    ifp->width = frame->width ? frame->width : ifp->width;
    ifp->height = frame->height ? frame->height : ifp->height;

    sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);

    return 0;
}
2511 
/* Signal EOF on the given filtergraph input; pts/tb give the time at which
 * the input ended. If the graph was never configured, fall back to the
 * pre-declared parameters so the graph can still be built and flushed. */
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    // already at EOF - nothing to do
    if (fgt->eof_in[ifp->index])
        return 0;

    fgt->eof_in[ifp->index] = 1;

    if (ifp->filter) {
        // graph is configured: convert the timestamp and pass EOF to buffersrc
        pts = av_rescale_q_rnd(pts, tb, ifp->time_base,

        if (ret < 0)
            return ret;
    } else {
        if (ifp->format < 0) {
            // the filtergraph was never configured, use the fallback parameters
            ifp->format = ifp->opts.fallback->format;
            ifp->sample_rate = ifp->opts.fallback->sample_rate;
            ifp->width = ifp->opts.fallback->width;
            ifp->height = ifp->opts.fallback->height;
            ifp->color_space = ifp->opts.fallback->colorspace;
            ifp->color_range = ifp->opts.fallback->color_range;
            ifp->time_base = ifp->opts.fallback->time_base;

                                        &ifp->opts.fallback->ch_layout);
            if (ret < 0)
                return ret;

            // with the fallback parameters applied, all inputs may now be known
            if (ifilter_has_all_input_formats(ifilter->graph)) {
                ret = configure_filtergraph(ifilter->graph, fgt);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
                    return ret;
                }
            }
        }

        if (ifp->format < 0) {
            // no fallback parameters available either - give up on this input
                   "Cannot determine format of input %s after EOF\n",
                   ifp->opts.name);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
2566 
 // Bitmask of reasons an input needs reinitialization (see send_frame()).
 VIDEO_CHANGED = (1 << 0), // pixel format/size/colorspace/range changed
 AUDIO_CHANGED = (1 << 1), // sample format/rate (or layout) changed
 MATRIX_CHANGED = (1 << 2), // display-matrix side data appeared/changed/vanished
 HWACCEL_CHANGED = (1 << 3) // hw frames context appeared/changed/vanished
};
2573 
/* Return str unchanged, or the literal "unknown" when str is NULL.
 * Used to make optional format/colorspace names safe to print. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2578 
/* Send a decoded frame to the filtergraph input it belongs to,
 * (re)configuring the graph first if the frame's parameters changed
 * relative to what the input was set up with. */
                      InputFilter *ifilter, AVFrame *frame)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FrameData *fd;
    AVFrameSideData *sd;
    int need_reinit = 0, ret;

    /* determine if the parameters for this input changed */
    switch (ifp->type) {
    case AVMEDIA_TYPE_AUDIO:
        if (ifp->format != frame->format ||
            ifp->sample_rate != frame->sample_rate ||
            need_reinit |= AUDIO_CHANGED;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ifp->format != frame->format ||
            ifp->width != frame->width ||
            ifp->height != frame->height ||
            ifp->color_space != frame->colorspace ||
            ifp->color_range != frame->color_range)
            need_reinit |= VIDEO_CHANGED;
        break;
    }

        // compare incoming display-matrix side data against the stored one
        if (!ifp->displaymatrix_present ||
            memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
            need_reinit |= MATRIX_CHANGED;
    } else if (ifp->displaymatrix_present)
        need_reinit |= MATRIX_CHANGED;

    // once configured, parameter changes are ignored unless the caller
    // explicitly allowed reinitialization for this input
    if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
        need_reinit = 0;

    // a hw frames context appearing, disappearing or changing always forces a reinit
    if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit |= HWACCEL_CHANGED;

    if (need_reinit) {
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fgt->graph) {
        AVFrame *tmp = av_frame_alloc();

        if (!tmp)
            return AVERROR(ENOMEM);

        // not all inputs know their format yet - queue this frame until they do
        if (!ifilter_has_all_input_formats(fg)) {

            ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
            if (ret < 0)
                av_frame_free(&tmp);

            return ret;
        }

        // drain any output still pending in the old graph before replacing it
        ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
        av_frame_free(&tmp);
        if (ret < 0)
            return ret;

        if (fgt->graph) {
            // build a human-readable reason string for the log message below
            AVBPrint reason;
            if (need_reinit & AUDIO_CHANGED) {
                const char *sample_format_name = av_get_sample_fmt_name(frame->format);
                av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
                av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
            }
            if (need_reinit & VIDEO_CHANGED) {
                const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
                const char *color_space_name = av_color_space_name(frame->colorspace);
                const char *color_range_name = av_color_range_name(frame->color_range);
                av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
                           unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
                           unknown_if_null(color_space_name), frame->width, frame->height);
            }
            if (need_reinit & MATRIX_CHANGED)
                av_bprintf(&reason, "display matrix changed, ");
            if (need_reinit & HWACCEL_CHANGED)
                av_bprintf(&reason, "hwaccel changed, ");
            if (reason.len > 1)
                reason.str[reason.len - 2] = '\0'; // remove last comma
            av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
        }

        ret = configure_filtergraph(fg, fgt);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    frame->time_base = ifp->time_base;

    // attach per-frame bookkeeping data consumed further down the pipeline
    fd = frame_data(frame);
    if (!fd)
        return AVERROR(ENOMEM);

    if (ret < 0) {
        // EOF from buffersrc is propagated silently; everything else is logged
        if (ret != AVERROR_EOF)
            av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2700 
// Build a short name for the current filtering thread (for debuggers/top):
// "<media type initial>f#<file>:<stream>" for simple graphs, "fc<index>" for
// complex ones.
static void fg_thread_set_name(const FilterGraph *fg)
{
    char name[16];
    if (filtergraph_is_simple(fg)) {
        OutputStream *ost = fg->outputs[0]->ost;
        // e.g. "vf#0:1" - first letter of the media type, then output indices
        snprintf(name, sizeof(name), "%cf#%d:%d",
                 av_get_media_type_string(ost->type)[0],
                 ost->file->index, ost->index);
    } else {
        // complex graph: "fc<filtergraph index>"
        snprintf(name, sizeof(name), "fc%d", fg->index);
    }

}
2715 
/* Free everything owned by a FilterGraphThread and reset it to a clean
 * (all-zero) state; safe to call on a partially-initialized struct. */
{
    if (fgt->frame_queue_out) {
        AVFrame *frame;
        // drain and free any frames still queued for output
        while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
            av_frame_free(&frame);
    }

    av_frame_free(&fgt->frame);
    av_freep(&fgt->eof_in);
    av_freep(&fgt->eof_out);

    avfilter_graph_free(&fgt->graph);

    memset(fgt, 0, sizeof(*fgt));
}
2733 
/* Initialize per-thread filtering state for the given graph.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure
 * (in which case all partial allocations are released). */
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
{
    memset(fgt, 0, sizeof(*fgt));

    // scratch frame used to move data in and out of the graph
    fgt->frame = av_frame_alloc();
    if (!fgt->frame)
        goto fail;

    // one EOF flag per graph input
    fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
    if (!fgt->eof_in)
        goto fail;

    // one EOF flag per graph output
    fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
    if (!fgt->eof_out)
        goto fail;

    if (!fgt->frame_queue_out)
        goto fail;

    return 0;

fail:
    fg_thread_uninit(fgt);
    return AVERROR(ENOMEM);
}
2760 
2761 static int filter_thread(void *arg)
2762 {
2763  FilterGraphPriv *fgp = arg;
2764  FilterGraph *fg = &fgp->fg;
2765 
2766  FilterGraphThread fgt;
2767  int ret = 0, input_status = 0;
2768 
2769  ret = fg_thread_init(&fgt, fg);
2770  if (ret < 0)
2771  goto finish;
2772 
2773  fg_thread_set_name(fg);
2774 
2775  // if we have all input parameters the graph can now be configured
2777  ret = configure_filtergraph(fg, &fgt);
2778  if (ret < 0) {
2779  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2780  av_err2str(ret));
2781  goto finish;
2782  }
2783  }
2784 
2785  while (1) {
2786  InputFilter *ifilter;
2787  InputFilterPriv *ifp;
2788  enum FrameOpaque o;
2789  unsigned input_idx = fgt.next_in;
2790 
2791  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2792  &input_idx, fgt.frame);
2793  if (input_status == AVERROR_EOF) {
2794  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2795  break;
2796  } else if (input_status == AVERROR(EAGAIN)) {
2797  // should only happen when we didn't request any input
2798  av_assert0(input_idx == fg->nb_inputs);
2799  goto read_frames;
2800  }
2801  av_assert0(input_status >= 0);
2802 
2803  o = (intptr_t)fgt.frame->opaque;
2804 
2805  o = (intptr_t)fgt.frame->opaque;
2806 
2807  // message on the control stream
2808  if (input_idx == fg->nb_inputs) {
2809  FilterCommand *fc;
2810 
2811  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2812 
2813  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2814  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2815  fc->all_filters);
2816  av_frame_unref(fgt.frame);
2817  continue;
2818  }
2819 
2820  // we received an input frame or EOF
2821  ifilter = fg->inputs[input_idx];
2822  ifp = ifp_from_ifilter(ifilter);
2823 
2824  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2825  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2826  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2827  !fgt.graph);
2828  } else if (fgt.frame->buf[0]) {
2829  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2830  } else {
2832  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2833  }
2834  av_frame_unref(fgt.frame);
2835  if (ret == AVERROR_EOF) {
2836  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2837  input_idx);
2838  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2839  continue;
2840  }
2841  if (ret < 0)
2842  goto finish;
2843 
2844 read_frames:
2845  // retrieve all newly avalable frames
2846  ret = read_frames(fg, &fgt, fgt.frame);
2847  if (ret == AVERROR_EOF) {
2848  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
2849  break;
2850  } else if (ret < 0) {
2851  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
2852  av_err2str(ret));
2853  goto finish;
2854  }
2855  }
2856 
2857  for (unsigned i = 0; i < fg->nb_outputs; i++) {
2859 
2860  if (fgt.eof_out[i] || !fgt.graph)
2861  continue;
2862 
2863  ret = fg_output_frame(ofp, &fgt, NULL);
2864  if (ret < 0)
2865  goto finish;
2866  }
2867 
2868 finish:
2869  // EOF is normal termination
2870  if (ret == AVERROR_EOF)
2871  ret = 0;
2872 
2873  fg_thread_uninit(&fgt);
2874 
2875  return ret;
2876 }
2877 
2878 void fg_send_command(FilterGraph *fg, double time, const char *target,
2879  const char *command, const char *arg, int all_filters)
2880 {
2881  FilterGraphPriv *fgp = fgp_from_fg(fg);
2882  AVBufferRef *buf;
2883  FilterCommand *fc;
2884 
2885  fc = av_mallocz(sizeof(*fc));
2886  if (!fc)
2887  return;
2888 
2889  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
2890  if (!buf) {
2891  av_freep(&fc);
2892  return;
2893  }
2894 
2895  fc->target = av_strdup(target);
2896  fc->command = av_strdup(command);
2897  fc->arg = av_strdup(arg);
2898  if (!fc->target || !fc->command || !fc->arg) {
2899  av_buffer_unref(&buf);
2900  return;
2901  }
2902 
2903  fc->time = time;
2904  fc->all_filters = all_filters;
2905 
2906  fgp->frame->buf[0] = buf;
2907  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
2908 
2909  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
2910 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:522
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:122
AVSubtitle
Definition: avcodec.h:2227
formats
formats
Definition: signature.h:48
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
Definition: ffmpeg_filter.c:725
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1646
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:94
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored automatically.
Definition: avfilter.h:693
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:623
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:615
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:119
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:273
av_clip
#define av_clip
Definition: common.h:98
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:120
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2381
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:189
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:126
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2232
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1904
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1114
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:64
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:68
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:96
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:137
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:972
FrameData
Definition: ffmpeg.h:593
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1879
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:151
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:258
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:838
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:110
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:1080
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:750
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:290
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1838
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2571
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:68
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:139
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:227
AVSubtitleRect
Definition: avcodec.h:2200
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2231
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:892
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:171
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:586
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:492
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:634
InputFile::index
int index
Definition: ffmpeg.h:392
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:967
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:456
AVFrame::width
int width
Definition: frame.h:416
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
AVOption
AVOption.
Definition: opt.h:346
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2220
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:174
FilterGraph::index
int index
Definition: ffmpeg.h:288
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:136
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:175
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:214
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1656
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:291
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2568
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:616
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:219
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:239
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:557
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:322
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:409
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2303
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:590
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:99
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1691
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:354
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:583
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:345
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:82
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:76
fg_finalise_bindings
int fg_finalise_bindings(FilterGraph *fg)
Definition: ffmpeg_filter.c:1205
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:245
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:261
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:164
finish
static void finish(void)
Definition: movenc.c:342
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:241
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3338
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:184
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2716
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:297
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:244
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:603
val
static double val(void *priv, double ch)
Definition: aeval.c:78
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:187
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:745
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:116
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1505
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:643
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:738
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1672
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
FrameData::tb
AVRational tb
Definition: ffmpeg.h:603
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:72
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:194
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:176
AVRational::num
int num
Numerator.
Definition: rational.h:59
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:246
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:799
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:118
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2443
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:982
OutputFile::shortest
int shortest
Definition: ffmpeg.h:588
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:606
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2512
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:103
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1121
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:864
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1143
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:233
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:66
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:113
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:292
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:627
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1304
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:278
InputFilter
Definition: ffmpeg.h:266
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:272
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:981
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:383
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2233
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2734
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:248
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:535
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:267
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:216
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:943
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1185
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:209
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:193
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3278
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:629
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:274
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1243
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:212
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:133
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:804
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:195
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1075
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:173
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:353
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts)
Definition: ffmpeg_dec.c:1350
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:159
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:145
Decoder
Definition: ffmpeg.h:331
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1086
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1144
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:117
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:480
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:204
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2170
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:91
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:644
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1384
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:69
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:610
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1373
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1167
AVFilterGraph
Definition: avfilter.h:813
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:244
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:132
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:649
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:293
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:208
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:789
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:361
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:125
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:582
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:269
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:185
FilterGraph
Definition: ffmpeg.h:286
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1156
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1132
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:257
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:964
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:278
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1873
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1950
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:240
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1479
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2761
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:143
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:83
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:354
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:146
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:84
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:961
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:543
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:202
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:329
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2005
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:134
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2570
FilterCommand::time
double time
Definition: ffmpeg_filter.c:229
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1231
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:155
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:559
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1291
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:471
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:432
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:431
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:474
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2230
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:100
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1104
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:131
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1665
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2569
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:113
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2316
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2574
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:258
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
decoders
Decoder ** decoders
Definition: ffmpeg.c:134
nb_decoders
int nb_decoders
Definition: ffmpeg.c:135
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:280
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2381
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:800
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2183
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:409
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2579
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:944
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:830
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:230
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:424
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:181
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:167
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
FPSConvContext
Definition: ffmpeg_filter.c:164
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:108
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:608
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2878
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:263
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:193
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1605
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
display.h
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:179
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
InputFilterPriv::sub2video
struct InputFilterPriv::@6 sub2video
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:603
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:77
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:576
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:125
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:765
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:131
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:328
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:897
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2418
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2047
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:159
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:957
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:268
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:774
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:165
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:408
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1282
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1127
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:215
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:412
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2567
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:362
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:451
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:977
AVFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:695
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:416
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:749
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:383
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:615
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:126
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:349
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1142
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:141
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:123
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:432
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:241
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:611
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:1928
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:210
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:266
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1168
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:287
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:439
OutputFilter
Definition: ffmpeg.h:271
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2465
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:104
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:313
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:929
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:490
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:283
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:85
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:527
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:532
InputStream::index
int index
Definition: ffmpeg.h:351
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2360
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:225
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:950
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:334
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:129
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:152
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:132
d
d
Definition: ffmpeg_filter.c:409
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:312
imgutils.h
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:615
OutputStream::st
AVStream * st
Definition: mux.c:54
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:192
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1261
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:106
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:63
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec)
Definition: ffmpeg_filter.c:697
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:300
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:410
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:648
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:585
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:480
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:199
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:193
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:119
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:178
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2701
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:153
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1494
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2229
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:226
FilterCommand
Definition: ffmpeg_filter.c:224
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:131
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2882
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:282
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:83
OutputFile
Definition: ffmpeg.h:574
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:255
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:180