FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavutil/tx.h"
51 #include "libswresample/swresample.h"
52 
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "opt_common.h"
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 typedef struct MyAVPacketList {
111  AVPacket *pkt;
112  int serial;
113 } MyAVPacketList;
114 
115 typedef struct PacketQueue {
116  AVFifo *pkt_list;
117  int nb_packets;
118  int size;
119  int64_t duration;
120  int abort_request;
121  int serial;
122  SDL_mutex *mutex;
123  SDL_cond *cond;
124 } PacketQueue;
125 
126 #define VIDEO_PICTURE_QUEUE_SIZE 3
127 #define SUBPICTURE_QUEUE_SIZE 16
128 #define SAMPLE_QUEUE_SIZE 9
129 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
130 
131 typedef struct AudioParams {
132  int freq;
133  AVChannelLayout ch_layout;
134  enum AVSampleFormat fmt;
135  int frame_size;
136  int bytes_per_sec;
137 } AudioParams;
138 
139 typedef struct Clock {
140  double pts; /* clock base */
141  double pts_drift; /* clock base minus time at which we updated the clock */
142  double last_updated;
143  double speed;
144  int serial; /* clock is based on a packet with this serial */
145  int paused;
146  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
147 } Clock;
148 
149 typedef struct FrameData {
150  int64_t pkt_pos;
151 } FrameData;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket *pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
258 
259  enum ShowMode {
261  } show_mode;
268  float *real_data;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
288  int eof;
289 
290  char *filename;
292  int step;
293 
295  AVFilterContext *in_video_filter; // the first filter in the video chain
296  AVFilterContext *out_video_filter; // the last filter in the video chain
297  AVFilterContext *in_audio_filter; // the first filter in the audio chain
298  AVFilterContext *out_audio_filter; // the last filter in the audio chain
299  AVFilterGraph *agraph; // audio filter graph
300 
302 
304 } VideoState;
305 
306 /* options specified by the user */
308 static const char *input_filename;
309 static const char *window_title;
310 static int default_width = 640;
311 static int default_height = 480;
312 static int screen_width = 0;
313 static int screen_height = 0;
314 static int screen_left = SDL_WINDOWPOS_CENTERED;
315 static int screen_top = SDL_WINDOWPOS_CENTERED;
316 static int audio_disable;
317 static int video_disable;
318 static int subtitle_disable;
319 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
320 static int seek_by_bytes = -1;
321 static float seek_interval = 10;
322 static int display_disable;
323 static int borderless;
324 static int alwaysontop;
325 static int startup_volume = 100;
326 static int show_status = -1;
328 static int64_t start_time = AV_NOPTS_VALUE;
329 static int64_t duration = AV_NOPTS_VALUE;
330 static int fast = 0;
331 static int genpts = 0;
332 static int lowres = 0;
333 static int decoder_reorder_pts = -1;
334 static int autoexit;
335 static int exit_on_keydown;
336 static int exit_on_mousedown;
337 static int loop = 1;
338 static int framedrop = -1;
339 static int infinite_buffer = -1;
340 static enum ShowMode show_mode = SHOW_MODE_NONE;
341 static const char *audio_codec_name;
342 static const char *subtitle_codec_name;
343 static const char *video_codec_name;
344 double rdftspeed = 0.02;
345 static int64_t cursor_last_shown;
346 static int cursor_hidden = 0;
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 static int autorotate = 1;
351 static int find_stream_info = 1;
352 static int filter_nbthreads = 0;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
358 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
359 
360 static SDL_Window *window;
361 static SDL_Renderer *renderer;
362 static SDL_RendererInfo renderer_info = {0};
363 static SDL_AudioDeviceID audio_dev;
364 
365 static const struct TextureFormatEntry {
366  enum AVPixelFormat format;
367  int texture_fmt;
368 } sdl_texture_format_map[] = {
369  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
370  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
371  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
372  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
373  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
374  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
375  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
376  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
377  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
378  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
379  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
380  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
381  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
382  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
383  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
384  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
385  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
386  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
387  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
388  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
389 };
390 
391 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
392 {
393  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
394  if (ret < 0)
395  return ret;
396 
397  vfilters_list[nb_vfilters - 1] = arg;
398  return 0;
399 }
400 
401 static inline
402 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
403  enum AVSampleFormat fmt2, int64_t channel_count2)
404 {
405  /* If channel count == 1, planar and non-planar formats are the same */
406  if (channel_count1 == 1 && channel_count2 == 1)
407  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
408  else
409  return channel_count1 != channel_count2 || fmt1 != fmt2;
410 }
411 
412 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
413 {
414  MyAVPacketList pkt1;
415  int ret;
416 
417  if (q->abort_request)
418  return -1;
419 
420 
421  pkt1.pkt = pkt;
422  pkt1.serial = q->serial;
423 
424  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
425  if (ret < 0)
426  return ret;
427  q->nb_packets++;
428  q->size += pkt1.pkt->size + sizeof(pkt1);
429  q->duration += pkt1.pkt->duration;
430  /* XXX: should duplicate packet data in DV case */
431  SDL_CondSignal(q->cond);
432  return 0;
433 }
434 
435 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
436 {
437  AVPacket *pkt1;
438  int ret;
439 
440  pkt1 = av_packet_alloc();
441  if (!pkt1) {
442  av_packet_unref(pkt);
443  return -1;
444  }
445  av_packet_move_ref(pkt1, pkt);
446 
447  SDL_LockMutex(q->mutex);
448  ret = packet_queue_put_private(q, pkt1);
449  SDL_UnlockMutex(q->mutex);
450 
451  if (ret < 0)
452  av_packet_free(&pkt1);
453 
454  return ret;
455 }
456 
457 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
458 {
459  pkt->stream_index = stream_index;
460  return packet_queue_put(q, pkt);
461 }
462 
463 /* packet queue handling */
464 static int packet_queue_init(PacketQueue *q)
465 {
466  memset(q, 0, sizeof(PacketQueue));
467  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
468  if (!q->pkt_list)
469  return AVERROR(ENOMEM);
470  q->mutex = SDL_CreateMutex();
471  if (!q->mutex) {
472  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
473  return AVERROR(ENOMEM);
474  }
475  q->cond = SDL_CreateCond();
476  if (!q->cond) {
477  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
478  return AVERROR(ENOMEM);
479  }
480  q->abort_request = 1;
481  return 0;
482 }
483 
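/* packet_queue_flush(): drop every queued packet and bump the queue serial so decoders can detect the discontinuity (e.g. after a seek). */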
484 static void packet_queue_flush(PacketQueue *q)
485 {
486  MyAVPacketList pkt1;
487 
488  SDL_LockMutex(q->mutex);
489  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
490  av_packet_free(&pkt1.pkt);
491  q->nb_packets = 0;
492  q->size = 0;
493  q->duration = 0;
494  q->serial++;
495  SDL_UnlockMutex(q->mutex);
496 }
497 
498 static void packet_queue_destroy(PacketQueue *q)
499 {
500  packet_queue_flush(q);
501  av_fifo_freep2(&q->pkt_list);
502  SDL_DestroyMutex(q->mutex);
503  SDL_DestroyCond(q->cond);
504 }
505 
506 static void packet_queue_abort(PacketQueue *q)
507 {
508  SDL_LockMutex(q->mutex);
509 
510  q->abort_request = 1;
511 
512  SDL_CondSignal(q->cond);
513 
514  SDL_UnlockMutex(q->mutex);
515 }
516 
517 static void packet_queue_start(PacketQueue *q)
518 {
519  SDL_LockMutex(q->mutex);
520  q->abort_request = 0;
521  q->serial++;
522  SDL_UnlockMutex(q->mutex);
523 }
524 
525 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
526 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
527 {
528  MyAVPacketList pkt1;
529  int ret;
530 
531  SDL_LockMutex(q->mutex);
532 
533  for (;;) {
534  if (q->abort_request) {
535  ret = -1;
536  break;
537  }
538 
539  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
540  q->nb_packets--;
541  q->size -= pkt1.pkt->size + sizeof(pkt1);
542  q->duration -= pkt1.pkt->duration;
543  av_packet_move_ref(pkt, pkt1.pkt);
544  if (serial)
545  *serial = pkt1.serial;
546  av_packet_free(&pkt1.pkt);
547  ret = 1;
548  break;
549  } else if (!block) {
550  ret = 0;
551  break;
552  } else {
553  SDL_CondWait(q->cond, q->mutex);
554  }
555  }
556  SDL_UnlockMutex(q->mutex);
557  return ret;
558 }
559 
560 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
561  memset(d, 0, sizeof(Decoder));
562  d->pkt = av_packet_alloc();
563  if (!d->pkt)
564  return AVERROR(ENOMEM);
565  d->avctx = avctx;
566  d->queue = queue;
567  d->empty_queue_cond = empty_queue_cond;
568  d->start_pts = AV_NOPTS_VALUE;
569  d->pkt_serial = -1;
570  return 0;
571 }
572 
573 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
574  int ret = AVERROR(EAGAIN);
575 
576  for (;;) {
577  if (d->queue->serial == d->pkt_serial) {
578  do {
579  if (d->queue->abort_request)
580  return -1;
581 
582  switch (d->avctx->codec_type) {
583  case AVMEDIA_TYPE_VIDEO:
584  ret = avcodec_receive_frame(d->avctx, frame);
585  if (ret >= 0) {
586  if (decoder_reorder_pts == -1) {
587  frame->pts = frame->best_effort_timestamp;
588  } else if (!decoder_reorder_pts) {
589  frame->pts = frame->pkt_dts;
590  }
591  }
592  break;
593  case AVMEDIA_TYPE_AUDIO:
594  ret = avcodec_receive_frame(d->avctx, frame);
595  if (ret >= 0) {
596  AVRational tb = (AVRational){1, frame->sample_rate};
597  if (frame->pts != AV_NOPTS_VALUE)
598  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
599  else if (d->next_pts != AV_NOPTS_VALUE)
600  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
601  if (frame->pts != AV_NOPTS_VALUE) {
602  d->next_pts = frame->pts + frame->nb_samples;
603  d->next_pts_tb = tb;
604  }
605  }
606  break;
607  }
608  if (ret == AVERROR_EOF) {
609  d->finished = d->pkt_serial;
610  avcodec_flush_buffers(d->avctx);
611  return 0;
612  }
613  if (ret >= 0)
614  return 1;
615  } while (ret != AVERROR(EAGAIN));
616  }
617 
618  do {
619  if (d->queue->nb_packets == 0)
620  SDL_CondSignal(d->empty_queue_cond);
621  if (d->packet_pending) {
622  d->packet_pending = 0;
623  } else {
624  int old_serial = d->pkt_serial;
625  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
626  return -1;
627  if (old_serial != d->pkt_serial) {
628  avcodec_flush_buffers(d->avctx);
629  d->finished = 0;
630  d->next_pts = d->start_pts;
631  d->next_pts_tb = d->start_pts_tb;
632  }
633  }
634  if (d->queue->serial == d->pkt_serial)
635  break;
636  av_packet_unref(d->pkt);
637  } while (1);
638 
639  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
640  int got_frame = 0;
641  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
642  if (ret < 0) {
643  ret = AVERROR(EAGAIN);
644  } else {
645  if (got_frame && !d->pkt->data) {
646  d->packet_pending = 1;
647  }
648  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
649  }
650  av_packet_unref(d->pkt);
651  } else {
652  if (d->pkt->buf && !d->pkt->opaque_ref) {
653  FrameData *fd;
654 
655  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
656  if (!d->pkt->opaque_ref)
657  return AVERROR(ENOMEM);
658  fd = (FrameData*)d->pkt->opaque_ref->data;
659  fd->pkt_pos = d->pkt->pos;
660  }
661 
662  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
663  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
664  d->packet_pending = 1;
665  } else {
666  av_packet_unref(d->pkt);
667  }
668  }
669  }
670 }
671 
672 static void decoder_destroy(Decoder *d) {
673  av_packet_free(&d->pkt);
674  avcodec_free_context(&d->avctx);
675 }
676 
677 static void frame_queue_unref_item(Frame *vp)
678 {
679  av_frame_unref(vp->frame);
680  avsubtitle_free(&vp->sub);
681 }
682 
683 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
684 {
685  int i;
686  memset(f, 0, sizeof(FrameQueue));
687  if (!(f->mutex = SDL_CreateMutex())) {
688  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
689  return AVERROR(ENOMEM);
690  }
691  if (!(f->cond = SDL_CreateCond())) {
692  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
693  return AVERROR(ENOMEM);
694  }
695  f->pktq = pktq;
696  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
697  f->keep_last = !!keep_last;
698  for (i = 0; i < f->max_size; i++)
699  if (!(f->queue[i].frame = av_frame_alloc()))
700  return AVERROR(ENOMEM);
701  return 0;
702 }
703 
704 static void frame_queue_destroy(FrameQueue *f)
705 {
706  int i;
707  for (i = 0; i < f->max_size; i++) {
708  Frame *vp = &f->queue[i];
709  frame_queue_unref_item(vp);
710  av_frame_free(&vp->frame);
711  }
712  SDL_DestroyMutex(f->mutex);
713  SDL_DestroyCond(f->cond);
714 }
715 
716 static void frame_queue_signal(FrameQueue *f)
717 {
718  SDL_LockMutex(f->mutex);
719  SDL_CondSignal(f->cond);
720  SDL_UnlockMutex(f->mutex);
721 }
722 
723 static Frame *frame_queue_peek(FrameQueue *f)
724 {
725  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
726 }
727 
728 static Frame *frame_queue_peek_next(FrameQueue *f)
729 {
730  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
731 }
732 
733 static Frame *frame_queue_peek_last(FrameQueue *f)
734 {
735  return &f->queue[f->rindex];
736 }
737 
738 static Frame *frame_queue_peek_writable(FrameQueue *f)
739 {
740  /* wait until we have space to put a new frame */
741  SDL_LockMutex(f->mutex);
742  while (f->size >= f->max_size &&
743  !f->pktq->abort_request) {
744  SDL_CondWait(f->cond, f->mutex);
745  }
746  SDL_UnlockMutex(f->mutex);
747 
748  if (f->pktq->abort_request)
749  return NULL;
750 
751  return &f->queue[f->windex];
752 }
753 
754 static Frame *frame_queue_peek_readable(FrameQueue *f)
755 {
756  /* wait until we have a readable new frame */
757  SDL_LockMutex(f->mutex);
758  while (f->size - f->rindex_shown <= 0 &&
759  !f->pktq->abort_request) {
760  SDL_CondWait(f->cond, f->mutex);
761  }
762  SDL_UnlockMutex(f->mutex);
763 
764  if (f->pktq->abort_request)
765  return NULL;
766 
767  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
768 }
769 
770 static void frame_queue_push(FrameQueue *f)
771 {
772  if (++f->windex == f->max_size)
773  f->windex = 0;
774  SDL_LockMutex(f->mutex);
775  f->size++;
776  SDL_CondSignal(f->cond);
777  SDL_UnlockMutex(f->mutex);
778 }
779 
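/* frame_queue_next(): advance the read index; when keep_last is set, the first call only marks the current frame as shown so it can stay on screen. */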
780 static void frame_queue_next(FrameQueue *f)
781 {
782  if (f->keep_last && !f->rindex_shown) {
783  f->rindex_shown = 1;
784  return;
785  }
786  frame_queue_unref_item(&f->queue[f->rindex]);
787  if (++f->rindex == f->max_size)
788  f->rindex = 0;
789  SDL_LockMutex(f->mutex);
790  f->size--;
791  SDL_CondSignal(f->cond);
792  SDL_UnlockMutex(f->mutex);
793 }
794 
795 /* return the number of undisplayed frames in the queue */
796 static int frame_queue_nb_remaining(FrameQueue *f)
797 {
798  return f->size - f->rindex_shown;
799 }
800 
801 /* return last shown position */
802 static int64_t frame_queue_last_pos(FrameQueue *f)
803 {
804  Frame *fp = &f->queue[f->rindex];
805  if (f->rindex_shown && fp->serial == f->pktq->serial)
806  return fp->pos;
807  else
808  return -1;
809 }
810 
811 static void decoder_abort(Decoder *d, FrameQueue *fq)
812 {
813  packet_queue_abort(d->queue);
814  frame_queue_signal(fq);
815  SDL_WaitThread(d->decoder_tid, NULL);
816  d->decoder_tid = NULL;
817  packet_queue_flush(d->queue);
818 }
819 
820 static inline void fill_rectangle(int x, int y, int w, int h)
821 {
822  SDL_Rect rect;
823  rect.x = x;
824  rect.y = y;
825  rect.w = w;
826  rect.h = h;
827  if (w && h)
828  SDL_RenderFillRect(renderer, &rect);
829 }
830 
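/* realloc_texture(): (re)create the SDL texture only when the requested size or pixel format differs from the existing one, optionally clearing its contents. */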
831 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
832 {
833  Uint32 format;
834  int access, w, h;
835  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
836  void *pixels;
837  int pitch;
838  if (*texture)
839  SDL_DestroyTexture(*texture);
840  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
841  return -1;
842  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
843  return -1;
844  if (init_texture) {
845  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
846  return -1;
847  memset(pixels, 0, pitch * new_height);
848  SDL_UnlockTexture(*texture);
849  }
850  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
851  }
852  return 0;
853 }
854 
855 static void calculate_display_rect(SDL_Rect *rect,
856  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
857  int pic_width, int pic_height, AVRational pic_sar)
858 {
859  AVRational aspect_ratio = pic_sar;
860  int64_t width, height, x, y;
861 
862  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
863  aspect_ratio = av_make_q(1, 1);
864 
865  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
866 
867  /* XXX: we suppose the screen has a 1.0 pixel ratio */
868  height = scr_height;
869  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
870  if (width > scr_width) {
871  width = scr_width;
872  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
873  }
874  x = (scr_width - width) / 2;
875  y = (scr_height - height) / 2;
876  rect->x = scr_xleft + x;
877  rect->y = scr_ytop + y;
878  rect->w = FFMAX((int)width, 1);
879  rect->h = FFMAX((int)height, 1);
880 }
881 
882 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
883 {
884  int i;
885  *sdl_blendmode = SDL_BLENDMODE_NONE;
886  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
887  if (format == AV_PIX_FMT_RGB32 ||
888  format == AV_PIX_FMT_RGB32_1 ||
889  format == AV_PIX_FMT_BGR32 ||
890  format == AV_PIX_FMT_BGR32_1)
891  *sdl_blendmode = SDL_BLENDMODE_BLEND;
892  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
893  if (format == sdl_texture_format_map[i].format) {
894  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
895  return;
896  }
897  }
898 }
899 
900 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
901 {
902  int ret = 0;
903  Uint32 sdl_pix_fmt;
904  SDL_BlendMode sdl_blendmode;
905  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
906  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
907  return -1;
908  switch (sdl_pix_fmt) {
909  case SDL_PIXELFORMAT_IYUV:
910  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
911  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
912  frame->data[1], frame->linesize[1],
913  frame->data[2], frame->linesize[2]);
914  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
915  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
916  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
917  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
918  } else {
919  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
920  return -1;
921  }
922  break;
923  default:
924  if (frame->linesize[0] < 0) {
925  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
926  } else {
927  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
928  }
929  break;
930  }
931  return ret;
932 }
933 
934 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
935 {
936 #if SDL_VERSION_ATLEAST(2,0,8)
937  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
938  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
939  if (frame->color_range == AVCOL_RANGE_JPEG)
940  mode = SDL_YUV_CONVERSION_JPEG;
941  else if (frame->colorspace == AVCOL_SPC_BT709)
942  mode = SDL_YUV_CONVERSION_BT709;
943  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
944  mode = SDL_YUV_CONVERSION_BT601;
945  }
946  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
947 #endif
948 }
949 
950 static void video_image_display(VideoState *is)
951 {
952  Frame *vp;
953  Frame *sp = NULL;
954  SDL_Rect rect;
955 
956  vp = frame_queue_peek_last(&is->pictq);
957  if (is->subtitle_st) {
958  if (frame_queue_nb_remaining(&is->subpq) > 0) {
959  sp = frame_queue_peek(&is->subpq);
960 
961  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
962  if (!sp->uploaded) {
963  uint8_t* pixels[4];
964  int pitch[4];
965  int i;
966  if (!sp->width || !sp->height) {
967  sp->width = vp->width;
968  sp->height = vp->height;
969  }
970  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
971  return;
972 
973  for (i = 0; i < sp->sub.num_rects; i++) {
974  AVSubtitleRect *sub_rect = sp->sub.rects[i];
975 
976  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
977  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
978  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
979  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
980 
981  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
982  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
983  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
984  0, NULL, NULL, NULL);
985  if (!is->sub_convert_ctx) {
986  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
987  return;
988  }
989  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
990  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
991  0, sub_rect->h, pixels, pitch);
992  SDL_UnlockTexture(is->sub_texture);
993  }
994  }
995  sp->uploaded = 1;
996  }
997  } else
998  sp = NULL;
999  }
1000  }
1001 
1002  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1003  set_sdl_yuv_conversion_mode(vp->frame);
1004 
1005  if (!vp->uploaded) {
1006  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1007  set_sdl_yuv_conversion_mode(NULL);
1008  return;
1009  }
1010  vp->uploaded = 1;
1011  vp->flip_v = vp->frame->linesize[0] < 0;
1012  }
1013 
1014  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1015  set_sdl_yuv_conversion_mode(NULL);
1016  if (sp) {
1017 #if USE_ONEPASS_SUBTITLE_RENDER
1018  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1019 #else
1020  int i;
1021  double xratio = (double)rect.w / (double)sp->width;
1022  double yratio = (double)rect.h / (double)sp->height;
1023  for (i = 0; i < sp->sub.num_rects; i++) {
1024  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1025  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1026  .y = rect.y + sub_rect->y * yratio,
1027  .w = sub_rect->w * xratio,
1028  .h = sub_rect->h * yratio};
1029  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1030  }
1031 #endif
1032  }
1033 }
1034 
1035 static inline int compute_mod(int a, int b)
1036 {
1037  return a < 0 ? a%b + b : a%b;
1038 }
1039 
1040 static void video_audio_display(VideoState *s)
1041 {
1042  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1043  int ch, channels, h, h2;
1044  int64_t time_diff;
1045  int rdft_bits, nb_freq;
1046 
1047  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1048  ;
1049  nb_freq = 1 << (rdft_bits - 1);
1050 
1051  /* compute display index : center on currently output samples */
1052  channels = s->audio_tgt.ch_layout.nb_channels;
1053  nb_display_channels = channels;
1054  if (!s->paused) {
1055  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1056  n = 2 * channels;
1057  delay = s->audio_write_buf_size;
1058  delay /= n;
1059 
1060  /* to be more precise, we take into account the time spent since
1061  the last buffer computation */
1062  if (audio_callback_time) {
1063  time_diff = av_gettime_relative() - audio_callback_time;
1064  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1065  }
1066 
1067  delay += 2 * data_used;
1068  if (delay < data_used)
1069  delay = data_used;
1070 
1071  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1072  if (s->show_mode == SHOW_MODE_WAVES) {
1073  h = INT_MIN;
1074  for (i = 0; i < 1000; i += channels) {
1075  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1076  int a = s->sample_array[idx];
1077  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1078  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1079  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1080  int score = a - d;
1081  if (h < score && (b ^ c) < 0) {
1082  h = score;
1083  i_start = idx;
1084  }
1085  }
1086  }
1087 
1088  s->last_i_start = i_start;
1089  } else {
1090  i_start = s->last_i_start;
1091  }
1092 
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1095 
1096  /* total height for one channel */
1097  h = s->height / nb_display_channels;
1098  /* graph height / 2 */
1099  h2 = (h * 9) / 20;
1100  for (ch = 0; ch < nb_display_channels; ch++) {
1101  i = i_start + ch;
1102  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1103  for (x = 0; x < s->width; x++) {
1104  y = (s->sample_array[i] * h2) >> 15;
1105  if (y < 0) {
1106  y = -y;
1107  ys = y1 - y;
1108  } else {
1109  ys = y1;
1110  }
1111  fill_rectangle(s->xleft + x, ys, 1, y);
1112  i += channels;
1113  if (i >= SAMPLE_ARRAY_SIZE)
1114  i -= SAMPLE_ARRAY_SIZE;
1115  }
1116  }
1117 
1118  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1119 
1120  for (ch = 1; ch < nb_display_channels; ch++) {
1121  y = s->ytop + ch * h;
1122  fill_rectangle(s->xleft, y, s->width, 1);
1123  }
1124  } else {
1125  int err = 0;
1126  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1127  return;
1128 
1129  if (s->xpos >= s->width)
1130  s->xpos = 0;
1131  nb_display_channels= FFMIN(nb_display_channels, 2);
1132  if (rdft_bits != s->rdft_bits) {
1133  const float rdft_scale = 1.0;
1134  av_tx_uninit(&s->rdft);
1135  av_freep(&s->real_data);
1136  av_freep(&s->rdft_data);
1137  s->rdft_bits = rdft_bits;
1138  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1139  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1140  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1141  0, 1 << rdft_bits, &rdft_scale, 0);
1142  }
1143  if (err < 0 || !s->rdft_data) {
1144  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1145  s->show_mode = SHOW_MODE_WAVES;
1146  } else {
1147  float *data_in[2];
1148  AVComplexFloat *data[2];
1149  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1150  uint32_t *pixels;
1151  int pitch;
1152  for (ch = 0; ch < nb_display_channels; ch++) {
1153  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1154  data[ch] = s->rdft_data + nb_freq * ch;
1155  i = i_start + ch;
1156  for (x = 0; x < 2 * nb_freq; x++) {
1157  double w = (x-nb_freq) * (1.0 / nb_freq);
1158  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1159  i += channels;
1160  if (i >= SAMPLE_ARRAY_SIZE)
1161  i -= SAMPLE_ARRAY_SIZE;
1162  }
1163  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1164  data[ch][0].im = data[ch][nb_freq].re;
1165  data[ch][nb_freq].re = 0;
1166  }
1167  /* Least efficient way to do this, we should of course
1168  * directly access it but it is more than fast enough. */
1169  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1170  pitch >>= 2;
1171  pixels += pitch * s->height;
1172  for (y = 0; y < s->height; y++) {
1173  double w = 1 / sqrt(nb_freq);
1174  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1175  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1176  : a;
1177  a = FFMIN(a, 255);
1178  b = FFMIN(b, 255);
1179  pixels -= pitch;
1180  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1181  }
1182  SDL_UnlockTexture(s->vis_texture);
1183  }
1184  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1185  }
1186  if (!s->paused)
1187  s->xpos++;
1188  }
1189 }
1190 
1191 static void stream_component_close(VideoState *is, int stream_index)
1192 {
1193  AVFormatContext *ic = is->ic;
1194  AVCodecParameters *codecpar;
1195 
1196  if (stream_index < 0 || stream_index >= ic->nb_streams)
1197  return;
1198  codecpar = ic->streams[stream_index]->codecpar;
1199 
1200  switch (codecpar->codec_type) {
1201  case AVMEDIA_TYPE_AUDIO:
1202  decoder_abort(&is->auddec, &is->sampq);
1203  SDL_CloseAudioDevice(audio_dev);
1204  decoder_destroy(&is->auddec);
1205  swr_free(&is->swr_ctx);
1206  av_freep(&is->audio_buf1);
1207  is->audio_buf1_size = 0;
1208  is->audio_buf = NULL;
1209 
1210  if (is->rdft) {
1211  av_tx_uninit(&is->rdft);
1212  av_freep(&is->real_data);
1213  av_freep(&is->rdft_data);
1214  is->rdft = NULL;
1215  is->rdft_bits = 0;
1216  }
1217  break;
1218  case AVMEDIA_TYPE_VIDEO:
1219  decoder_abort(&is->viddec, &is->pictq);
1220  decoder_destroy(&is->viddec);
1221  break;
1222  case AVMEDIA_TYPE_SUBTITLE:
1223  decoder_abort(&is->subdec, &is->subpq);
1224  decoder_destroy(&is->subdec);
1225  break;
1226  default:
1227  break;
1228  }
1229 
1230  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1231  switch (codecpar->codec_type) {
1232  case AVMEDIA_TYPE_AUDIO:
1233  is->audio_st = NULL;
1234  is->audio_stream = -1;
1235  break;
1236  case AVMEDIA_TYPE_VIDEO:
1237  is->video_st = NULL;
1238  is->video_stream = -1;
1239  break;
1240  case AVMEDIA_TYPE_SUBTITLE:
1241  is->subtitle_st = NULL;
1242  is->subtitle_stream = -1;
1243  break;
1244  default:
1245  break;
1246  }
1247 }
1248 
1249 static void stream_close(VideoState *is)
1250 {
1251  /* XXX: use a special url_shutdown call to abort parse cleanly */
1252  is->abort_request = 1;
1253  SDL_WaitThread(is->read_tid, NULL);
1254 
1255  /* close each stream */
1256  if (is->audio_stream >= 0)
1257  stream_component_close(is, is->audio_stream);
1258  if (is->video_stream >= 0)
1259  stream_component_close(is, is->video_stream);
1260  if (is->subtitle_stream >= 0)
1261  stream_component_close(is, is->subtitle_stream);
1262 
1263  avformat_close_input(&is->ic);
1264 
1265  packet_queue_destroy(&is->videoq);
1266  packet_queue_destroy(&is->audioq);
1267  packet_queue_destroy(&is->subtitleq);
1268 
1269  /* free all pictures */
1270  frame_queue_destroy(&is->pictq);
1271  frame_queue_destroy(&is->sampq);
1272  frame_queue_destroy(&is->subpq);
1273  SDL_DestroyCond(is->continue_read_thread);
1274  sws_freeContext(is->sub_convert_ctx);
1275  av_free(is->filename);
1276  if (is->vis_texture)
1277  SDL_DestroyTexture(is->vis_texture);
1278  if (is->vid_texture)
1279  SDL_DestroyTexture(is->vid_texture);
1280  if (is->sub_texture)
1281  SDL_DestroyTexture(is->sub_texture);
1282  av_free(is);
1283 }
1284 
1285 static void do_exit(VideoState *is)
1286 {
1287  if (is) {
1288  stream_close(is);
1289  }
1290  if (renderer)
1291  SDL_DestroyRenderer(renderer);
1292  if (window)
1293  SDL_DestroyWindow(window);
1294  uninit_opts();
1295  av_freep(&vfilters_list);
1296  avformat_network_deinit();
1297  if (show_status)
1298  printf("\n");
1299  SDL_Quit();
1300  av_log(NULL, AV_LOG_QUIET, "%s", "");
1301  exit(0);
1302 }
1303 
1304 static void sigterm_handler(int sig)
1305 {
1306  exit(123);
1307 }
1308 
1309 static void set_default_window_size(int width, int height, AVRational sar)
1310 {
1311  SDL_Rect rect;
1312  int max_width = screen_width ? screen_width : INT_MAX;
1313  int max_height = screen_height ? screen_height : INT_MAX;
1314  if (max_width == INT_MAX && max_height == INT_MAX)
1315  max_height = height;
1316  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1317  default_width = rect.w;
1318  default_height = rect.h;
1319 }
1320 
1321 static int video_open(VideoState *is)
1322 {
1323  int w,h;
1324 
1325  w = screen_width ? screen_width : default_width;
1326  h = screen_height ? screen_height : default_height;
1327 
1328  if (!window_title)
1329  window_title = input_filename;
1330  SDL_SetWindowTitle(window, window_title);
1331 
1332  SDL_SetWindowSize(window, w, h);
1333  SDL_SetWindowPosition(window, screen_left, screen_top);
1334  if (is_full_screen)
1335  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1336  SDL_ShowWindow(window);
1337 
1338  is->width = w;
1339  is->height = h;
1340 
1341  return 0;
1342 }
1343 
1344 /* display the current picture, if any */
1345 static void video_display(VideoState *is)
1346 {
1347  if (!is->width)
1348  video_open(is);
1349 
1350  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1351  SDL_RenderClear(renderer);
1352  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1353  video_audio_display(is);
1354  else if (is->video_st)
1355  video_image_display(is);
1356  SDL_RenderPresent(renderer);
1357 }
1358 
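/* get_clock(): extrapolate the clock to the current time; pts_drift + time reproduces the pts at unity speed, and the last term compensates for a non-1.0 clock speed since the last update. NAN signals an obsolete serial. */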
1359 static double get_clock(Clock *c)
1360 {
1361  if (*c->queue_serial != c->serial)
1362  return NAN;
1363  if (c->paused) {
1364  return c->pts;
1365  } else {
1366  double time = av_gettime_relative() / 1000000.0;
1367  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1368  }
1369 }
1370 
1371 static void set_clock_at(Clock *c, double pts, int serial, double time)
1372 {
1373  c->pts = pts;
1374  c->last_updated = time;
1375  c->pts_drift = c->pts - time;
1376  c->serial = serial;
1377 }
1378 
1379 static void set_clock(Clock *c, double pts, int serial)
1380 {
1381  double time = av_gettime_relative() / 1000000.0;
1382  set_clock_at(c, pts, serial, time);
1383 }
1384 
1385 static void set_clock_speed(Clock *c, double speed)
1386 {
1387  set_clock(c, get_clock(c), c->serial);
1388  c->speed = speed;
1389 }
1390 
1391 static void init_clock(Clock *c, int *queue_serial)
1392 {
1393  c->speed = 1.0;
1394  c->paused = 0;
1395  c->queue_serial = queue_serial;
1396  set_clock(c, NAN, -1);
1397 }
1398 
1399 static void sync_clock_to_slave(Clock *c, Clock *slave)
1400 {
1401  double clock = get_clock(c);
1402  double slave_clock = get_clock(slave);
1403  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1404  set_clock(c, slave_clock, slave->serial);
1405 }
1406 
1407 static int get_master_sync_type(VideoState *is) {
1408  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1409  if (is->video_st)
1410  return AV_SYNC_VIDEO_MASTER;
1411  else
1412  return AV_SYNC_AUDIO_MASTER;
1413  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1414  if (is->audio_st)
1415  return AV_SYNC_AUDIO_MASTER;
1416  else
1417  return AV_SYNC_EXTERNAL_CLOCK;
1418  } else {
1419  return AV_SYNC_EXTERNAL_CLOCK;
1420  }
1421 }
1422 
1423 /* get the current master clock value */
1424 static double get_master_clock(VideoState *is)
1425 {
1426  double val;
1427 
1428  switch (get_master_sync_type(is)) {
1429  case AV_SYNC_VIDEO_MASTER:
1430  val = get_clock(&is->vidclk);
1431  break;
1432  case AV_SYNC_AUDIO_MASTER:
1433  val = get_clock(&is->audclk);
1434  break;
1435  default:
1436  val = get_clock(&is->extclk);
1437  break;
1438  }
1439  return val;
1440 }
1441 
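/* check_external_clock_speed(): nudge the external clock slower when the packet queues run low and faster when they overfill, keeping realtime streams buffered. */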
1442 static void check_external_clock_speed(VideoState *is) {
1443  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1444  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1445  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1446  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1447  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1448  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1449  } else {
1450  double speed = is->extclk.speed;
1451  if (speed != 1.0)
1452  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1453  }
1454 }
1455 
1456 /* seek in the stream */
1457 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1458 {
1459  if (!is->seek_req) {
1460  is->seek_pos = pos;
1461  is->seek_rel = rel;
1462  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1463  if (by_bytes)
1464  is->seek_flags |= AVSEEK_FLAG_BYTE;
1465  is->seek_req = 1;
1466  SDL_CondSignal(is->continue_read_thread);
1467  }
1468 }
1469 
1470 /* pause or resume the video */
1471 static void stream_toggle_pause(VideoState *is)
1472 {
1473  if (is->paused) {
1474  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1475  if (is->read_pause_return != AVERROR(ENOSYS)) {
1476  is->vidclk.paused = 0;
1477  }
1478  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1479  }
1480  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1481  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1482 }
1483 
1484 static void toggle_pause(VideoState *is)
1485 {
1486  stream_toggle_pause(is);
1487  is->step = 0;
1488 }
1489 
1490 static void toggle_mute(VideoState *is)
1491 {
1492  is->muted = !is->muted;
1493 }
1494 
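/* update_volume(): convert the current volume to dB, apply the step, and convert back, so each key press changes loudness by a roughly constant perceptual amount. */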
1495 static void update_volume(VideoState *is, int sign, double step)
1496 {
1497  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1498  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1499  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1500 }
1501 
1502 static void step_to_next_frame(VideoState *is)
1503 {
1504  /* if the stream is paused unpause it, then step */
1505  if (is->paused)
1506  stream_toggle_pause(is);
1507  is->step = 1;
1508 }
1509 
1510 static double compute_target_delay(double delay, VideoState *is)
1511 {
1512  double sync_threshold, diff = 0;
1513 
1514  /* update delay to follow master synchronisation source */
1515  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1516  /* if video is slave, we try to correct big delays by
1517  duplicating or deleting a frame */
1518  diff = get_clock(&is->vidclk) - get_master_clock(is);
1519 
1520  /* skip or repeat frame. We take into account the
1521  delay to compute the threshold. I still don't know
1522  if it is the best guess */
1523  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1524  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1525  if (diff <= -sync_threshold)
1526  delay = FFMAX(0, delay + diff);
1527  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1528  delay = delay + diff;
1529  else if (diff >= sync_threshold)
1530  delay = 2 * delay;
1531  }
1532  }
1533 
1534  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1535  delay, -diff);
1536 
1537  return delay;
1538 }
1539 
1540 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1541  if (vp->serial == nextvp->serial) {
1542  double duration = nextvp->pts - vp->pts;
1543  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1544  return vp->duration;
1545  else
1546  return duration;
1547  } else {
1548  return 0.0;
1549  }
1550 }
1551 
1552 static void update_video_pts(VideoState *is, double pts, int serial)
1553 {
1554  /* update current video pts */
1555  set_clock(&is->vidclk, pts, serial);
1556  sync_clock_to_slave(&is->extclk, &is->vidclk);
1557 }
1558 
1559 /* called to display each frame */
1560 static void video_refresh(void *opaque, double *remaining_time)
1561 {
1562  VideoState *is = opaque;
1563  double time;
1564 
1565  Frame *sp, *sp2;
1566 
1567  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1568  check_external_clock_speed(is);
1569 
1570  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1571  time = av_gettime_relative() / 1000000.0;
1572  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1573  video_display(is);
1574  is->last_vis_time = time;
1575  }
1576  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1577  }
1578 
1579  if (is->video_st) {
1580 retry:
1581  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1582  // nothing to do, no picture to display in the queue
1583  } else {
1584  double last_duration, duration, delay;
1585  Frame *vp, *lastvp;
1586 
1587  /* dequeue the picture */
1588  lastvp = frame_queue_peek_last(&is->pictq);
1589  vp = frame_queue_peek(&is->pictq);
1590 
1591  if (vp->serial != is->videoq.serial) {
1592  frame_queue_next(&is->pictq);
1593  goto retry;
1594  }
1595 
1596  if (lastvp->serial != vp->serial)
1597  is->frame_timer = av_gettime_relative() / 1000000.0;
1598 
1599  if (is->paused)
1600  goto display;
1601 
1602  /* compute nominal last_duration */
1603  last_duration = vp_duration(is, lastvp, vp);
1604  delay = compute_target_delay(last_duration, is);
1605 
1606  time= av_gettime_relative()/1000000.0;
1607  if (time < is->frame_timer + delay) {
1608  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1609  goto display;
1610  }
1611 
1612  is->frame_timer += delay;
1613  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1614  is->frame_timer = time;
1615 
1616  SDL_LockMutex(is->pictq.mutex);
1617  if (!isnan(vp->pts))
1618  update_video_pts(is, vp->pts, vp->serial);
1619  SDL_UnlockMutex(is->pictq.mutex);
1620 
1621  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1622  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1623  duration = vp_duration(is, vp, nextvp);
1624  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1625  is->frame_drops_late++;
1626  frame_queue_next(&is->pictq);
1627  goto retry;
1628  }
1629  }
1630 
1631  if (is->subtitle_st) {
1632  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1633  sp = frame_queue_peek(&is->subpq);
1634 
1635  if (frame_queue_nb_remaining(&is->subpq) > 1)
1636  sp2 = frame_queue_peek_next(&is->subpq);
1637  else
1638  sp2 = NULL;
1639 
1640  if (sp->serial != is->subtitleq.serial
1641  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1642  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1643  {
1644  if (sp->uploaded) {
1645  int i;
1646  for (i = 0; i < sp->sub.num_rects; i++) {
1647  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1648  uint8_t *pixels;
1649  int pitch, j;
1650 
1651  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1652  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1653  memset(pixels, 0, sub_rect->w << 2);
1654  SDL_UnlockTexture(is->sub_texture);
1655  }
1656  }
1657  }
1658  frame_queue_next(&is->subpq);
1659  } else {
1660  break;
1661  }
1662  }
1663  }
1664 
1665  frame_queue_next(&is->pictq);
1666  is->force_refresh = 1;
1667 
1668  if (is->step && !is->paused)
1669  stream_toggle_pause(is);
1670  }
1671 display:
1672  /* display picture */
1673  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1674  video_display(is);
1675  }
1676  is->force_refresh = 0;
1677  if (show_status) {
1678  AVBPrint buf;
1679  static int64_t last_time;
1680  int64_t cur_time;
1681  int aqsize, vqsize, sqsize;
1682  double av_diff;
1683 
1684  cur_time = av_gettime_relative();
1685  if (!last_time || (cur_time - last_time) >= 30000) {
1686  aqsize = 0;
1687  vqsize = 0;
1688  sqsize = 0;
1689  if (is->audio_st)
1690  aqsize = is->audioq.size;
1691  if (is->video_st)
1692  vqsize = is->videoq.size;
1693  if (is->subtitle_st)
1694  sqsize = is->subtitleq.size;
1695  av_diff = 0;
1696  if (is->audio_st && is->video_st)
1697  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1698  else if (is->video_st)
1699  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1700  else if (is->audio_st)
1701  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1702 
1703  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1704  av_bprintf(&buf,
1705  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1706  get_master_clock(is),
1707  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1708  av_diff,
1709  is->frame_drops_early + is->frame_drops_late,
1710  aqsize / 1024,
1711  vqsize / 1024,
1712  sqsize,
1713  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1714  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1715 
1716  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1717  fprintf(stderr, "%s", buf.str);
1718  else
1719  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1720 
1721  fflush(stderr);
1722  av_bprint_finalize(&buf, NULL);
1723 
1724  last_time = cur_time;
1725  }
1726  }
1727 }
1728 
1729 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1730 {
1731  Frame *vp;
1732 
1733 #if defined(DEBUG_SYNC)
1734  printf("frame_type=%c pts=%0.3f\n",
1735  av_get_picture_type_char(src_frame->pict_type), pts);
1736 #endif
1737 
1738  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1739  return -1;
1740 
1741  vp->sar = src_frame->sample_aspect_ratio;
1742  vp->uploaded = 0;
1743 
1744  vp->width = src_frame->width;
1745  vp->height = src_frame->height;
1746  vp->format = src_frame->format;
1747 
1748  vp->pts = pts;
1749  vp->duration = duration;
1750  vp->pos = pos;
1751  vp->serial = serial;
1752 
1753  set_default_window_size(vp->width, vp->height, vp->sar);
1754 
1755  av_frame_move_ref(vp->frame, src_frame);
1756  frame_queue_push(&is->pictq);
1757  return 0;
1758 }
1759 
1760 static int get_video_frame(VideoState *is, AVFrame *frame)
1761 {
1762  int got_picture;
1763 
1764  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1765  return -1;
1766 
1767  if (got_picture) {
1768  double dpts = NAN;
1769 
1770  if (frame->pts != AV_NOPTS_VALUE)
1771  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1772 
1773  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1774 
1775  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1776  if (frame->pts != AV_NOPTS_VALUE) {
1777  double diff = dpts - get_master_clock(is);
1778  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1779  diff - is->frame_last_filter_delay < 0 &&
1780  is->viddec.pkt_serial == is->vidclk.serial &&
1781  is->videoq.nb_packets) {
1782  is->frame_drops_early++;
1783  av_frame_unref(frame);
1784  got_picture = 0;
1785  }
1786  }
1787  }
1788  }
1789 
1790  return got_picture;
1791 }
1792 
1793 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1794  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1795 {
1796  int ret, i;
1797  int nb_filters = graph->nb_filters;
1798  AVFilterInOut *outputs = NULL, *inputs = NULL;
1799 
1800  if (filtergraph) {
1801  outputs = avfilter_inout_alloc();
1802  inputs = avfilter_inout_alloc();
1803  if (!outputs || !inputs) {
1804  ret = AVERROR(ENOMEM);
1805  goto fail;
1806  }
1807 
1808  outputs->name = av_strdup("in");
1809  outputs->filter_ctx = source_ctx;
1810  outputs->pad_idx = 0;
1811  outputs->next = NULL;
1812 
1813  inputs->name = av_strdup("out");
1814  inputs->filter_ctx = sink_ctx;
1815  inputs->pad_idx = 0;
1816  inputs->next = NULL;
1817 
1818  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1819  goto fail;
1820  } else {
1821  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1822  goto fail;
1823  }
1824 
1825  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1826  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1827  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1828 
1829  ret = avfilter_graph_config(graph, NULL);
1830 fail:
1831  avfilter_inout_free(&outputs);
1832  avfilter_inout_free(&inputs);
1833  return ret;
1834 }
1835 
1836 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1837 {
1838  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1839  char sws_flags_str[512] = "";
1840  char buffersrc_args[256];
1841  int ret;
1842  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1843  AVCodecParameters *codecpar = is->video_st->codecpar;
1844  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1845  const AVDictionaryEntry *e = NULL;
1846  int nb_pix_fmts = 0;
1847  int i, j;
1848 
1849  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1850  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1851  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1852  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1853  break;
1854  }
1855  }
1856  }
1857  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1858 
1859  while ((e = av_dict_iterate(sws_dict, e))) {
1860  if (!strcmp(e->key, "sws_flags")) {
1861  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1862  } else
1863  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1864  }
1865  if (strlen(sws_flags_str))
1866  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1867 
1868  graph->scale_sws_opts = av_strdup(sws_flags_str);
1869 
1870  snprintf(buffersrc_args, sizeof(buffersrc_args),
1871  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1872  frame->width, frame->height, frame->format,
1873  is->video_st->time_base.num, is->video_st->time_base.den,
1874  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1875  if (fr.num && fr.den)
1876  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1877 
1878  if ((ret = avfilter_graph_create_filter(&filt_src,
1879  avfilter_get_by_name("buffer"),
1880  "ffplay_buffer", buffersrc_args, NULL,
1881  graph)) < 0)
1882  goto fail;
1883 
1884  ret = avfilter_graph_create_filter(&filt_out,
1885  avfilter_get_by_name("buffersink"),
1886  "ffplay_buffersink", NULL, NULL, graph);
1887  if (ret < 0)
1888  goto fail;
1889 
1890  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1891  goto fail;
1892 
1893  last_filter = filt_out;
1894 
1895 /* Note: this macro adds a filter before the lastly added filter, so the
1896  * processing order of the filters is in reverse */
1897 #define INSERT_FILT(name, arg) do { \
1898  AVFilterContext *filt_ctx; \
1899  \
1900  ret = avfilter_graph_create_filter(&filt_ctx, \
1901  avfilter_get_by_name(name), \
1902  "ffplay_" name, arg, NULL, graph); \
1903  if (ret < 0) \
1904  goto fail; \
1905  \
1906  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  last_filter = filt_ctx; \
1911 } while (0)
1912 
1913  if (autorotate) {
1914  double theta = 0.0;
1915  int32_t *displaymatrix = NULL;
1916  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1917  if (sd)
1918  displaymatrix = (int32_t *)sd->data;
1919  if (!displaymatrix) {
1920  const AVPacketSideData *sd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1921  is->video_st->codecpar->nb_coded_side_data,
1922  AV_PKT_DATA_DISPLAYMATRIX);
1923  if (sd)
1924  displaymatrix = (int32_t *)sd->data;
1925  }
1926  theta = get_rotation(displaymatrix);
1927 
1928  if (fabs(theta - 90) < 1.0) {
1929  INSERT_FILT("transpose", "clock");
1930  } else if (fabs(theta - 180) < 1.0) {
1931  INSERT_FILT("hflip", NULL);
1932  INSERT_FILT("vflip", NULL);
1933  } else if (fabs(theta - 270) < 1.0) {
1934  INSERT_FILT("transpose", "cclock");
1935  } else if (fabs(theta) > 1.0) {
1936  char rotate_buf[64];
1937  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1938  INSERT_FILT("rotate", rotate_buf);
1939  }
1940  }
1941 
1942  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1943  goto fail;
1944 
1945  is->in_video_filter = filt_src;
1946  is->out_video_filter = filt_out;
1947 
1948 fail:
1949  return ret;
1950 }
1951 
1952 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1953 {
1954  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1955  int sample_rates[2] = { 0, -1 };
1956  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1957  char aresample_swr_opts[512] = "";
1958  const AVDictionaryEntry *e = NULL;
1959  AVBPrint bp;
1960  char asrc_args[256];
1961  int ret;
1962 
1963  avfilter_graph_free(&is->agraph);
1964  if (!(is->agraph = avfilter_graph_alloc()))
1965  return AVERROR(ENOMEM);
1966  is->agraph->nb_threads = filter_nbthreads;
1967 
1968  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
1969 
1970  while ((e = av_dict_iterate(swr_opts, e)))
1971  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1972  if (strlen(aresample_swr_opts))
1973  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1974  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1975 
1976  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
1977 
1978  ret = snprintf(asrc_args, sizeof(asrc_args),
1979  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
1980  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1981  1, is->audio_filter_src.freq, bp.str);
1982 
1983  ret = avfilter_graph_create_filter(&filt_asrc,
1984  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1985  asrc_args, NULL, is->agraph);
1986  if (ret < 0)
1987  goto end;
1988 
1989 
1990  ret = avfilter_graph_create_filter(&filt_asink,
1991  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1992  NULL, NULL, is->agraph);
1993  if (ret < 0)
1994  goto end;
1995 
1996  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1997  goto end;
1998  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1999  goto end;
2000 
2001  if (force_output_format) {
2002  av_bprint_clear(&bp);
2003  av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
2004  sample_rates [0] = is->audio_tgt.freq;
2005  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2006  goto end;
2007  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2008  goto end;
2009  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2010  goto end;
2011  }
2012 
2013 
2014  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2015  goto end;
2016 
2017  is->in_audio_filter = filt_asrc;
2018  is->out_audio_filter = filt_asink;
2019 
2020 end:
2021  if (ret < 0)
2022  avfilter_graph_free(&is->agraph);
2023  av_bprint_finalize(&bp, NULL);
2024 
2025  return ret;
2026 }
2027 
2028 static int audio_thread(void *arg)
2029 {
2030  VideoState *is = arg;
2031  AVFrame *frame = av_frame_alloc();
2032  Frame *af;
2033  int last_serial = -1;
2034  int reconfigure;
2035  int got_frame = 0;
2036  AVRational tb;
2037  int ret = 0;
2038 
2039  if (!frame)
2040  return AVERROR(ENOMEM);
2041 
2042  do {
2043  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2044  goto the_end;
2045 
2046  if (got_frame) {
2047  tb = (AVRational){1, frame->sample_rate};
2048 
2049  reconfigure =
2050  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2051  frame->format, frame->ch_layout.nb_channels) ||
2052  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2053  is->audio_filter_src.freq != frame->sample_rate ||
2054  is->auddec.pkt_serial != last_serial;
2055 
2056  if (reconfigure) {
2057  char buf1[1024], buf2[1024];
2058  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2059  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2060  av_log(NULL, AV_LOG_DEBUG,
2061  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2062  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2063  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2064 
2065  is->audio_filter_src.fmt = frame->format;
2066  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2067  if (ret < 0)
2068  goto the_end;
2069  is->audio_filter_src.freq = frame->sample_rate;
2070  last_serial = is->auddec.pkt_serial;
2071 
2072  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2073  goto the_end;
2074  }
2075 
2076  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2077  goto the_end;
2078 
2079  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2080  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2081  tb = av_buffersink_get_time_base(is->out_audio_filter);
2082  if (!(af = frame_queue_peek_writable(&is->sampq)))
2083  goto the_end;
2084 
2085  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2086  af->pos = fd ? fd->pkt_pos : -1;
2087  af->serial = is->auddec.pkt_serial;
2088  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2089 
2090  av_frame_move_ref(af->frame, frame);
2091  frame_queue_push(&is->sampq);
2092 
2093  if (is->audioq.serial != is->auddec.pkt_serial)
2094  break;
2095  }
2096  if (ret == AVERROR_EOF)
2097  is->auddec.finished = is->auddec.pkt_serial;
2098  }
2099  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2100  the_end:
2101  avfilter_graph_free(&is->agraph);
2102  av_frame_free(&frame);
2103  return ret;
2104 }
2105 
2106 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2107 {
2108  packet_queue_start(d->queue);
2109  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2110  if (!d->decoder_tid) {
2111  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2112  return AVERROR(ENOMEM);
2113  }
2114  return 0;
2115 }
2116 
2117 static int video_thread(void *arg)
2118 {
2119  VideoState *is = arg;
2120  AVFrame *frame = av_frame_alloc();
2121  double pts;
2122  double duration;
2123  int ret;
2124  AVRational tb = is->video_st->time_base;
2125  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2126 
2127  AVFilterGraph *graph = NULL;
2128  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2129  int last_w = 0;
2130  int last_h = 0;
2131  enum AVPixelFormat last_format = -2;
2132  int last_serial = -1;
2133  int last_vfilter_idx = 0;
2134 
2135  if (!frame)
2136  return AVERROR(ENOMEM);
2137 
2138  for (;;) {
2139  ret = get_video_frame(is, frame);
2140  if (ret < 0)
2141  goto the_end;
2142  if (!ret)
2143  continue;
2144 
2145  if ( last_w != frame->width
2146  || last_h != frame->height
2147  || last_format != frame->format
2148  || last_serial != is->viddec.pkt_serial
2149  || last_vfilter_idx != is->vfilter_idx) {
2150  av_log(NULL, AV_LOG_DEBUG,
2151  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2152  last_w, last_h,
2153  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2154  frame->width, frame->height,
2155  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2156  avfilter_graph_free(&graph);
2157  graph = avfilter_graph_alloc();
2158  if (!graph) {
2159  ret = AVERROR(ENOMEM);
2160  goto the_end;
2161  }
2162  graph->nb_threads = filter_nbthreads;
2163  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2164  SDL_Event event;
2165  event.type = FF_QUIT_EVENT;
2166  event.user.data1 = is;
2167  SDL_PushEvent(&event);
2168  goto the_end;
2169  }
2170  filt_in = is->in_video_filter;
2171  filt_out = is->out_video_filter;
2172  last_w = frame->width;
2173  last_h = frame->height;
2174  last_format = frame->format;
2175  last_serial = is->viddec.pkt_serial;
2176  last_vfilter_idx = is->vfilter_idx;
2177  frame_rate = av_buffersink_get_frame_rate(filt_out);
2178  }
2179 
2180  ret = av_buffersrc_add_frame(filt_in, frame);
2181  if (ret < 0)
2182  goto the_end;
2183 
2184  while (ret >= 0) {
2185  FrameData *fd;
2186 
2187  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2188 
2189  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2190  if (ret < 0) {
2191  if (ret == AVERROR_EOF)
2192  is->viddec.finished = is->viddec.pkt_serial;
2193  ret = 0;
2194  break;
2195  }
2196 
2197  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2198 
2199  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2200  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2201  is->frame_last_filter_delay = 0;
2202  tb = av_buffersink_get_time_base(filt_out);
2203  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2204  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2205  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2206  av_frame_unref(frame);
2207  if (is->videoq.serial != is->viddec.pkt_serial)
2208  break;
2209  }
2210 
2211  if (ret < 0)
2212  goto the_end;
2213  }
2214  the_end:
2215  avfilter_graph_free(&graph);
2216  av_frame_free(&frame);
2217  return 0;
2218 }
2219 
2220 static int subtitle_thread(void *arg)
2221 {
2222  VideoState *is = arg;
2223  Frame *sp;
2224  int got_subtitle;
2225  double pts;
2226 
2227  for (;;) {
2228  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2229  return 0;
2230 
2231  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2232  break;
2233 
2234  pts = 0;
2235 
2236  if (got_subtitle && sp->sub.format == 0) {
2237  if (sp->sub.pts != AV_NOPTS_VALUE)
2238  pts = sp->sub.pts / (double)AV_TIME_BASE;
2239  sp->pts = pts;
2240  sp->serial = is->subdec.pkt_serial;
2241  sp->width = is->subdec.avctx->width;
2242  sp->height = is->subdec.avctx->height;
2243  sp->uploaded = 0;
2244 
2245  /* now we can update the picture count */
2246  frame_queue_push(&is->subpq);
2247  } else if (got_subtitle) {
2248  avsubtitle_free(&sp->sub);
2249  }
2250  }
2251  return 0;
2252 }
2253 
2254 /* copy samples for viewing in editor window */
2255 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2256 {
2257  int size, len;
2258 
2259  size = samples_size / sizeof(short);
2260  while (size > 0) {
2261  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2262  if (len > size)
2263  len = size;
2264  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2265  samples += len;
2266  is->sample_array_index += len;
2267  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2268  is->sample_array_index = 0;
2269  size -= len;
2270  }
2271 }
2272 
2273 /* return the wanted number of samples to get better sync if sync_type is video
2274  * or external master clock */
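/* The running estimate below is an exponentially weighted average:
 *   audio_diff_cum = diff + coef * audio_diff_cum, with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB),
 * so multiplying by (1 - coef) recovers the weighted mean, and a measurement that is
 * AUDIO_DIFF_AVG_NB frames old only contributes about 1% of its original weight. */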
2275 static int synchronize_audio(VideoState *is, int nb_samples)
2276 {
2277  int wanted_nb_samples = nb_samples;
2278 
2279  /* if not master, then we try to remove or add samples to correct the clock */
2280  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2281  double diff, avg_diff;
2282  int min_nb_samples, max_nb_samples;
2283 
2284  diff = get_clock(&is->audclk) - get_master_clock(is);
2285 
2286  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2287  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2288  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2289  /* not enough measures to have a correct estimate */
2290  is->audio_diff_avg_count++;
2291  } else {
2292  /* estimate the A-V difference */
2293  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2294 
2295  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2296  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2297  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2298  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2299  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2300  }
2301  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2302  diff, avg_diff, wanted_nb_samples - nb_samples,
2303  is->audio_clock, is->audio_diff_threshold);
2304  }
2305  } else {
2306  /* too big difference : may be initial PTS errors, so
2307  reset A-V filter */
2308  is->audio_diff_avg_count = 0;
2309  is->audio_diff_cum = 0;
2310  }
2311  }
2312 
2313  return wanted_nb_samples;
2314 }
2315 
2316 /**
2317  * Decode one audio frame and return its uncompressed size.
2318  *
2319  * The processed audio frame is decoded, converted if required, and
2320  * stored in is->audio_buf, with size in bytes given by the return
2321  * value.
2322  */
2323 static int audio_decode_frame(VideoState *is)
2324 {
2325  int data_size, resampled_data_size;
2326  av_unused double audio_clock0;
2327  int wanted_nb_samples;
2328  Frame *af;
2329 
2330  if (is->paused)
2331  return -1;
2332 
2333  do {
2334 #if defined(_WIN32)
2335  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2336  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2337  return -1;
2338  av_usleep (1000);
2339  }
2340 #endif
2341  if (!(af = frame_queue_peek_readable(&is->sampq)))
2342  return -1;
2343  frame_queue_next(&is->sampq);
2344  } while (af->serial != is->audioq.serial);
2345 
2346  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2347  af->frame->nb_samples,
2348  af->frame->format, 1);
2349 
2350  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2351 
2352  if (af->frame->format != is->audio_src.fmt ||
2353  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2354  af->frame->sample_rate != is->audio_src.freq ||
2355  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2356  swr_free(&is->swr_ctx);
2357  swr_alloc_set_opts2(&is->swr_ctx,
2358  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2359  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2360  0, NULL);
2361  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2362  av_log(NULL, AV_LOG_ERROR,
2363  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2364  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2365  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2366  swr_free(&is->swr_ctx);
2367  return -1;
2368  }
2369  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2370  return -1;
2371  is->audio_src.freq = af->frame->sample_rate;
2372  is->audio_src.fmt = af->frame->format;
2373  }
2374 
2375  if (is->swr_ctx) {
2376  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2377  uint8_t **out = &is->audio_buf1;
2378  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2379  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2380  int len2;
2381  if (out_size < 0) {
2382  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2383  return -1;
2384  }
2385  if (wanted_nb_samples != af->frame->nb_samples) {
2386  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2387  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2388  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2389  return -1;
2390  }
2391  }
2392  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2393  if (!is->audio_buf1)
2394  return AVERROR(ENOMEM);
2395  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2396  if (len2 < 0) {
2397  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2398  return -1;
2399  }
2400  if (len2 == out_count) {
2401  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2402  if (swr_init(is->swr_ctx) < 0)
2403  swr_free(&is->swr_ctx);
2404  }
2405  is->audio_buf = is->audio_buf1;
2406  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2407  } else {
2408  is->audio_buf = af->frame->data[0];
2409  resampled_data_size = data_size;
2410  }
2411 
2412  audio_clock0 = is->audio_clock;
2413  /* update the audio clock with the pts */
2414  if (!isnan(af->pts))
2415  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2416  else
2417  is->audio_clock = NAN;
2418  is->audio_clock_serial = af->serial;
2419 #ifdef DEBUG
2420  {
2421  static double last_clock;
2422  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2423  is->audio_clock - last_clock,
2424  is->audio_clock, audio_clock0);
2425  last_clock = is->audio_clock;
2426  }
2427 #endif
2428  return resampled_data_size;
2429 }
2430 
2431 /* prepare a new audio buffer */
2432 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2433 {
2434  VideoState *is = opaque;
2435  int audio_size, len1;
2436 
2437  audio_callback_time = av_gettime_relative();
2438 
2439  while (len > 0) {
2440  if (is->audio_buf_index >= is->audio_buf_size) {
2441  audio_size = audio_decode_frame(is);
2442  if (audio_size < 0) {
2443  /* if error, just output silence */
2444  is->audio_buf = NULL;
2445  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2446  } else {
2447  if (is->show_mode != SHOW_MODE_VIDEO)
2448  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2449  is->audio_buf_size = audio_size;
2450  }
2451  is->audio_buf_index = 0;
2452  }
2453  len1 = is->audio_buf_size - is->audio_buf_index;
2454  if (len1 > len)
2455  len1 = len;
2456  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2457  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2458  else {
2459  memset(stream, 0, len1);
2460  if (!is->muted && is->audio_buf)
2461  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2462  }
2463  len -= len1;
2464  stream += len1;
2465  is->audio_buf_index += len1;
2466  }
2467  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2468  /* Let's assume the audio driver that is used by SDL has two periods. */
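 /* The playback position therefore trails is->audio_clock by the data still queued:
  * two hardware buffers plus the unconsumed part of audio_buf, converted to seconds
  * via bytes_per_sec. */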
2469  if (!isnan(is->audio_clock)) {
2470  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2471  sync_clock_to_slave(&is->extclk, &is->audclk);
2472  }
2473 }
2474 
2475 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2476 {
2477  SDL_AudioSpec wanted_spec, spec;
2478  const char *env;
2479  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2480  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
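 /* Fallback tables for SDL_OpenAudioDevice(): next_nb_channels[] is indexed by the channel
  * count that just failed and gives the next layout to try; once it reaches 0, the loop
  * below retries the original channel count at the next lower sample rate. */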
2481  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2482  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2483 
2484  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2485  if (env) {
2486  wanted_nb_channels = atoi(env);
2487  av_channel_layout_uninit(wanted_channel_layout);
2488  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2489  }
2490  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2491  av_channel_layout_uninit(wanted_channel_layout);
2492  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2493  }
2494  wanted_nb_channels = wanted_channel_layout->nb_channels;
2495  wanted_spec.channels = wanted_nb_channels;
2496  wanted_spec.freq = wanted_sample_rate;
2497  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2498  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2499  return -1;
2500  }
2501  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2502  next_sample_rate_idx--;
2503  wanted_spec.format = AUDIO_S16SYS;
2504  wanted_spec.silence = 0;
2505  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
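 /* 2 << av_log2(x) is the smallest power of two strictly greater than x, so the SDL buffer
  * holds a power-of-two number of samples (subject to the SDL_AUDIO_MIN_BUFFER_SIZE floor)
  * while keeping the callback rate below SDL_AUDIO_MAX_CALLBACKS_PER_SEC. */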
2506  wanted_spec.callback = sdl_audio_callback;
2507  wanted_spec.userdata = opaque;
2508  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2509  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2510  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2511  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2512  if (!wanted_spec.channels) {
2513  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2514  wanted_spec.channels = wanted_nb_channels;
2515  if (!wanted_spec.freq) {
2516  av_log(NULL, AV_LOG_ERROR,
2517  "No more combinations to try, audio open failed\n");
2518  return -1;
2519  }
2520  }
2521  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2522  }
2523  if (spec.format != AUDIO_S16SYS) {
2524  av_log(NULL, AV_LOG_ERROR,
2525  "SDL advised audio format %d is not supported!\n", spec.format);
2526  return -1;
2527  }
2528  if (spec.channels != wanted_spec.channels) {
2529  av_channel_layout_uninit(wanted_channel_layout);
2530  av_channel_layout_default(wanted_channel_layout, spec.channels);
2531  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2532  av_log(NULL, AV_LOG_ERROR,
2533  "SDL advised channel count %d is not supported!\n", spec.channels);
2534  return -1;
2535  }
2536  }
2537 
2538  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2539  audio_hw_params->freq = spec.freq;
2540  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2541  return -1;
2542  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2543  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2544  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2545  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2546  return -1;
2547  }
2548  return spec.size;
2549 }
2550 
2551 /* open a given stream. Return 0 if OK */
2552 static int stream_component_open(VideoState *is, int stream_index)
2553 {
2554  AVFormatContext *ic = is->ic;
2555  AVCodecContext *avctx;
2556  const AVCodec *codec;
2557  const char *forced_codec_name = NULL;
2558  AVDictionary *opts = NULL;
2559  const AVDictionaryEntry *t = NULL;
2560  int sample_rate;
2561  AVChannelLayout ch_layout = { 0 };
2562  int ret = 0;
2563  int stream_lowres = lowres;
2564 
2565  if (stream_index < 0 || stream_index >= ic->nb_streams)
2566  return -1;
2567 
2568  avctx = avcodec_alloc_context3(NULL);
2569  if (!avctx)
2570  return AVERROR(ENOMEM);
2571 
2572  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2573  if (ret < 0)
2574  goto fail;
2575  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2576 
2577  codec = avcodec_find_decoder(avctx->codec_id);
2578 
2579  switch(avctx->codec_type){
2580  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2581  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2582  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2583  }
2584  if (forced_codec_name)
2585  codec = avcodec_find_decoder_by_name(forced_codec_name);
2586  if (!codec) {
2587  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2588  "No codec could be found with name '%s'\n", forced_codec_name);
2589  else av_log(NULL, AV_LOG_WARNING,
2590  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2591  ret = AVERROR(EINVAL);
2592  goto fail;
2593  }
2594 
2595  avctx->codec_id = codec->id;
2596  if (stream_lowres > codec->max_lowres) {
2597  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2598  codec->max_lowres);
2599  stream_lowres = codec->max_lowres;
2600  }
2601  avctx->lowres = stream_lowres;
2602 
2603  if (fast)
2604  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2605 
2606  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2607  ic->streams[stream_index], codec, &opts);
2608  if (ret < 0)
2609  goto fail;
2610 
2611  if (!av_dict_get(opts, "threads", NULL, 0))
2612  av_dict_set(&opts, "threads", "auto", 0);
2613  if (stream_lowres)
2614  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2615 
2616  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2617 
2618  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2619  goto fail;
2620  }
2621  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2622  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2624  goto fail;
2625  }
2626 
2627  is->eof = 0;
2628  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2629  switch (avctx->codec_type) {
2630  case AVMEDIA_TYPE_AUDIO:
2631  {
2632  AVFilterContext *sink;
2633 
2634  is->audio_filter_src.freq = avctx->sample_rate;
2635  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2636  if (ret < 0)
2637  goto fail;
2638  is->audio_filter_src.fmt = avctx->sample_fmt;
2639  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2640  goto fail;
2641  sink = is->out_audio_filter;
2642  sample_rate = av_buffersink_get_sample_rate(sink);
2643  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2644  if (ret < 0)
2645  goto fail;
2646  }
2647 
2648  /* prepare audio output */
2649  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2650  goto fail;
2651  is->audio_hw_buf_size = ret;
2652  is->audio_src = is->audio_tgt;
2653  is->audio_buf_size = 0;
2654  is->audio_buf_index = 0;
2655 
2656  /* init averaging filter */
2657  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2658  is->audio_diff_avg_count = 0;
2659  /* since we do not have a precise enough audio FIFO fullness,
2660  we correct audio sync only if larger than this threshold */
2661  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2662 
2663  is->audio_stream = stream_index;
2664  is->audio_st = ic->streams[stream_index];
2665 
2666  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2667  goto fail;
2668  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2669  is->auddec.start_pts = is->audio_st->start_time;
2670  is->auddec.start_pts_tb = is->audio_st->time_base;
2671  }
2672  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2673  goto out;
2674  SDL_PauseAudioDevice(audio_dev, 0);
2675  break;
2676  case AVMEDIA_TYPE_VIDEO:
2677  is->video_stream = stream_index;
2678  is->video_st = ic->streams[stream_index];
2679 
2680  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2681  goto fail;
2682  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2683  goto out;
2684  is->queue_attachments_req = 1;
2685  break;
2686  case AVMEDIA_TYPE_SUBTITLE:
2687  is->subtitle_stream = stream_index;
2688  is->subtitle_st = ic->streams[stream_index];
2689 
2690  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2691  goto fail;
2692  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2693  goto out;
2694  break;
2695  default:
2696  break;
2697  }
2698  goto out;
2699 
2700 fail:
2701  avcodec_free_context(&avctx);
2702 out:
2703  av_channel_layout_uninit(&ch_layout);
2704  av_dict_free(&opts);
2705 
2706  return ret;
2707 }
2708 
2709 static int decode_interrupt_cb(void *ctx)
2710 {
2711  VideoState *is = ctx;
2712  return is->abort_request;
2713 }
2714 
2715 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2716  return stream_id < 0 ||
2717  queue->abort_request ||
2718  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2719  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2720 }
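/* A queue counts as "full enough" once it holds more than MIN_FRAMES packets and, when
 * packet durations are known, more than one second of stream time; attached pictures and
 * aborted or unused queues are always treated as sufficient. */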
2721 
2722 static int is_realtime(AVFormatContext *s)
2723 {
2724  if( !strcmp(s->iformat->name, "rtp")
2725  || !strcmp(s->iformat->name, "rtsp")
2726  || !strcmp(s->iformat->name, "sdp")
2727  )
2728  return 1;
2729 
2730  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2731  || !strncmp(s->url, "udp:", 4)
2732  )
2733  )
2734  return 1;
2735  return 0;
2736 }
2737 
2738 /* this thread gets the stream from the disk or the network */
2739 static int read_thread(void *arg)
2740 {
2741  VideoState *is = arg;
2742  AVFormatContext *ic = NULL;
2743  int err, i, ret;
2744  int st_index[AVMEDIA_TYPE_NB];
2745  AVPacket *pkt = NULL;
2746  int64_t stream_start_time;
2747  int pkt_in_play_range = 0;
2748  const AVDictionaryEntry *t;
2749  SDL_mutex *wait_mutex = SDL_CreateMutex();
2750  int scan_all_pmts_set = 0;
2751  int64_t pkt_ts;
2752 
2753  if (!wait_mutex) {
2754  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2755  ret = AVERROR(ENOMEM);
2756  goto fail;
2757  }
2758 
2759  memset(st_index, -1, sizeof(st_index));
2760  is->eof = 0;
2761 
2762  pkt = av_packet_alloc();
2763  if (!pkt) {
2764  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2765  ret = AVERROR(ENOMEM);
2766  goto fail;
2767  }
2768  ic = avformat_alloc_context();
2769  if (!ic) {
2770  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2771  ret = AVERROR(ENOMEM);
2772  goto fail;
2773  }
2774  ic->interrupt_callback.callback = decode_interrupt_cb;
2775  ic->interrupt_callback.opaque = is;
2776  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2777  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2778  scan_all_pmts_set = 1;
2779  }
2780  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2781  if (err < 0) {
2782  print_error(is->filename, err);
2783  ret = -1;
2784  goto fail;
2785  }
2786  if (scan_all_pmts_set)
2787  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2788 
2789  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2790  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2791  ret = AVERROR_OPTION_NOT_FOUND;
2792  goto fail;
2793  }
2794  is->ic = ic;
2795 
2796  if (genpts)
2797  ic->flags |= AVFMT_FLAG_GENPTS;
2798 
2799  if (find_stream_info) {
2800  AVDictionary **opts;
2801  int orig_nb_streams = ic->nb_streams;
2802 
2803  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2804  if (err < 0) {
2805  av_log(NULL, AV_LOG_ERROR,
2806  "Error setting up avformat_find_stream_info() options\n");
2807  ret = err;
2808  goto fail;
2809  }
2810 
2811  err = avformat_find_stream_info(ic, opts);
2812 
2813  for (i = 0; i < orig_nb_streams; i++)
2814  av_dict_free(&opts[i]);
2815  av_freep(&opts);
2816 
2817  if (err < 0) {
2818  av_log(NULL, AV_LOG_WARNING,
2819  "%s: could not find codec parameters\n", is->filename);
2820  ret = -1;
2821  goto fail;
2822  }
2823  }
2824 
2825  if (ic->pb)
2826  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2827 
2828  if (seek_by_bytes < 0)
2829  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2830  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2831  strcmp("ogg", ic->iformat->name);
2832 
2833  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2834 
2835  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2836  window_title = av_asprintf("%s - %s", t->value, input_filename);
2837 
2838  /* if seeking requested, we execute it */
2839  if (start_time != AV_NOPTS_VALUE) {
2840  int64_t timestamp;
2841 
2842  timestamp = start_time;
2843  /* add the stream start time */
2844  if (ic->start_time != AV_NOPTS_VALUE)
2845  timestamp += ic->start_time;
2846  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2847  if (ret < 0) {
2848  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2849  is->filename, (double)timestamp / AV_TIME_BASE);
2850  }
2851  }
2852 
2853  is->realtime = is_realtime(ic);
2854 
2855  if (show_status)
2856  av_dump_format(ic, 0, is->filename, 0);
2857 
2858  for (i = 0; i < ic->nb_streams; i++) {
2859  AVStream *st = ic->streams[i];
2860  enum AVMediaType type = st->codecpar->codec_type;
2861  st->discard = AVDISCARD_ALL;
2862  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2863  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2864  st_index[type] = i;
2865  }
2866  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2867  if (wanted_stream_spec[i] && st_index[i] == -1) {
2868  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2869  st_index[i] = INT_MAX;
2870  }
2871  }
2872 
2873  if (!video_disable)
2874  st_index[AVMEDIA_TYPE_VIDEO] =
2875  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2876  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2877  if (!audio_disable)
2878  st_index[AVMEDIA_TYPE_AUDIO] =
2879  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2880  st_index[AVMEDIA_TYPE_AUDIO],
2881  st_index[AVMEDIA_TYPE_VIDEO],
2882  NULL, 0);
2883  if (!video_disable && !subtitle_disable)
2884  st_index[AVMEDIA_TYPE_SUBTITLE] =
2885  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2886  st_index[AVMEDIA_TYPE_SUBTITLE],
2887  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2888  st_index[AVMEDIA_TYPE_AUDIO] :
2889  st_index[AVMEDIA_TYPE_VIDEO]),
2890  NULL, 0);
2891 
2892  is->show_mode = show_mode;
2893  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2894  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2895  AVCodecParameters *codecpar = st->codecpar;
2896  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2897  if (codecpar->width)
2898  set_default_window_size(codecpar->width, codecpar->height, sar);
2899  }
2900 
2901  /* open the streams */
2902  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2903  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2904  }
2905 
2906  ret = -1;
2907  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2908  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2909  }
2910  if (is->show_mode == SHOW_MODE_NONE)
2911  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2912 
2913  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2914  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2915  }
2916 
2917  if (is->video_stream < 0 && is->audio_stream < 0) {
2918  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2919  is->filename);
2920  ret = -1;
2921  goto fail;
2922  }
2923 
2924  if (infinite_buffer < 0 && is->realtime)
2925  infinite_buffer = 1;
2926 
2927  for (;;) {
2928  if (is->abort_request)
2929  break;
2930  if (is->paused != is->last_paused) {
2931  is->last_paused = is->paused;
2932  if (is->paused)
2933  is->read_pause_return = av_read_pause(ic);
2934  else
2935  av_read_play(ic);
2936  }
2937 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2938  if (is->paused &&
2939  (!strcmp(ic->iformat->name, "rtsp") ||
2940  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2941  /* wait 10 ms to avoid trying to get another packet */
2942  /* XXX: horrible */
2943  SDL_Delay(10);
2944  continue;
2945  }
2946 #endif
2947  if (is->seek_req) {
2948  int64_t seek_target = is->seek_pos;
2949  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2950  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2951 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2952 // of the seek_pos/seek_rel variables
2953 
2954  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2955  if (ret < 0) {
2956  av_log(NULL, AV_LOG_ERROR,
2957  "%s: error while seeking\n", is->ic->url);
2958  } else {
2959  if (is->audio_stream >= 0)
2960  packet_queue_flush(&is->audioq);
2961  if (is->subtitle_stream >= 0)
2962  packet_queue_flush(&is->subtitleq);
2963  if (is->video_stream >= 0)
2964  packet_queue_flush(&is->videoq);
2965  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2966  set_clock(&is->extclk, NAN, 0);
2967  } else {
2968  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2969  }
2970  }
2971  is->seek_req = 0;
2972  is->queue_attachments_req = 1;
2973  is->eof = 0;
2974  if (is->paused)
2975  step_to_next_frame(is);
2976  }
2977  if (is->queue_attachments_req) {
2978  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2979  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2980  goto fail;
2981  packet_queue_put(&is->videoq, pkt);
2982  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2983  }
2984  is->queue_attachments_req = 0;
2985  }
2986 
2987  /* if the queues are full, no need to read more */
2988  if (infinite_buffer<1 &&
2989  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2990  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2991  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2992  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2993  /* wait 10 ms */
2994  SDL_LockMutex(wait_mutex);
2995  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2996  SDL_UnlockMutex(wait_mutex);
2997  continue;
2998  }
2999  if (!is->paused &&
3000  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3001  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3002  if (loop != 1 && (!loop || --loop)) {
3003  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3004  } else if (autoexit) {
3005  ret = AVERROR_EOF;
3006  goto fail;
3007  }
3008  }
3009  ret = av_read_frame(ic, pkt);
3010  if (ret < 0) {
3011  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3012  if (is->video_stream >= 0)
3013  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3014  if (is->audio_stream >= 0)
3015  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3016  if (is->subtitle_stream >= 0)
3017  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3018  is->eof = 1;
3019  }
3020  if (ic->pb && ic->pb->error) {
3021  if (autoexit)
3022  goto fail;
3023  else
3024  break;
3025  }
3026  SDL_LockMutex(wait_mutex);
3027  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3028  SDL_UnlockMutex(wait_mutex);
3029  continue;
3030  } else {
3031  is->eof = 0;
3032  }
3033  /* check if packet is in play range specified by user, then queue, otherwise discard */
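 /* The packet timestamp is rebased to seconds with the stream time base, measured from the
  * stream start time, and compared against the -ss/-t values (start_time and duration,
  * both in AV_TIME_BASE units). */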
3034  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3035  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3036  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3037  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3038  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3039  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3040  <= ((double)duration / 1000000);
3041  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3042  packet_queue_put(&is->audioq, pkt);
3043  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3044  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3045  packet_queue_put(&is->videoq, pkt);
3046  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3047  packet_queue_put(&is->subtitleq, pkt);
3048  } else {
3049  av_packet_unref(pkt);
3050  }
3051  }
3052 
3053  ret = 0;
3054  fail:
3055  if (ic && !is->ic)
3056  avformat_close_input(&ic);
3057 
3058  av_packet_free(&pkt);
3059  if (ret != 0) {
3060  SDL_Event event;
3061 
3062  event.type = FF_QUIT_EVENT;
3063  event.user.data1 = is;
3064  SDL_PushEvent(&event);
3065  }
3066  SDL_DestroyMutex(wait_mutex);
3067  return 0;
3068 }
3069 
3070 static VideoState *stream_open(const char *filename,
3071  const AVInputFormat *iformat)
3072 {
3073  VideoState *is;
3074 
3075  is = av_mallocz(sizeof(VideoState));
3076  if (!is)
3077  return NULL;
3078  is->last_video_stream = is->video_stream = -1;
3079  is->last_audio_stream = is->audio_stream = -1;
3080  is->last_subtitle_stream = is->subtitle_stream = -1;
3081  is->filename = av_strdup(filename);
3082  if (!is->filename)
3083  goto fail;
3084  is->iformat = iformat;
3085  is->ytop = 0;
3086  is->xleft = 0;
3087 
3088  /* start video display */
3089  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3090  goto fail;
3091  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3092  goto fail;
3093  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3094  goto fail;
3095 
3096  if (packet_queue_init(&is->videoq) < 0 ||
3097  packet_queue_init(&is->audioq) < 0 ||
3098  packet_queue_init(&is->subtitleq) < 0)
3099  goto fail;
3100 
3101  if (!(is->continue_read_thread = SDL_CreateCond())) {
3102  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3103  goto fail;
3104  }
3105 
3106  init_clock(&is->vidclk, &is->videoq.serial);
3107  init_clock(&is->audclk, &is->audioq.serial);
3108  init_clock(&is->extclk, &is->extclk.serial);
3109  is->audio_clock_serial = -1;
3110  if (startup_volume < 0)
3111  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3112  if (startup_volume > 100)
3113  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3115  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3116  is->audio_volume = startup_volume;
3117  is->muted = 0;
3118  is->av_sync_type = av_sync_type;
3119  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3120  if (!is->read_tid) {
3121  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3122 fail:
3123  stream_close(is);
3124  return NULL;
3125  }
3126  return is;
3127 }
3128 
3129 static void stream_cycle_channel(VideoState *is, int codec_type)
3130 {
3131  AVFormatContext *ic = is->ic;
3132  int start_index, stream_index;
3133  int old_index;
3134  AVStream *st;
3135  AVProgram *p = NULL;
3136  int nb_streams = is->ic->nb_streams;
3137 
3138  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3139  start_index = is->last_video_stream;
3140  old_index = is->video_stream;
3141  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3142  start_index = is->last_audio_stream;
3143  old_index = is->audio_stream;
3144  } else {
3145  start_index = is->last_subtitle_stream;
3146  old_index = is->subtitle_stream;
3147  }
3148  stream_index = start_index;
3149 
3150  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3151  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3152  if (p) {
3153  nb_streams = p->nb_stream_indexes;
3154  for (start_index = 0; start_index < nb_streams; start_index++)
3155  if (p->stream_index[start_index] == stream_index)
3156  break;
3157  if (start_index == nb_streams)
3158  start_index = -1;
3159  stream_index = start_index;
3160  }
3161  }
3162 
3163  for (;;) {
3164  if (++stream_index >= nb_streams)
3165  {
3166  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3167  {
3168  stream_index = -1;
3169  is->last_subtitle_stream = -1;
3170  goto the_end;
3171  }
3172  if (start_index == -1)
3173  return;
3174  stream_index = 0;
3175  }
3176  if (stream_index == start_index)
3177  return;
3178  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3179  if (st->codecpar->codec_type == codec_type) {
3180  /* check that parameters are OK */
3181  switch (codec_type) {
3182  case AVMEDIA_TYPE_AUDIO:
3183  if (st->codecpar->sample_rate != 0 &&
3184  st->codecpar->ch_layout.nb_channels != 0)
3185  goto the_end;
3186  break;
3187  case AVMEDIA_TYPE_VIDEO:
3188  case AVMEDIA_TYPE_SUBTITLE:
3189  goto the_end;
3190  default:
3191  break;
3192  }
3193  }
3194  }
3195  the_end:
3196  if (p && stream_index != -1)
3197  stream_index = p->stream_index[stream_index];
3198  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3200  old_index,
3201  stream_index);
3202 
3203  stream_component_close(is, old_index);
3204  stream_component_open(is, stream_index);
3205 }
3206 
3207 
3208 static void toggle_full_screen(VideoState *is)
3209 {
3210  is_full_screen = !is_full_screen;
3211  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3212 }
3213 
3214 static void toggle_audio_display(VideoState *is)
3215 {
3216  int next = is->show_mode;
3217  do {
3218  next = (next + 1) % SHOW_MODE_NB;
3219  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3220  if (is->show_mode != next) {
3221  is->force_refresh = 1;
3222  is->show_mode = next;
3223  }
3224 }
3225 
3226 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3227  double remaining_time = 0.0;
3228  SDL_PumpEvents();
3229  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3230  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3231  SDL_ShowCursor(0);
3232  cursor_hidden = 1;
3233  }
3234  if (remaining_time > 0.0)
3235  av_usleep((int64_t)(remaining_time * 1000000.0));
3236  remaining_time = REFRESH_RATE;
3237  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3238  video_refresh(is, &remaining_time);
3239  SDL_PumpEvents();
3240  }
3241 }
3242 
3243 static void seek_chapter(VideoState *is, int incr)
3244 {
3245  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3246  int i;
3247 
3248  if (!is->ic->nb_chapters)
3249  return;
3250 
3251  /* find the current chapter */
3252  for (i = 0; i < is->ic->nb_chapters; i++) {
3253  AVChapter *ch = is->ic->chapters[i];
3254  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3255  i--;
3256  break;
3257  }
3258  }
3259 
3260  i += incr;
3261  i = FFMAX(i, 0);
3262  if (i >= is->ic->nb_chapters)
3263  return;
3264 
3265  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3266  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3267  AV_TIME_BASE_Q), 0, 0);
3268 }
3269 
3270 /* handle an event sent by the GUI */
3271 static void event_loop(VideoState *cur_stream)
3272 {
3273  SDL_Event event;
3274  double incr, pos, frac;
3275 
3276  for (;;) {
3277  double x;
3278  refresh_loop_wait_event(cur_stream, &event);
3279  switch (event.type) {
3280  case SDL_KEYDOWN:
3281  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3282  do_exit(cur_stream);
3283  break;
3284  }
3285  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3286  if (!cur_stream->width)
3287  continue;
3288  switch (event.key.keysym.sym) {
3289  case SDLK_f:
3290  toggle_full_screen(cur_stream);
3291  cur_stream->force_refresh = 1;
3292  break;
3293  case SDLK_p:
3294  case SDLK_SPACE:
3295  toggle_pause(cur_stream);
3296  break;
3297  case SDLK_m:
3298  toggle_mute(cur_stream);
3299  break;
3300  case SDLK_KP_MULTIPLY:
3301  case SDLK_0:
3302  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3303  break;
3304  case SDLK_KP_DIVIDE:
3305  case SDLK_9:
3306  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3307  break;
3308  case SDLK_s: // S: Step to next frame
3309  step_to_next_frame(cur_stream);
3310  break;
3311  case SDLK_a:
3313  break;
3314  case SDLK_v:
3316  break;
3317  case SDLK_c:
3321  break;
3322  case SDLK_t:
3324  break;
3325  case SDLK_w:
3326  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3327  if (++cur_stream->vfilter_idx >= nb_vfilters)
3328  cur_stream->vfilter_idx = 0;
3329  } else {
3330  cur_stream->vfilter_idx = 0;
3331  toggle_audio_display(cur_stream);
3332  }
3333  break;
3334  case SDLK_PAGEUP:
3335  if (cur_stream->ic->nb_chapters <= 1) {
3336  incr = 600.0;
3337  goto do_seek;
3338  }
3339  seek_chapter(cur_stream, 1);
3340  break;
3341  case SDLK_PAGEDOWN:
3342  if (cur_stream->ic->nb_chapters <= 1) {
3343  incr = -600.0;
3344  goto do_seek;
3345  }
3346  seek_chapter(cur_stream, -1);
3347  break;
3348  case SDLK_LEFT:
3349  incr = seek_interval ? -seek_interval : -10.0;
3350  goto do_seek;
3351  case SDLK_RIGHT:
3352  incr = seek_interval ? seek_interval : 10.0;
3353  goto do_seek;
3354  case SDLK_UP:
3355  incr = 60.0;
3356  goto do_seek;
3357  case SDLK_DOWN:
3358  incr = -60.0;
3359  do_seek:
3360  if (seek_by_bytes) {
3361  pos = -1;
3362  if (pos < 0 && cur_stream->video_stream >= 0)
3363  pos = frame_queue_last_pos(&cur_stream->pictq);
3364  if (pos < 0 && cur_stream->audio_stream >= 0)
3365  pos = frame_queue_last_pos(&cur_stream->sampq);
3366  if (pos < 0)
3367  pos = avio_tell(cur_stream->ic->pb);
3368  if (cur_stream->ic->bit_rate)
3369  incr *= cur_stream->ic->bit_rate / 8.0;
3370  else
3371  incr *= 180000.0;
3372  pos += incr;
3373  stream_seek(cur_stream, pos, incr, 1);
3374  } else {
3375  pos = get_master_clock(cur_stream);
3376  if (isnan(pos))
3377  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3378  pos += incr;
3379  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3380  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3381  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3382  }
3383  break;
3384  default:
3385  break;
3386  }
3387  break;
3388  case SDL_MOUSEBUTTONDOWN:
3389  if (exit_on_mousedown) {
3390  do_exit(cur_stream);
3391  break;
3392  }
3393  if (event.button.button == SDL_BUTTON_LEFT) {
3394  static int64_t last_mouse_left_click = 0;
3395  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3396  toggle_full_screen(cur_stream);
3397  cur_stream->force_refresh = 1;
3398  last_mouse_left_click = 0;
3399  } else {
3400  last_mouse_left_click = av_gettime_relative();
3401  }
3402  }
3403  case SDL_MOUSEMOTION:
3404  if (cursor_hidden) {
3405  SDL_ShowCursor(1);
3406  cursor_hidden = 0;
3407  }
3408  cursor_last_shown = av_gettime_relative();
3409  if (event.type == SDL_MOUSEBUTTONDOWN) {
3410  if (event.button.button != SDL_BUTTON_RIGHT)
3411  break;
3412  x = event.button.x;
3413  } else {
3414  if (!(event.motion.state & SDL_BUTTON_RMASK))
3415  break;
3416  x = event.motion.x;
3417  }
3418  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3419  uint64_t size = avio_size(cur_stream->ic->pb);
3420  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3421  } else {
3422  int64_t ts;
3423  int ns, hh, mm, ss;
3424  int tns, thh, tmm, tss;
3425  tns = cur_stream->ic->duration / 1000000LL;
3426  thh = tns / 3600;
3427  tmm = (tns % 3600) / 60;
3428  tss = (tns % 60);
3429  frac = x / cur_stream->width;
3430  ns = frac * tns;
3431  hh = ns / 3600;
3432  mm = (ns % 3600) / 60;
3433  ss = (ns % 60);
3434  av_log(NULL, AV_LOG_INFO,
3435  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3436  hh, mm, ss, thh, tmm, tss);
3437  ts = frac * cur_stream->ic->duration;
3438  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3439  ts += cur_stream->ic->start_time;
3440  stream_seek(cur_stream, ts, 0, 0);
3441  }
3442  break;
3443  case SDL_WINDOWEVENT:
3444  switch (event.window.event) {
3445  case SDL_WINDOWEVENT_SIZE_CHANGED:
3446  screen_width = cur_stream->width = event.window.data1;
3447  screen_height = cur_stream->height = event.window.data2;
3448  if (cur_stream->vis_texture) {
3449  SDL_DestroyTexture(cur_stream->vis_texture);
3450  cur_stream->vis_texture = NULL;
3451  }
3452  case SDL_WINDOWEVENT_EXPOSED:
3453  cur_stream->force_refresh = 1;
3454  }
3455  break;
3456  case SDL_QUIT:
3457  case FF_QUIT_EVENT:
3458  do_exit(cur_stream);
3459  break;
3460  default:
3461  break;
3462  }
3463  }
3464 }
3465 
3466 static int opt_width(void *optctx, const char *opt, const char *arg)
3467 {
3468  double num;
3469  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3470  if (ret < 0)
3471  return ret;
3472 
3473  screen_width = num;
3474  return 0;
3475 }
3476 
3477 static int opt_height(void *optctx, const char *opt, const char *arg)
3478 {
3479  double num;
3480  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3481  if (ret < 0)
3482  return ret;
3483 
3484  screen_height = num;
3485  return 0;
3486 }
3487 
3488 static int opt_format(void *optctx, const char *opt, const char *arg)
3489 {
3491  if (!file_iformat) {
3492  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3493  return AVERROR(EINVAL);
3494  }
3495  return 0;
3496 }
3497 
3498 static int opt_sync(void *optctx, const char *opt, const char *arg)
3499 {
3500  if (!strcmp(arg, "audio"))
3502  else if (!strcmp(arg, "video"))
3504  else if (!strcmp(arg, "ext"))
3506  else {
3507  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3508  exit(1);
3509  }
3510  return 0;
3511 }
3512 
3513 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3514 {
3515  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3516  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3517  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3518 
3519  if (show_mode == SHOW_MODE_NONE) {
3520  double num;
3521  int ret = parse_number(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1, &num);
3522  if (ret < 0)
3523  return ret;
3524  show_mode = num;
3525  }
3526  return 0;
3527 }
3528 
3529 static int opt_input_file(void *optctx, const char *filename)
3530 {
3531  if (input_filename) {
3532  av_log(NULL, AV_LOG_FATAL,
3533  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3534  filename, input_filename);
3535  return AVERROR(EINVAL);
3536  }
3537  if (!strcmp(filename, "-"))
3538  filename = "fd:";
3539  input_filename = filename;
3540 
3541  return 0;
3542 }
3543 
3544 static int opt_codec(void *optctx, const char *opt, const char *arg)
3545 {
3546  const char *spec = strchr(opt, ':');
3547  if (!spec) {
3548  av_log(NULL, AV_LOG_ERROR,
3549  "No media specifier was specified in '%s' in option '%s'\n",
3550  arg, opt);
3551  return AVERROR(EINVAL);
3552  }
3553  spec++;
3554  switch (spec[0]) {
3555  case 'a' : audio_codec_name = arg; break;
3556  case 's' : subtitle_codec_name = arg; break;
3557  case 'v' : video_codec_name = arg; break;
3558  default:
3559  av_log(NULL, AV_LOG_ERROR,
3560  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3561  return AVERROR(EINVAL);
3562  }
3563  return 0;
3564 }
3565 
3566 static int dummy;
3567 
3568 static const OptionDef options[] = {
3569  CMDUTILS_COMMON_OPTIONS
3570  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3571  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3572  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3573  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3574  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3575  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3576  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3577  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3578  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3579  { "ss", HAS_ARG | OPT_TIME, { &start_time }, "seek to a given position in seconds", "pos" },
3580  { "t", HAS_ARG | OPT_TIME, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3581  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3582  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3583  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3584  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3585  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3586  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3587  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3588  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3589  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3590  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3591  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3592  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3593  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3594  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3595  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3596  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3597  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3598  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3599  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3600  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3601  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3602  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3603  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3604  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3605  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3606  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3607  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3608  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3609  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3610  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3611  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3612  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3613  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3614  "read and decode the streams to fill missing information with heuristics" },
3615  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3616  { NULL, },
3617 };
3618 
3619 static void show_usage(void)
3620 {
3621  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3622  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3623  av_log(NULL, AV_LOG_INFO, "\n");
3624 }
3625 
3626 void show_help_default(const char *opt, const char *arg)
3627 {
3628  av_log_set_callback(log_callback_help);
3629  show_usage();
3630  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3631  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3632  printf("\n");
3636  printf("\nWhile playing:\n"
3637  "q, ESC quit\n"
3638  "f toggle full screen\n"
3639  "p, SPC pause\n"
3640  "m toggle mute\n"
3641  "9, 0 decrease and increase volume respectively\n"
3642  "/, * decrease and increase volume respectively\n"
3643  "a cycle audio channel in the current program\n"
3644  "v cycle video channel\n"
3645  "t cycle subtitle channel in the current program\n"
3646  "c cycle program\n"
3647  "w cycle video filters or show modes\n"
3648  "s activate frame-step mode\n"
3649  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3650  "down/up seek backward/forward 1 minute\n"
3651  "page down/page up seek backward/forward 10 minutes\n"
3652  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3653  "left double-click toggle full screen\n"
3654  );
3655 }
3656 
3657 /* Called from the main */
3658 int main(int argc, char **argv)
3659 {
3660  int flags, ret;
3661  VideoState *is;
3662 
3663  init_dynload();
3664 
3666  parse_loglevel(argc, argv, options);
3667 
3668  /* register all codecs, demux and protocols */
3669 #if CONFIG_AVDEVICE
3670  avdevice_register_all();
3671 #endif
3672  avformat_network_init();
3673 
3674  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3675  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3676 
3677  show_banner(argc, argv, options);
3678 
3679  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3680  if (ret < 0)
3681  exit(ret == AVERROR_EXIT ? 0 : 1);
3682 
3683  if (!input_filename) {
3684  show_usage();
3685  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3686  av_log(NULL, AV_LOG_FATAL,
3687  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3688  exit(1);
3689  }
3690 
3691  if (display_disable) {
3692  video_disable = 1;
3693  }
3694  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3695  if (audio_disable)
3696  flags &= ~SDL_INIT_AUDIO;
3697  else {
3698  /* Try to work around an occasional ALSA buffer underflow issue when the
3699  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3700  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3701  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3702  }
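 /*
  * Editorial note: SDL_setenv()'s third argument (overwrite) is 1, so it is
  * the SDL_getenv() check above, not SDL_setenv() itself, that preserves a
  * buffer-size value the user has already exported in the environment.
  */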
3703  if (display_disable)
3704  flags &= ~SDL_INIT_VIDEO;
3705  if (SDL_Init (flags)) {
3706  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3707  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3708  exit(1);
3709  }
3710 
3711  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3712  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3713 
3714  if (!display_disable) {
3715  int flags = SDL_WINDOW_HIDDEN;
3716  if (alwaysontop)
3717 #if SDL_VERSION_ATLEAST(2,0,5)
3718  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3719 #else
3720  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3721 #endif
3722  if (borderless)
3723  flags |= SDL_WINDOW_BORDERLESS;
3724  else
3725  flags |= SDL_WINDOW_RESIZABLE;
3726 
3727 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3728  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3729 #endif
3730  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3731  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3732  if (window) {
3733  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3734  if (!renderer) {
3735  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3736  renderer = SDL_CreateRenderer(window, -1, 0);
3737  }
3738  if (renderer) {
3739  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3740  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3741  }
3742  }
3743  if (!window || !renderer || !renderer_info.num_texture_formats) {
3744  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3745  do_exit(NULL);
3746  }
3747  }
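/*
 * Editorial note: the block above first requests an accelerated, vsync-enabled
 * renderer, falls back to whatever SDL can provide
 * (SDL_CreateRenderer(window, -1, 0)), and only calls do_exit() when no
 * window, no renderer, or no usable texture format is available.
 */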
3748 
3749  is = stream_open(input_filename, file_iformat);
3750  if (!is) {
3751  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3752  do_exit(NULL);
3753  }
3754 
3755  event_loop(is);
3756 
3757  /* never returns */
3758 
3759  return 0;
3760 }
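/*
 * Editorial note: hypothetical invocations exercising a few of the options
 * declared in the table above (file names and the URL are placeholders):
 *
 *   ffplay -window_title "preview" input.mp4
 *   ffplay -vf "transpose=1" -af "volume=0.5" clip.mkv
 *   ffplay -infbuf -vcodec h264 rtsp://example.invalid/stream
 */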
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:116
AVSubtitle
Definition: avcodec.h:2269
rect::w
int w
Definition: f_ebur128.c:78
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2502
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1285
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:214
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:423
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:284
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:656
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:221
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:161
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:488
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:184
av_clip
#define av_clip
Definition: common.h:96
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:265
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:134
av_sync_type
static int av_sync_type
Definition: ffplay.c:327
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:362
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:150
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1049
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:796
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:299
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:1952
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:391
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:780
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:193
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:890
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:636
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1044
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:802
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:390
out
FILE * out
Definition: movenc.c:54
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:266
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1064
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2117
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:233
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:824
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1309
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:443
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1332
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:175
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:243
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:573
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:772
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:178
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:148
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:912
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:339
display_disable
static int display_disable
Definition: ffplay.c:322
screen_width
static int screen_width
Definition: ffplay.c:312
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:58
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:1021
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:78
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1495
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:480
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
VideoState::auddec
Decoder auddec
Definition: ffplay.c:227
screen_left
static int screen_left
Definition: ffplay.c:314
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:721
AudioParams::frame_size
int frame_size
Definition: ffplay.c:135
AVSubtitleRect
Definition: avcodec.h:2241
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:198
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2106
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:185
rect::y
int y
Definition: f_ebur128.c:78
FrameQueue::size
int size
Definition: ffplay.c:173
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:205
av_unused
#define av_unused
Definition: attributes.h:131
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:164
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:272
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:667
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1729
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AudioParams
Definition: ffplay.c:131
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:277
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:253
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1183
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:282
AVFrame::width
int width
Definition: frame.h:412
VideoState::xleft
int xleft
Definition: ffplay.c:291
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:342
Frame::pts
double pts
Definition: ffplay.c:158
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:126
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:683
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:342
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:109
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:157
AVChapter::start
int64_t start
Definition: avformat.h:1077
Clock
Definition: ffplay.c:139
data
const char data[16]
Definition: mxf.c:148
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:128
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:190
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:60
fn
#define fn(a)
Definition: adynamicequalizer_template.c:61
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2225
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:249
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:442
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:238
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:138
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:350
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:509
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:367
video_disable
static int video_disable
Definition: ffplay.c:317
Frame::uploaded
int uploaded
Definition: ffplay.c:165
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1205
AVDictionary
Definition: dict.c:34
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:312
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1510
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:154
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3529
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1249
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1462
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
VideoState::paused
int paused
Definition: ffplay.c:208
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:317
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1391
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:297
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:370
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1424
VideoState::width
int width
Definition: ffplay.c:291
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:307
sample_rate
sample_rate
Definition: ffmpeg_filter.c:368
dummy
static int dummy
Definition: ffplay.c:3566
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:358
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
PacketQueue
Definition: ffplay.c:115
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:901
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2220
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:740
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:301
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:260
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
OptionDef
Definition: cmdutils.h:106
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2323
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:318
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:223
genpts
static int genpts
Definition: ffplay.c:331
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:255
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3498
AVFrame::opaque_ref
AVBufferRef * opaque_ref
Frame owner's private data.
Definition: frame.h:768
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1502
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:900
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:225
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:366
FrameQueue::rindex
int rindex
Definition: ffplay.c:171
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1345
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:207
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:374
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1381
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:829
startup_volume
static int startup_volume
Definition: ffplay.c:325
window
static SDL_Window * window
Definition: ffplay.c:360
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3208
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:146
VideoState::extclk
Clock extclk
Definition: ffplay.c:221
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:212
alwaysontop
static int alwaysontop
Definition: ffplay.c:324
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:241
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:464
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2107
fail
#define fail()
Definition: checkasm.h:138
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
FrameQueue
Definition: ffplay.c:169
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:435
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2242
VideoState::video_stream
int video_stream
Definition: ffplay.c:283
autoexit
static int autoexit
Definition: ffplay.c:334
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:513
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1074
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:950
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:802
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3513
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
pts
static int64_t pts
Definition: transcode_aac.c:643
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1385
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:239
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:487
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:237
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:301
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:112
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:330
loop
static int loop
Definition: ffplay.c:337
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:558
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:267
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3477
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:413
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1224
is_full_screen
static int is_full_screen
Definition: ffplay.c:355
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:969
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:75
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:934
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:166
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1343
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:549
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2028
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1379
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:629
VideoState
Definition: ffplay.c:203
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:728
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2432
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1399
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:193
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:226
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:716
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:786
OPT_INT
#define OPT_INT
Definition: cmdutils.h:115
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:153
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:259
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:191
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3626
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
default_height
static int default_height
Definition: ffplay.c:311
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1233
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:131
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:554
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1127
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:446
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:606
AVDictionaryEntry::key
char * key
Definition: dict.h:90
Clock::last_updated
double last_updated
Definition: ffplay.c:142
PacketQueue::duration
int64_t duration
Definition: ffplay.c:119
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2243
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:121
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:855
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:313
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:192
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1836
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:713
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:990
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:202
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3544
AVPacketSideData::data
uint8_t * data
Definition: packet.h:343
Clock::pts_drift
double pts_drift
Definition: ffplay.c:141
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:285
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:632
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:100
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:176
nb_streams
static int nb_streams
Definition: ffprobe.c:328
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:207
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1359
screen_top
static int screen_top
Definition: ffplay.c:315
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:240
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:361
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst)
Filter out options for given codec.
Definition: cmdutils.c:925
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1540
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:444
VideoState::step
int step
Definition: ffplay.c:292
frame
static AVFrame * frame
Definition: demux_decode.c:54
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2275
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:451
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:309
Clock::speed
double speed
Definition: ffplay.c:143
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:260
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
AVFormatContext
Format I/O context.
Definition: avformat.h:1115
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:437
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:625
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:77
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:864
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2244
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3243
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1407
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:187
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:704
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1169
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:880
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:1003
FrameQueue::max_size
int max_size
Definition: ffplay.c:174
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:111
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffmpeg_dec.c:35
AudioParams::freq
int freq
Definition: ffplay.c:132
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:168
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:133
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2475
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:866
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3129
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:257
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:353
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1157
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:447
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:677
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:264
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:194
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:345
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1043
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:723
OPT_INT64
#define OPT_INT64
Definition: cmdutils.h:118
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:70
Frame::duration
double duration
Definition: ffplay.c:159
lowres
static int lowres
Definition: ffplay.c:332
double
double
Definition: af_crystalizer.c:131
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AVFrame::pkt_dts
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:459
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:128
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:160
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:281
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1371
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1484
TextureFormatEntry
Definition: ffplay.c:365
AVFilterGraph
Definition: avfilter.h:864
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2552
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:431
fp
#define fp
Definition: regdef.h:44
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:206
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:269
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:480
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:438
exp
int8_t exp
Definition: eval.c:72
VideoState::seek_req
int seek_req
Definition: ffplay.c:211
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:260
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:235
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:215
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3271
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:291
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:171
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:624
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: avpacket.c:650
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:975
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:676
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:262
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1171
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:336
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:869
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:205
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:199
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1524
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:242
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:60
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:356
eval.h
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2436
Frame::format
int format
Definition: ffplay.c:163
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:121
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:85
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:528
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:457
AVPacket::size
int size
Definition: packet.h:492
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:297
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:588
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:249
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:166
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:214
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:177
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:307
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:738
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:114
Frame::sub
AVSubtitle sub
Definition: ffplay.c:156
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:301
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:347
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:567
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:560
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sp
#define sp
Definition: regdef.h:63
start_time
static int64_t start_time
Definition: ffplay.c:328
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:260
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1080
Frame::serial
int serial
Definition: ffplay.c:157
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:1817
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:64
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:270
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2739
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:461
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:662
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:145
rect::h
int h
Definition: f_ebur128.c:78
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:273
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:174
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:274
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t **out_arg, int out_count, const uint8_t **in_arg, int in_count)
Convert audio.
Definition: swresample.c:836
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:263
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:319
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:339
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:286
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:563
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:498
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:490
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:441
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:256
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1552
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1490
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:811
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1560
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e.
Definition: channel_layout.h:118
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:608
seek_interval
static float seek_interval
Definition: ffplay.c:321
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:225
rect::x
int x
Definition: f_ebur128.c:78
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:213
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:770
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:363
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:294
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1304
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:942
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:368
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:506
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:343
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:304
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:484
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:972
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:526
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:417
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:67
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:348
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:210
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:709
FrameQueue::windex
int windex
Definition: ffplay.c:172
VideoState::filename
char * filename
Definition: ffplay.c:290
VideoState::muted
int muted
Definition: ffplay.c:251
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:196
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:420
bprint.h
Clock::pts
double pts
Definition: ffplay.c:140
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:126
PacketQueue::serial
int serial
Definition: ffplay.c:121
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:484
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:252
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:64
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:245
OPT_TIME
#define OPT_TIME
Definition: cmdutils.h:124
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:918
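The call is typically used as in this sketch, which mirrors the general pattern of configure_filtergraph(): label the source output "in" and the sink input "out", parse the user filter string between them, then configure the graph. graph, source_ctx and sink_ctx are assumed to have been created already.

#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>

static int insert_filters(AVFilterGraph *graph, const char *filters,
                          AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret = AVERROR(ENOMEM);

    if (!outputs || !inputs)
        goto fail;

    outputs->name       = av_strdup("in");   /* the source's output pad feeds the chain */
    outputs->filter_ctx = source_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name        = av_strdup("out");  /* the chain's output goes into the sink */
    inputs->filter_ctx  = sink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);

fail:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}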
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:59
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1035
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:197
AVCodecParameters::height
int height
Definition: codec_par.h:122
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:401
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1321
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:882
show_status
static int show_status
Definition: ffplay.c:326
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3488
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:346
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:456
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:282
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:649
borderless
static int borderless
Definition: ffplay.c:323
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
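Together with av_frame_move_ref(), this implements the usual zero-copy hand-off of a decoded frame into a queue slot; a minimal sketch (not ffplay's frame_queue code):

#include <libavutil/frame.h>

/* Move the references held by src into dst without copying pixel/sample data;
 * src is left blank and can be reused for the next decode. */
static void hand_off(AVFrame *dst, AVFrame *src)
{
    av_frame_unref(dst);          /* drop whatever the slot held before */
    av_frame_move_ref(dst, src);  /* dst now owns the buffers, src is reset */
}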
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2255
MyAVPacketList
Definition: ffplay.c:110
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:254
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1039
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: demux_utils.c:211
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:155
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:460
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
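A quick worked example of the buffer-size helpers (values are illustrative): for packed stereo S16 with align=1 (no padding), the result is nb_samples * nb_channels * bytes per sample.

#include <libavutil/samplefmt.h>

static int stereo_s16_bytes(int nb_samples)
{
    int linesize;
    /* align=1 means no padding: nb_samples * 2 ch * 2 bytes; 1024 samples -> 4096 bytes */
    return av_samples_get_buffer_size(&linesize, 2, nb_samples, AV_SAMPLE_FMT_S16, 1);
}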
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:53
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:117
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
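In other words, av_rescale(a, b, c) evaluates a * b / c with a 64-bit intermediate and round-to-nearest; a small worked example (the helper name is illustrative):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t ms_to_timebase(int64_t ms)
{
    /* ms_to_timebase(3) == 3 * AV_TIME_BASE / 1000 == 3000 (microseconds) */
    return av_rescale(ms, AV_TIME_BASE, 1000);
}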
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:129
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:754
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:455
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:474
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:144
VideoState::height
int height
Definition: ffplay.c:291
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:910
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1234
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:224
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:320
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:841
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:655
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2245
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3070
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:384
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:445
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:294
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:352
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:72
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:346
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:260
find_stream_info
static int find_stream_info
Definition: ffplay.c:351
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:412
pos
unsigned int pos
Definition: spdifenc.c:413
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:248
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:296
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
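For illustration, an auto-growing FIFO of MyAVPacketList entries similar to the one PacketQueue keeps could be exercised as below (a sketch; pkt and serial are assumed to come from the caller, and MyAVPacketList/AVPacket from this file's own headers):

#include <libavutil/error.h>
#include <libavutil/fifo.h>

/* Push one entry and pop it back; AV_FIFO_FLAG_AUTO_GROW lets the FIFO
 * resize itself on writes, so an initial size of 1 element is enough. */
static int fifo_roundtrip(AVPacket *pkt, int serial)
{
    AVFifo *fifo = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
    MyAVPacketList in = { .pkt = pkt, .serial = serial }, out;
    int ret;

    if (!fifo)
        return AVERROR(ENOMEM);
    ret = av_fifo_write(fifo, &in, 1);
    if (ret >= 0)
        ret = av_fifo_read(fifo, &out, 1);   /* out now equals in */
    av_fifo_freep2(&fifo);
    return ret;
}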
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:447
VideoState::last_paused
int last_paused
Definition: ffplay.c:209
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 would otherwise interfere and lead to "Last message repeated x times" messages below (f)printf messages with some bad luck.
Definition: log.h:370
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:344
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:112
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3466
main
int main(int argc, char **argv)
Definition: ffplay.c:3658
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:570
show_usage
static void show_usage(void)
Definition: ffplay.c:3619
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVFrame::height
int height
Definition: frame.h:412
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:122
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:517
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:220
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:341
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:84
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:340
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:123
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2427
PacketQueue::size
int size
Definition: ffplay.c:118
options
static const OptionDef options[]
Definition: ffplay.c:3568
outputs
static const AVFilterPad outputs[]
Definition: af_afwtdn.c:1291
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:568
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:295
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:276
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:206
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:820
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
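av_bprintf() and av_bprint_clear() are typically paired the way status-line printing does it: append, print, then reset the string while keeping the allocation. A self-contained sketch (the function name and format string are illustrative):

#include <libavutil/bprint.h>
#include <libavutil/log.h>

static void print_status(double av_diff)
{
    AVBPrint buf;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&buf, "A-V: %7.3f", av_diff);
    av_log(NULL, AV_LOG_INFO, "%s\r", buf.str);
    av_bprint_clear(&buf);            /* back to "" but the buffer stays allocated */
    av_bprint_finalize(&buf, NULL);   /* release it when done for good */
}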
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:640
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:247
VideoState::eof
int eof
Definition: ffplay.c:288
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:672
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:204
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:250
VideoState::subdec
Decoder subdec
Definition: ffplay.c:229
AVIOContext::eof_reached
int eof_reached
true if was unable to read due to error or eof
Definition: avio.h:248
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2715
samples
In filter terminology, the word “frame” indicates either a video frame or a group of audio samples.
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:298
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:144
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1217
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:493
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:401
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:127
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
input_filename
static const char * input_filename
Definition: ffplay.c:308
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:647
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1471
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:303
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:169
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3214
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:449
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:482
VideoState::real_data
float * real_data
Definition: ffplay.c:268
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1760
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
default_width
static int default_width
Definition: ffplay.c:310
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1793
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
VideoState::realtime
int realtime
Definition: ffplay.c:217
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:287
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVPacket
This structure stores compressed data.
Definition: packet.h:468
audio_disable
static int audio_disable
Definition: ffplay.c:316
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3226
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
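This is the grow-only scratch-buffer pattern used for fields such as audio_buf1: the buffer is reallocated only when the requested size exceeds the recorded one. A minimal sketch (the wrapper name is illustrative):

#include <stdint.h>
#include <libavutil/mem.h>

/* Reallocates only when min_size exceeds *size, otherwise leaves the existing
 * allocation untouched. On failure the buffer is freed and *buf set to NULL. */
static uint8_t *get_scratch(uint8_t **buf, unsigned int *size, size_t min_size)
{
    av_fast_malloc(buf, size, min_size);
    return *buf;
}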
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1191
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:278
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:402
OPT_BOOL
#define OPT_BOOL
Definition: cmdutils.h:110
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:200
d
d
Definition: ffmpeg_filter.c:368
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:338
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:231
imgutils.h
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:246
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1207
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:183
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:120
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:216
VideoState::viddec
Decoder viddec
Definition: ffplay.c:228
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:890
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:244
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:867
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:733
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:271
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1457
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:109
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:333
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:601
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:254
afilters
static char * afilters
Definition: ffplay.c:349
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1076
int
int
Definition: ffmpeg_filter.c:368
SwsContext
Definition: swscale_internal.h:299
VideoState::audclk
Clock audclk
Definition: ffplay.c:219
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1542
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1024
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:179
short
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it. vf / default / minimum / maximum / flags: name is the option, keep it simple and lowercase, description are short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1040
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
Decoder::pkt
AVPacket * pkt
Definition: ffmpeg_dec.c:37
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:136
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1442
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
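A small sketch combining the dictionary helpers referenced here (the option names are illustrative only):

#include <libavutil/dict.h>
#include <libavutil/log.h>

static void dict_demo(void)
{
    AVDictionary *opts = NULL;
    const AVDictionaryEntry *e = NULL;

    av_dict_set(&opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
    av_dict_set_int(&opts, "probesize", 5000000, 0);

    while ((e = av_dict_iterate(opts, e)))          /* a NULL prev starts the walk */
        av_log(NULL, AV_LOG_INFO, "%s=%s\n", e->key, e->value);

    av_dict_free(&opts);
}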
parse_number
int parse_number(const char *context, const char *numstr, int type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:86
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2271
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:457
duration
static int64_t duration
Definition: ffplay.c:329
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:111
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2722
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:312
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:116
Frame::height
int height
Definition: ffplay.c:162
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2709
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:280
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:236
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:393
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:831
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:457
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:335