58 #include <SDL_thread.h>
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
78 #define SDL_VOLUME_STEP (0.75)
81 #define AV_SYNC_THRESHOLD_MIN 0.04
83 #define AV_SYNC_THRESHOLD_MAX 0.1
85 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
87 #define AV_NOSYNC_THRESHOLD 10.0
90 #define SAMPLE_CORRECTION_PERCENT_MAX 10
93 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
94 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
95 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
98 #define AUDIO_DIFF_AVG_NB 20
101 #define REFRESH_RATE 0.01
105 #define SAMPLE_ARRAY_SIZE (8 * 65536)
107 #define CURSOR_HIDE_DELAY 1000000
109 #define USE_ONEPASS_SUBTITLE_RENDER 1
130 #define VIDEO_PICTURE_QUEUE_SIZE 3
131 #define SUBPICTURE_QUEUE_SIZE 16
132 #define SAMPLE_QUEUE_SIZE 9
133 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
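The constants above drive ffplay's A/V synchronisation: AV_SYNC_THRESHOLD_MIN/MAX bound how far the video clock may drift from the master clock before the frame delay is adjusted, AV_SYNC_FRAMEDUP_THRESHOLD limits frame duplication, and AV_NOSYNC_THRESHOLD gives up on correction for hopeless timestamps. A minimal sketch of how such thresholds are typically applied — simplified from the compute_target_delay() logic shown later in this listing (around source lines 1531-1548), which additionally caps the correction window at the maximum frame duration — looks like this:

/* Sketch only, not the verbatim ffplay function. 'delay' is the previous
 * frame duration and 'diff' is video clock minus master clock, in seconds.
 * FFMAX/FFMIN come from libavutil/common.h, isnan/fabs from <math.h>. */
static double target_delay_sketch(double delay, double diff)
{
    double sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN,
                                  FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
    if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
        if (diff <= -sync_threshold)          /* video is late: shorten the wait */
            delay = FFMAX(0, delay + diff);
        else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
            delay = delay + diff;             /* early, long frame: wait it out */
        else if (diff >= sync_threshold)
            delay = 2 * delay;                /* early, short frame: show it twice */
    }
    return delay;
}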
352 static const char **vfilters_list = NULL;
353 static int nb_vfilters = 0;
354 static char *afilters = NULL;
366 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
400 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
403 vfilters_list[nb_vfilters - 1] = arg;
413 if (channel_count1 == 1 && channel_count2 == 1)
416 return channel_count1 != channel_count2 || fmt1 != fmt2;
423 return channel_layout;
453 SDL_CondSignal(q->cond);
461 SDL_LockMutex(q->mutex);
463 SDL_UnlockMutex(q->mutex);
485 q->mutex = SDL_CreateMutex();
490 q->cond = SDL_CreateCond();
503 SDL_LockMutex(q->mutex);
514 SDL_UnlockMutex(q->mutex);
520 SDL_DestroyMutex(q->mutex);
521 SDL_DestroyCond(q->cond);
526 SDL_LockMutex(q->mutex);
530 SDL_CondSignal(q->cond);
532 SDL_UnlockMutex(q->mutex);
537 SDL_LockMutex(q->mutex);
540 SDL_UnlockMutex(q->mutex);
549 SDL_LockMutex(q->mutex);
578 SDL_UnlockMutex(q->mutex);
673 av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
698 if (!(f->mutex = SDL_CreateMutex())) {
702 if (!(f->cond = SDL_CreateCond())) {
708 f->keep_last = !!keep_last;
709 for (i = 0; i < f->max_size; i++)
718 for (i = 0; i < f->max_size; i++) {
723 SDL_DestroyMutex(f->mutex);
724 SDL_DestroyCond(f->cond);
729 SDL_LockMutex(f->mutex);
730 SDL_CondSignal(f->cond);
731 SDL_UnlockMutex(f->mutex);
736 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
741 return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
746 return &f->queue[f->rindex];
752 SDL_LockMutex(f->mutex);
753 while (f->size >= f->max_size &&
754        !f->pktq->abort_request) {
755 SDL_CondWait(f->cond, f->mutex);
757 SDL_UnlockMutex(f->mutex);
759 if (f->pktq->abort_request)
762 return &f->queue[f->windex];
768 SDL_LockMutex(f->mutex);
769 while (f->size - f->rindex_shown <= 0 &&
770        !f->pktq->abort_request) {
771 SDL_CondWait(f->cond, f->mutex);
773 SDL_UnlockMutex(f->mutex);
775 if (f->pktq->abort_request)
778 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
783 if (++f->windex == f->max_size)
785 SDL_LockMutex(f->mutex);
787 SDL_CondSignal(f->cond);
788 SDL_UnlockMutex(f->mutex);
793 if (f->keep_last && !f->rindex_shown) {
798 if (++f->rindex == f->max_size)
800 SDL_LockMutex(f->mutex);
802 SDL_CondSignal(f->cond);
803 SDL_UnlockMutex(f->mutex);
809 return f->size - f->rindex_shown;
816 if (f->rindex_shown && fp->serial == f->pktq->serial)
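The frame_queue_peek* helpers above (source lines 736-778) treat the queue as a fixed-size ring buffer; rindex_shown records whether the frame at rindex is still being displayed and must not be overwritten yet. A small sketch of the same indexing, with hypothetical names:

/* Sketch only: ring-buffer indexing mirroring frame_queue_peek()/next(). */
static int peek_slot(int rindex, int rindex_shown, int max_size)
{
    /* The next frame to display sits rindex_shown slots past rindex. */
    return (rindex + rindex_shown) % max_size;
}

static void advance_read(int *rindex, int *size, int max_size)
{
    if (++*rindex == max_size)   /* wrap around, as on source lines 783/798 */
        *rindex = 0;
    (*size)--;
}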
842 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
846 if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
850 SDL_DestroyTexture(*texture);
851 if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
853 if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
856 if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
858 memset(pixels, 0, pitch * new_height);
859 SDL_UnlockTexture(*texture);
861 av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
867 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
868 int pic_width, int pic_height, AVRational pic_sar)
881 if (width > scr_width) {
885 x = (scr_width - width) / 2;
886 y = (scr_height - height) / 2;
887 rect->x = scr_xleft + x;
888 rect->y = scr_ytop + y;
896 *sdl_blendmode = SDL_BLENDMODE_NONE;
897 *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
902 *sdl_blendmode = SDL_BLENDMODE_BLEND;
914 SDL_BlendMode sdl_blendmode;
916 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
918 switch (sdl_pix_fmt) {
919 case SDL_PIXELFORMAT_UNKNOWN:
924 if (*img_convert_ctx != NULL) {
927 if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
929 0, frame->height, pixels, pitch);
930 SDL_UnlockTexture(*tex);
937 case SDL_PIXELFORMAT_IYUV:
938 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
942 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
952 if (frame->linesize[0] < 0) {
964 #if SDL_VERSION_ATLEAST(2,0,8)
965 SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
968 mode = SDL_YUV_CONVERSION_JPEG;
970 mode = SDL_YUV_CONVERSION_BT709;
972 mode = SDL_YUV_CONVERSION_BT601;
974 SDL_SetYUVConversionMode(mode);
985 if (is->subtitle_st) {
989 if (vp->pts >= sp->pts + ((float)sp->sub.start_display_time / 1000)) {
994 if (!sp->width || !sp->height) {
998 if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
1001 for (i = 0; i < sp->sub.num_rects; i++) {
1004 sub_rect->x = av_clip(sub_rect->x, 0, sp->width);
1005 sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1006 sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1007 sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1013 if (!is->sub_convert_ctx) {
1017 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1018 sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1019 0, sub_rect->h, pixels, pitch);
1020 SDL_UnlockTexture(is->sub_texture);
1043 #if USE_ONEPASS_SUBTITLE_RENDER
1047 double xratio = (double)rect.w / (double)sp->width;
1048 double yratio = (double)rect.h / (double)sp->height;
1049 for (i = 0; i < sp->sub.num_rects; i++) {
1050 SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1051 SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1052                    .y = rect.y + sub_rect->y * yratio,
1053                    .w = sub_rect->w * xratio,
1054                    .h = sub_rect->h * yratio};
1055 SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1063 return a < 0 ? a%b + b : a%b;
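compute_mod() on source line 1063 returns a result that is always non-negative, which plain % in C does not guarantee when a is negative; it is used to wrap indices into the audio sample array. For example, assuming the function as written above:

/* compute_mod(-3, 8) == 5, whereas -3 % 8 == -3 in C;
 * compute_mod(11, 8) == 3, same as 11 % 8. */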
1068 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1071 int rdft_bits, nb_freq;
1073 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1075 nb_freq = 1 << (rdft_bits - 1);
1081 int data_used = s->show_mode == SHOW_MODE_WAVES ? s->width : (2 * nb_freq);
1083 delay = s->audio_write_buf_size;
1090 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1093 delay += 2 * data_used;
1094 if (delay < data_used)
1098 if (s->show_mode == SHOW_MODE_WAVES) {
1102 int a = s->sample_array[idx];
1107 if (h < score && (b ^ c) < 0) {
1114 s->last_i_start = i_start;
1116 i_start = s->last_i_start;
1119 if (s->show_mode == SHOW_MODE_WAVES) {
1120 SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1123 h = s->height / nb_display_channels;
1126 for (ch = 0; ch < nb_display_channels; ch++) {
1128 y1 = s->ytop + ch * h + (h / 2);
1129 for (x = 0; x < s->width; x++) {
1130 y = (s->sample_array[i] * h2) >> 15;
1144 SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1146 for (ch = 1; ch < nb_display_channels; ch++) {
1147 y = s->ytop + ch * h;
1151 if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1154 if (s->xpos >= s->width)
1156 nb_display_channels = FFMIN(nb_display_channels, 2);
1157 if (rdft_bits != s->rdft_bits) {
1161 s->rdft_bits = rdft_bits;
1164 if (!s->rdft || !s->rdft_data) {
1166 s->show_mode = SHOW_MODE_WAVES;
1169 SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1172 for (ch = 0; ch < nb_display_channels; ch++) {
1173 data[ch] = s->rdft_data + 2 * nb_freq * ch;
1175 for (x = 0; x < 2 * nb_freq; x++) {
1176 double w = (x - nb_freq) * (1.0 / nb_freq);
1177 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1186 if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1188 pixels += pitch * s->height;
1189 for (y = 0; y < s->height; y++) {
1190 double w = 1 / sqrt(nb_freq);
1191 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1192 int b = (nb_display_channels == 2) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1197 *pixels = (a << 16) + (b << 8) + ((a + b) >> 1);
1199 SDL_UnlockTexture(s->vis_texture);
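For the RDFT display, the loop on source line 1073 picks the smallest rdft_bits with 2^rdft_bits >= 2 * height. For a 512-pixel-high display area, for example, that gives rdft_bits = 10 and nb_freq = 2^9 = 512 frequency bins, i.e. roughly one bin per pixel row of the one-pixel-wide spectrogram column drawn at s->xpos.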
1213 if (stream_index < 0 || stream_index >= ic->nb_streams)
1224 is->audio_buf1_size = 0;
1250 is->audio_stream = -1;
1254 is->video_stream = -1;
1258 is->subtitle_stream = -1;
1268 is->abort_request = 1;
1269 SDL_WaitThread(is->read_tid, NULL);
1272 if (is->audio_stream >= 0)
1274 if (is->video_stream >= 0)
1276 if (is->subtitle_stream >= 0)
1289 SDL_DestroyCond(is->continue_read_thread);
1293 if (is->vis_texture)
1294 SDL_DestroyTexture(is->vis_texture);
1295 if (is->vid_texture)
1296 SDL_DestroyTexture(is->vid_texture);
1297 if (is->sub_texture)
1298 SDL_DestroyTexture(is->sub_texture);
1310 SDL_DestroyWindow(window);
1333 if (max_width == INT_MAX && max_height == INT_MAX)
1354 SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1369 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1371 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1373 else if (is->video_st)
1380 if (*c->queue_serial != c->serial)
1386 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
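The expression on source line 1386, together with the assignments on lines 1393-1394, is the core of the clock abstraction: a clock stores pts minus wall time when it is set and extrapolates from that on every read, so pausing or changing speed never needs a timer. A self-contained sketch of the relation, with hypothetical names (the real Clock also tracks a serial and a paused flag):

/* Sketch only: the relation behind get_clock()/set_clock_at()
 * (source lines 1386-1394). Times are in seconds; speed 1.0 = real time. */
typedef struct { double pts_drift, last_updated, speed; } MiniClock;

static void mini_set_clock(MiniClock *c, double pts, double now)
{
    c->last_updated = now;        /* line 1393 */
    c->pts_drift    = pts - now;  /* line 1394 */
}

static double mini_get_clock(const MiniClock *c, double now)
{
    /* line 1386: advance the stored pts by elapsed wall time,
     * compensated when the clock runs at a non-unit speed. */
    return c->pts_drift + now - (now - c->last_updated) * (1.0 - c->speed);
}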
1393 c->last_updated = time;
1394 c->pts_drift = c->pts - time;
1414 c->queue_serial = queue_serial;
1469 double speed = is->extclk.speed;
1478 if (!is->seek_req) {
1485 SDL_CondSignal(is->continue_read_thread);
1494 if (is->read_pause_return != AVERROR(ENOSYS)) {
1495 is->vidclk.paused = 0;
1500 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1511 is->muted = !is->muted;
1516 double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1517 int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1518 is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
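update_volume() (source lines 1516-1518) steps the volume in decibels rather than in raw SDL units, with a +/-1 fallback so a small step cannot round back to the same integer volume. For example, with audio_volume = 64 and SDL_MIX_MAXVOLUME = 128, volume_level is 20*log10(0.5), roughly -6.02 dB; stepping up by SDL_VOLUME_STEP (0.75 dB) gives about -5.27 dB, so new_volume = lrint(128 * 10^(-5.27/20)), roughly 70.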
1531 double sync_threshold, diff = 0;
1544 if (diff <= -sync_threshold)
1547 delay = delay + diff;
1548 else if (diff >= sync_threshold)
1562 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1590 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1592 is->last_vis_time = time;
1594 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1602 double last_duration, duration, delay;
1609 if (vp->serial != is->videoq.serial) {
1625 if (time < is->frame_timer + delay) {
1626 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1630 is->frame_timer += delay;
1632 is->frame_timer = time;
1634 SDL_LockMutex(is->pictq.mutex);
1637 SDL_UnlockMutex(is->pictq.mutex);
1643 is->frame_drops_late++;
1649 if (is->subtitle_st) {
1658 if (sp->serial != is->subtitleq.serial
1659     || (is->vidclk.pts > (sp->pts + ((float)sp->sub.end_display_time / 1000)))
1664 for (i = 0; i < sp->sub.num_rects; i++) {
1669 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1670 for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1671 memset(pixels, 0, sub_rect->w << 2);
1672 SDL_UnlockTexture(is->sub_texture);
1684 is->force_refresh = 1;
1686 if (is->step && !is->paused)
1691 if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1694 is->force_refresh = 0;
1697 static int64_t last_time;
1699 int aqsize, vqsize, sqsize;
1703 if (!last_time || (cur_time - last_time) >= 30000) {
1708 aqsize = is->audioq.size;
1710 vqsize = is->videoq.size;
1711 if (is->subtitle_st)
1712 sqsize = is->subtitleq.size;
1714 if (is->audio_st && is->video_st)
1716 else if (is->video_st)
1718 else if (is->audio_st)
1723 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1725 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1727 is->frame_drops_early + is->frame_drops_late,
1731 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1732 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1735 fprintf(stderr, "%s", buf.str);
1742 last_time = cur_time;
1751 #if defined(DEBUG_SYNC)
1752 printf("frame_type=%c pts=%0.3f\n",
1797 diff - is->frame_last_filter_delay < 0 &&
1798 is->viddec.pkt_serial == is->vidclk.serial &&
1799 is->videoq.nb_packets) {
1800 is->frame_drops_early++;
1828 outputs->filter_ctx = source_ctx;
1833 inputs->filter_ctx = sink_ctx;
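The inputs/outputs wiring on source lines 1828-1833 is the standard libavfilter pattern: the unconnected output pad of the buffer source and the unconnected input pad of the buffer sink are described by AVFilterInOut entries, and the user's filter string is parsed between them. A minimal, self-contained sketch of that pattern (hypothetical function name; error handling shortened, not the verbatim ffplay code):

#include <errno.h>
#include <libavfilter/avfilter.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Sketch only: wire 'filters' between an existing source and sink the way
 * configure_filtergraph() does. */
static int wire_graph(AVFilterGraph *graph, const char *filters,
                      AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret = (outputs && inputs) ? 0 : AVERROR(ENOMEM);

    if (ret >= 0) {
        outputs->name       = av_strdup("in");   /* free output pad of the source */
        outputs->filter_ctx = source_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;
        inputs->name        = av_strdup("out");  /* free input pad of the sink */
        inputs->filter_ctx  = sink_ctx;
        inputs->pad_idx     = 0;
        inputs->next        = NULL;
        ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
    }
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);
    avfilter_inout_free(&outputs);
    avfilter_inout_free(&inputs);
    return ret;
}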
1858 char sws_flags_str[512] = "";
1859 char buffersrc_args[256];
1865 int nb_pix_fmts = 0;
1879 if (!strcmp(e->key, "sws_flags")) {
1880 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1884 if (strlen(sws_flags_str))
1885 sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1889 snprintf(buffersrc_args, sizeof(buffersrc_args),
1890 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1892 is->video_st->time_base.num, is->video_st->time_base.den,
1895 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1899 "ffplay_buffer", buffersrc_args, NULL,
1905 "ffplay_buffersink", NULL, NULL, graph);
1912 last_filter = filt_out;
1916 #define INSERT_FILT(name, arg) do { \
1917 AVFilterContext *filt_ctx; \
1919 ret = avfilter_graph_create_filter(&filt_ctx, \
1920 avfilter_get_by_name(name), \
1921 "ffplay_" name, arg, NULL, graph); \
1925 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1929 last_filter = filt_ctx; \
1935 if (fabs(theta - 90) < 1.0) {
1936 INSERT_FILT("transpose", "clock");
1937 } else if (fabs(theta - 180) < 1.0) {
1938 INSERT_FILT("hflip", NULL);
1939 INSERT_FILT("vflip", NULL);
1940 } else if (fabs(theta - 270) < 1.0) {
1941 INSERT_FILT("transpose", "cclock");
1942 } else if (fabs(theta) > 1.0) {
1943 char rotate_buf[64];
1944 snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1945 INSERT_FILT("rotate", rotate_buf);
1952 is->in_video_filter = filt_src;
1953 is->out_video_filter = filt_out;
1959 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1966 char aresample_swr_opts[512] = "";
1968 char asrc_args[256];
1978 if (strlen(aresample_swr_opts))
1979 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1980 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1983 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1985 is->audio_filter_src.channels,
1986 1, is->audio_filter_src.freq);
1987 if (is->audio_filter_src.channel_layout)
1989 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1993 asrc_args, NULL, is->agraph);
2009 if (force_output_format) {
2027 is->in_audio_filter = filt_asrc;
2028 is->out_audio_filter = filt_asink;
2043 int last_serial = -1;
2044 int64_t dec_channel_layout;
2067 is->audio_filter_src.channel_layout != dec_channel_layout ||
2068 is->audio_filter_src.freq != frame->sample_rate ||
2069 is->auddec.pkt_serial != last_serial;
2072 char buf1[1024], buf2[1024];
2076 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2080 is->audio_filter_src.fmt = frame->format;
2081 is->audio_filter_src.channels = frame->channels;
2082 is->audio_filter_src.channel_layout = dec_channel_layout;
2083 is->audio_filter_src.freq = frame->sample_rate;
2084 last_serial = is->auddec.pkt_serial;
2086 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2101 af->serial = is->auddec.pkt_serial;
2108 if (is->audioq.serial != is->auddec.pkt_serial)
2112 is->auddec.finished = is->auddec.pkt_serial;
2151 int last_serial = -1;
2152 int last_vfilter_idx = 0;
2166 if ( last_w != frame->width
2167     || last_h != frame->height
2168     || last_format != frame->format
2169     || last_serial != is->viddec.pkt_serial
2170     || last_vfilter_idx != is->vfilter_idx) {
2172 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2184 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2187 event.user.data1 = is;
2188 SDL_PushEvent(&event);
2191 filt_in = is->in_video_filter;
2192 filt_out = is->out_video_filter;
2193 last_w = frame->width;
2194 last_h = frame->height;
2195 last_format = frame->format;
2196 last_serial = is->viddec.pkt_serial;
2197 last_vfilter_idx = is->vfilter_idx;
2211 is->viddec.finished = is->viddec.pkt_serial;
2218 is->frame_last_filter_delay = 0;
2226 if (is->videoq.serial != is->viddec.pkt_serial)
2258 if (got_subtitle && sp->sub.format == 0) {
2262 sp->serial = is->subdec.pkt_serial;
2263 sp->width = is->subdec.avctx->width;
2264 sp->height = is->subdec.avctx->height;
2269 } else if (got_subtitle) {
2286 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2288 is->sample_array_index += len;
2290 is->sample_array_index = 0;
2299 int wanted_nb_samples = nb_samples;
2303 double diff, avg_diff;
2304 int min_nb_samples, max_nb_samples;
2309 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2312 is->audio_diff_avg_count++;
2315 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2317 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2318 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2321 wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2324 diff, avg_diff, wanted_nb_samples - nb_samples,
2325 is->audio_clock, is->audio_diff_threshold);
2330 is->audio_diff_avg_count = 0;
2331 is->audio_diff_cum = 0;
2335 return wanted_nb_samples;
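synchronize_audio() above (source lines 2309-2321) only stretches or squeezes the audio when an exponentially weighted average of the clock difference exceeds audio_diff_threshold, so a single late frame cannot trigger resampling. The update is a plain EWMA, sketched here with hypothetical names:

/* Sketch only: the running average behind lines 2309-2315. With a geometric
 * coefficient c in (0,1):  cum = diff + c*cum;  avg = cum * (1 - c);
 * which makes avg an exponentially weighted mean of recent differences. */
static double ewma_update(double *cum, double diff, double c)
{
    *cum = diff + c * *cum;
    return *cum * (1.0 - c);
}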
2347 int data_size, resampled_data_size;
2348 int64_t dec_channel_layout;
2350 int wanted_nb_samples;
2367 } while (af->serial != is->audioq.serial);
2373 dec_channel_layout =
2379 dec_channel_layout != is->audio_src.channel_layout ||
2384 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2389 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2395 is->audio_src.channel_layout = dec_channel_layout;
2404 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2419 if (!is->audio_buf1)
2426 if (len2 == out_count) {
2431 is->audio_buf = is->audio_buf1;
2435 resampled_data_size = data_size;
2438 audio_clock0 = is->audio_clock;
2443 is->audio_clock = NAN;
2444 is->audio_clock_serial = af->serial;
2447 static double last_clock;
2448 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2449 is->audio_clock - last_clock,
2450 is->audio_clock, audio_clock0);
2451 last_clock = is->audio_clock;
2454 return resampled_data_size;
2461 int audio_size, len1;
2466 if (is->audio_buf_index >= is->audio_buf_size) {
2468 if (audio_size < 0) {
2473 if (is->show_mode != SHOW_MODE_VIDEO)
2475 is->audio_buf_size = audio_size;
2477 is->audio_buf_index = 0;
2479 len1 = is->audio_buf_size - is->audio_buf_index;
2482 if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2483 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2485 memset(stream, 0, len1);
2486 if (!is->muted && is->audio_buf)
2487 SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2491 is->audio_buf_index += len1;
2493 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2495 if (!isnan(is->audio_clock)) {
2501 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2503 SDL_AudioSpec wanted_spec, spec;
2505 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2506 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2507 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2509 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2511 wanted_nb_channels = atoi(env);
2519 wanted_spec.channels = wanted_nb_channels;
2520 wanted_spec.freq = wanted_sample_rate;
2521 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2525 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2526 next_sample_rate_idx--;
2527 wanted_spec.format = AUDIO_S16SYS;
2528 wanted_spec.silence = 0;
2531 wanted_spec.userdata = opaque;
2532 while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2534 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2535 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2536 if (!wanted_spec.channels) {
2537 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2538 wanted_spec.channels = wanted_nb_channels;
2539 if (!wanted_spec.freq) {
2541 "No more combinations to try, audio open failed\n");
2547 if (spec.format != AUDIO_S16SYS) {
2549 "SDL advised audio format %d is not supported!\n", spec.format);
2552 if (spec.channels != wanted_spec.channels) {
2554 if (!wanted_channel_layout) {
2556 "SDL advised channel count %d is not supported!\n", spec.channels);
2562 audio_hw_params->freq = spec.freq;
2564 audio_hw_params->channels = spec.channels;
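The fallback loop in audio_open() above walks the two static tables when SDL_OpenAudioDevice() rejects a spec: the channel count steps through next_nb_channels[] (for example 6 -> 4 -> 2 -> 1), and only when it reaches 0 is the next lower entry of next_sample_rates[] tried with the originally requested channel count, until both tables are exhausted and the open fails.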
2580 const char *forced_codec_name = NULL;
2584 int64_t channel_layout;
2586 int stream_lowres = lowres;
2588 if (stream_index < 0 || stream_index >= ic->nb_streams)
2607 if (forced_codec_name)
2611 "No codec could be found with name '%s'\n", forced_codec_name);
2624 avctx->lowres = stream_lowres;
2654 is->audio_filter_src.channels = avctx->channels;
2657 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2659 sink = is->out_audio_filter;
2673 is->audio_hw_buf_size = ret;
2674 is->audio_src = is->audio_tgt;
2675 is->audio_buf_size = 0;
2676 is->audio_buf_index = 0;
2680 is->audio_diff_avg_count = 0;
2683 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2685 is->audio_stream = stream_index;
2686 is->audio_st = ic->streams[stream_index];
2690 is->auddec.start_pts = is->audio_st->start_time;
2691 is->auddec.start_pts_tb = is->audio_st->time_base;
2698 is->video_stream = stream_index;
2699 is->video_st = ic->streams[stream_index];
2704 is->queue_attachments_req = 1;
2707 is->subtitle_stream = stream_index;
2708 is->subtitle_st = ic->streams[stream_index];
2730 return is->abort_request;
2734 return stream_id < 0 ||
2742 if( !strcmp(s->iformat->name, "rtp")
2743    || !strcmp(s->iformat->name, "rtsp")
2744    || !strcmp(s->iformat->name, "sdp")
2748 if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2749              || !strncmp(s->url, "udp:", 4)
2764 int64_t stream_start_time;
2765 int pkt_in_play_range = 0;
2767 SDL_mutex *wait_mutex = SDL_CreateMutex();
2768 int scan_all_pmts_set = 0;
2777 memset(st_index, -1, sizeof(st_index));
2790 scan_all_pmts_set = 1;
2798 if (scan_all_pmts_set)
2819 for (i = 0; i < orig_nb_streams; i++)
2825 "%s: could not find codec parameters\n", is->filename);
2873 st_index[i] = INT_MAX;
2901 if (codecpar->width)
2914 if (is->show_mode == SHOW_MODE_NONE)
2915 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2921 if (is->video_stream < 0 && is->audio_stream < 0) {
2928 if (infinite_buffer < 0 && is->realtime)
2932 if (is->abort_request)
2934 if (is->paused != is->last_paused) {
2935 is->last_paused = is->paused;
2941 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2952 int64_t seek_target = is->seek_pos;
2953 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2954 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2961 "%s: error while seeking\n", is->ic->url);
2963 if (is->audio_stream >= 0) {
2967 if (is->subtitle_stream >= 0) {
2971 if (is->video_stream >= 0) {
2982 is->queue_attachments_req = 1;
2987 if (is->queue_attachments_req) {
2995 is->queue_attachments_req = 0;
3005 SDL_LockMutex(wait_mutex);
3006 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3007 SDL_UnlockMutex(wait_mutex);
3023 if (is->video_stream >= 0)
3025 if (is->audio_stream >= 0)
3027 if (is->subtitle_stream >= 0)
3033 SDL_LockMutex(wait_mutex);
3034 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3035 SDL_UnlockMutex(wait_mutex);
3044 (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3069 event.user.data1 = is;
3070 SDL_PushEvent(&event);
3072 SDL_DestroyMutex(wait_mutex);
3083 is->last_video_stream = is->video_stream = -1;
3084 is->last_audio_stream = is->audio_stream = -1;
3085 is->last_subtitle_stream = is->subtitle_stream = -1;
3106 if (!(is->continue_read_thread = SDL_CreateCond())) {
3114 is->audio_clock_serial = -1;
3125 if (!is->read_tid) {
3137 int start_index, stream_index;
3144 start_index = is->last_video_stream;
3145 old_index = is->video_stream;
3147 start_index = is->last_audio_stream;
3148 old_index = is->audio_stream;
3150 start_index = is->last_subtitle_stream;
3151 old_index = is->subtitle_stream;
3153 stream_index = start_index;
3159 for (start_index = 0; start_index < nb_streams; start_index++)
3164 stream_index = start_index;
3174 is->last_subtitle_stream = -1;
3177 if (start_index == -1)
3181 if (stream_index == start_index)
3183 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3201 if (p && stream_index != -1)
3221 int next = is->show_mode;
3223 next = (next + 1) % SHOW_MODE_NB;
3224 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3225 if (is->show_mode != next) {
3226 is->force_refresh = 1;
3227 is->show_mode = next;
3232 double remaining_time = 0.0;
3234 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3239 if (remaining_time > 0.0)
3240 av_usleep((int64_t)(remaining_time * 1000000.0));
3242 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3253 if (!is->ic->nb_chapters)
3257 for (i = 0; i < is->ic->nb_chapters; i++) {
3267 if (i >= is->ic->nb_chapters)
3279 double incr, pos, frac;
3284 switch (event.type) {
3286 if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3291 if (!cur_stream->width)
3293 switch (event.key.keysym.sym) {
3305 case SDLK_KP_MULTIPLY:
3309 case SDLK_KP_DIVIDE:
3332 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3333 if (++cur_stream->vfilter_idx >= nb_vfilters)
3334 cur_stream->vfilter_idx = 0;
3336 cur_stream->vfilter_idx = 0;
3397 case SDL_MOUSEBUTTONDOWN:
3402 if (event.button.button == SDL_BUTTON_LEFT) {
3403 static int64_t last_mouse_left_click = 0;
3407 last_mouse_left_click = 0;
3412 case SDL_MOUSEMOTION:
3418 if (event.type == SDL_MOUSEBUTTONDOWN) {
3419 if (event.button.button != SDL_BUTTON_RIGHT)
3423 if (!(event.motion.state & SDL_BUTTON_RMASK))
3433 int tns, thh, tmm, tss;
3436 tmm = (tns % 3600) / 60;
3438 frac = x / cur_stream->width;
3441 mm = (ns % 3600) / 60;
3444 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3445 hh, mm, ss, thh, tmm, tss);
3452 case SDL_WINDOWEVENT:
3453 switch (event.window.event) {
3454 case SDL_WINDOWEVENT_SIZE_CHANGED:
3461 case SDL_WINDOWEVENT_EXPOSED:
3511 if (!strcmp(arg, "audio"))
3513 else if (!strcmp(arg, "video"))
3515 else if (!strcmp(arg, "ext"))
3539 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3540 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3549 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3553 if (!strcmp(filename, "-"))
3560 const char *spec = strchr(opt, ':');
3563 "No media specifier was specified in '%s' in option '%s'\n",
3574 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3584 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3585 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3594 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3595 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3609 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3620 { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3621 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3624 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3626 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3627 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3633 "read and decode the streams to fill missing information with heuristics" },
3654 #if !CONFIG_AVFILTER
3659 printf("\nWhile playing:\n"
3661 "f toggle full screen\n"
3664 "9, 0 decrease and increase volume respectively\n"
3665 "/, * decrease and increase volume respectively\n"
3666 "a cycle audio channel in the current program\n"
3667 "v cycle video channel\n"
3668 "t cycle subtitle channel in the current program\n"
3670 "w cycle video filters or show modes\n"
3671 "s activate frame-step mode\n"
3672 "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3673 "down/up seek backward/forward 1 minute\n"
3674 "page down/page up seek backward/forward 10 minutes\n"
3675 "right mouse click seek to percentage in file corresponding to fraction of width\n"
3676 "left double-click toggle full screen\n"
3710 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3717 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3719 flags &= ~SDL_INIT_AUDIO;
3723 if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3724 SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE", "1", 1);
3727 flags &= ~SDL_INIT_VIDEO;
3728 if (SDL_Init (flags)) {
3734 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3735 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3741 int flags = SDL_WINDOW_HIDDEN;
3743 #if SDL_VERSION_ATLEAST(2,0,5)
3744 flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3746 av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3749 flags |= SDL_WINDOW_BORDERLESS;
3751 flags |= SDL_WINDOW_RESIZABLE;
3753 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3755 renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static void do_exit(VideoState *is)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static SDL_RendererInfo renderer_info
int configure_filtergraph(FilterGraph *fg)
static int frame_queue_nb_remaining(FrameQueue *f)
static void frame_queue_next(FrameQueue *f)
enum AVMediaType codec_type
General type of the encoded data.
int nb_threads
Maximum number of threads used by filters in this graph.
uint64_t channel_layout
Audio channel layout.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
unsigned int nb_stream_indexes
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
static int64_t frame_queue_last_pos(FrameQueue *f)
int sample_rate
samples per second
#define FFSWAP(type, a, b)
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
static int video_thread(void *arg)
static void set_default_window_size(int width, int height, AVRational sar)
#define AV_NOSYNC_THRESHOLD
unsigned int nb_chapters
Number of chapters in AVChapter array.
This struct describes the properties of an encoded stream.
#define AV_LOG_QUIET
Print no output.
static enum AVSampleFormat sample_fmts[]
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
#define AVERROR_EOF
End of file.
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
static int display_disable
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
#define SAMPLE_ARRAY_SIZE
static void update_volume(VideoState *is, int sign, double step)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
char * av_asprintf(const char *fmt,...)
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
static av_cold int end(AVCodecContext *avctx)
SDL_Texture * vis_texture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
double frame_last_filter_delay
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
static const char * subtitle_codec_name
#define EXTERNAL_CLOCK_MIN_FRAMES
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
static void frame_queue_destory(FrameQueue *f)
#define SAMPLE_QUEUE_SIZE
const char program_name[]
program name, defined by the program for show_version().
AVDictionary * format_opts
int error
contains the error code or 0 if no error happened
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
#define AV_PIX_FMT_RGB32_1
double audio_diff_avg_coef
#define AV_LOG_VERBOSE
Detailed information.
#define CURSOR_HIDE_DELAY
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
static double compute_target_delay(double delay, VideoState *is)
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
static void stream_close(VideoState *is)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
static void init_clock(Clock *c, int *queue_serial)
enum AVMediaType codec_type
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
static int opt_seek(void *optctx, const char *opt, const char *arg)
int64_t avio_size(AVIOContext *s)
Get the filesize.
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
static double get_master_clock(VideoState *is)
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
static int subtitle_thread(void *arg)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
static int subtitle_disable
struct SwrContext * swr_ctx
static int opt_sync(void *optctx, const char *opt, const char *arg)
static void step_to_next_frame(VideoState *is)
enum AVPixelFormat format
static void video_display(VideoState *is)
uint8_t max_lowres
maximum value for lowres supported by the decoder
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
#define SDL_AUDIO_MIN_BUFFER_SIZE
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
static int startup_volume
static SDL_Window * window
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static void toggle_full_screen(VideoState *is)
ff_const59 struct AVInputFormat * iformat
The input container format.
static int packet_queue_init(PacketQueue *q)
#define AUDIO_DIFF_AVG_NB
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
static int opt_duration(void *optctx, const char *opt, const char *arg)
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
int x
top left corner of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
#define AVERROR_OPTION_NOT_FOUND
Option not found.
#define AV_BPRINT_SIZE_AUTOMATIC
static void video_image_display(VideoState *is)
static double val(void *priv, double ch)
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
SDL_cond * empty_queue_cond
static void set_clock_speed(Clock *c, double speed)
double audio_diff_threshold
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
#define ss(width, name, subs,...)
int avformat_network_init(void)
Do global initialization of network libraries.
static int opt_height(void *optctx, const char *opt, const char *arg)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const struct TextureFormatEntry sdl_texture_format_map[]
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
static int is_full_screen
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVDictionary * metadata
Metadata that applies to the whole file.
static int audio_thread(void *arg)
static void set_clock(Clock *c, double pts, int serial)
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
static Frame * frame_queue_peek_next(FrameQueue *f)
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
static void sync_clock_to_slave(Clock *c, Clock *slave)
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static void opt_input_file(void *optctx, const char *filename)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static void frame_queue_signal(FrameQueue *f)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
struct SwsContext * img_convert_ctx
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_CEIL_RSHIFT(a, b)
static int default_height
int flags
Flags modifying the (de)muxer behaviour.
AVRational sample_aspect_ratio
Video only.
int channels
number of audio channels, only used for audio.
#define AV_PIX_FMT_0BGR32
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
int y
top left corner of pict, undefined when pict is not set
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define EXTERNAL_CLOCK_SPEED_STEP
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static const AVFilterPad outputs[]
static enum AVPixelFormat pix_fmts[]
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int opt_codec(void *optctx, const char *opt, const char *arg)
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static double get_clock(Clock *c)
#define EXTERNAL_CLOCK_SPEED_MIN
MyAVPacketList * last_pkt
static unsigned sws_flags
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
static SDL_Renderer * renderer
int av_usleep(unsigned usec)
Sleep for a period of time.
The libswresample context.
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
#define AV_PIX_FMT_BGR32_1
void av_rdft_calc(RDFTContext *s, FFTSample *data)
static int synchronize_audio(VideoState *is, int nb_samples)
static const char * window_title
@ AVDISCARD_ALL
discard all
int av_log_get_level(void)
Get the current log level.
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
void init_dynload(void)
Initialize dynamic library loading.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
int w
width of pict, undefined when pict is not set
static void seek_chapter(VideoState *is, int incr)
static int get_master_sync_type(VideoState *is)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
AVFilterContext ** filters
static void stream_cycle_channel(VideoState *is, int codec_type)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
AVIOContext * pb
I/O context.
void av_log_set_flags(int arg)
static void frame_queue_unref_item(Frame *vp)
Frame queue[FRAME_QUEUE_SIZE]
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
static int64_t cursor_last_shown
unsigned int * stream_index
static Frame * frame_queue_peek(FrameQueue *f)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
double frame_last_returned_time
static void set_clock_at(Clock *c, double pts, int serial, double time)
static void toggle_pause(VideoState *is)
static int stream_component_open(VideoState *is, int stream_index)
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define AV_PIX_FMT_NE(be, le)
static void event_loop(VideoState *cur_stream)
int sample_rate
Audio only.
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
int16_t sample_array[SAMPLE_ARRAY_SIZE]
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static int exit_on_mousedown
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
AVDictionary * codec_opts
static int64_t audio_callback_time
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
int flags2
AV_CODEC_FLAG2_*.
enum AVPictureType pict_type
Picture type of the frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static void copy(const float *p1, float *p2, const int length)
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
static Frame * frame_queue_peek_writable(FrameQueue *f)
int sample_rate
Sample rate of the audio data.
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
static int64_t start_time
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static av_const double hypot(double x, double y)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int read_thread(void *arg)
#define AV_PIX_FMT_BGR555
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
#define AV_NOPTS_VALUE
Undefined timestamp value.
SDL_Texture * sub_texture
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
SDL_Texture * vid_texture
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
printf("static const uint8_t my_array[100] = {\n")
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
static int infinite_buffer
double max_frame_duration
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static void packet_queue_destroy(PacketQueue *q)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
uint64_t channel_layout
Channel layout of the audio data.
static void toggle_mute(VideoState *is)
static void decoder_abort(Decoder *d, FrameQueue *fq)
static void video_refresh(void *opaque, double *remaining_time)
#define ns(max_value, name, subs,...)
static float seek_interval
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static void frame_queue_push(FrameQueue *f)
static SDL_AudioDeviceID audio_dev
static void sigterm_handler(int sig)
#define AV_LOG_INFO
Standard information.
static void packet_queue_abort(PacketQueue *q)
static const char * video_codec_name
int channels
number of audio channels
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
static void packet_queue_flush(PacketQueue *q)
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
@ AVCOL_SPC_SMPTE240M
functionally identical to above
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int queue_attachments_req
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
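A minimal sketch of how this pairs with avcodec_receive_frame() in the send/receive decoding loop; decode_packet and the already-opened dec context are illustrative assumptions, not ffplay's own helper.
#include <libavcodec/avcodec.h>
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    /* Feed one compressed packet to the decoder (a NULL pkt flushes it). */
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    /* Drain every frame the decoder is ready to output. */
    for (;;) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   /* needs more input, or fully flushed */
        if (ret < 0)
            return ret; /* genuine decoding error */
        /* ... consume frame (frame->pts, frame->data, ...) ... */
        av_frame_unref(frame);
    }
}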
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
int nb_samples
number of audio samples (per channel) described by this frame
#define VIDEO_PICTURE_QUEUE_SIZE
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
enum VideoState::ShowMode show_mode
struct AudioParams audio_src
const int program_birth_year
program birth year, defined by the program for show_banner()
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
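A minimal sketch of opening an input and probing its streams; the helper name open_input is an assumption for illustration.
#include <libavformat/avformat.h>
static int open_input(const char *url, AVFormatContext **pic)
{
    int ret = avformat_open_input(pic, url, NULL, NULL);  /* *pic stays NULL on failure */
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(*pic, NULL);          /* optional, but fills stream parameters */
    if (ret < 0)
        avformat_close_input(pic);                        /* also frees the context */
    return ret;
}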
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
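A sketch of inserting a textual filter chain between an existing buffer source and sink; src_ctx, sink_ctx and the helper name insert_filters are illustrative assumptions, not the graph setup used in this file.
#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>
static int insert_filters(AVFilterGraph *graph, AVFilterContext *src_ctx,
                          AVFilterContext *sink_ctx, const char *filters)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;
    if (!outputs || !inputs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* "in" is where the parsed chain reads from, "out" is where it writes to. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = src_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = sink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;
    ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);  /* validate and finalize the graph */
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}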
static int compute_mod(int a, int b)
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
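A sketch of the usual per-stream decoder setup around this call; open_stream_decoder is an illustrative name, and st is assumed to come from an already-opened AVFormatContext.
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
static AVCodecContext *open_stream_decoder(AVStream *st)
{
    AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *avctx;
    if (!codec)
        return NULL;
    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;
    /* Copy the demuxed parameters (dimensions, sample rate, extradata, ...). */
    if (avcodec_parameters_to_context(avctx, st->codecpar) < 0 ||
        avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}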
#define AV_TIME_BASE
Internal time base represented as integer.
uint8_t ** extended_data
pointers to the data planes/channels.
static AVInputFormat * file_iformat
#define av_malloc_array(a, b)
static int video_open(VideoState *is)
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
static int opt_format(void *optctx, const char *opt, const char *arg)
AVSampleFormat
Audio sample formats.
#define AV_PIX_FMT_RGB555
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
static void update_sample_display(VideoState *is, short *samples, int samples_size)
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
New fields can be added to the end with minor version bumps.
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g. RTSP stream); use av_read_play() to resume it.
#define AV_PIX_FMT_BGR565
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
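For example, the byte count needed for packed 16-bit stereo; the sample count 1024 is arbitrary.
#include <libavutil/samplefmt.h>
int linesize;
int size = av_samples_get_buffer_size(&linesize, 2, 1024, AV_SAMPLE_FMT_S16, 1);
/* With align = 1 (no padding): 1024 samples * 2 channels * 2 bytes = 4096 bytes */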
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
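For example, converting a 90 kHz transport-stream timestamp to microseconds; the value is illustrative.
#include <libavutil/mathematics.h>
int64_t pts_90k = 450000;                              /* 5 seconds at 90 kHz */
int64_t pts_us  = av_rescale(pts_90k, 1000000, 90000); /* a*b/c with rounding: 5000000 */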
MyAVPacketList * first_pkt
static Frame * frame_queue_peek_readable(FrameQueue *f)
#define AV_PIX_FMT_RGB565
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int disposition
AV_DISPOSITION_* bit field.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
#define EXTERNAL_CLOCK_MAX_FRAMES
int h
height of pict, undefined when pict is not set
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame() to get more. It must just process the frame or queue it; the task of requesting more frames is left to the filter's request_frame() method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame() on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed, or it should at least make progress towards producing a frame.
#define AV_PIX_FMT_0RGB32
static AVStream * video_stream
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
static int filter_nbthreads
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
static int find_stream_info
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
static AVInputFormat * iformat
void av_bprintf(AVBPrint *buf, const char *fmt,...)
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages; this requires the user app to use av_log() instead of (f)printf, as the two would otherwise interleave their log messages.
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
#define FF_ARRAY_ELEMS(a)
static int opt_width(void *optctx, const char *opt, const char *arg)
int main(int argc, char **argv)
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
static void show_usage(void)
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
main external API structure.
#define CMDUTILS_COMMON_OPTIONS
static void packet_queue_start(PacketQueue *q)
static const char * audio_codec_name
double get_rotation(AVStream *st)
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
#define AV_SYNC_FRAMEDUP_THRESHOLD
static enum ShowMode show_mode
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
static const OptionDef options[]
static void fill_rectangle(int x, int y, int w, int h)
static AVPacket flush_pkt
unsigned int audio_buf1_size
#define AV_SYNC_THRESHOLD_MAX
int av_buffersink_get_channels(const AVFilterContext *ctx)
static void decoder_destroy(Decoder *d)
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int eof_reached
True if reading failed due to an error or end of file.
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Filter: the word “frame” indicates either a video frame or a group of audio samples.
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
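Small illustrative uses of the rational helpers indexed here (the values are arbitrary): av_mul_q() returns a reduced result and av_cmp_q() returns -1, 0 or 1.
#include <libavutil/rational.h>
AVRational fr  = av_mul_q((AVRational){30000, 1001}, (AVRational){2, 1}); /* 60000/1001 */
int        cmp = av_cmp_q((AVRational){1, 25}, (AVRational){1, 24});      /* -1, since 1/25 < 1/24 */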
#define GROW_ARRAY(array, nb_elems)
#define SUBPICTURE_QUEUE_SIZE
static const char * input_filename
static void stream_toggle_pause(VideoState *is)
SDL_cond * continue_read_thread
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
static void toggle_audio_display(VideoState *is)
enum AVMediaType codec_type
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
char * av_strdup(const char *s)
Duplicate a string.
static int get_video_frame(VideoState *is, AVFrame *frame)
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
struct SwsContext * sub_convert_ctx
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
static const uint16_t channel_layouts[7]
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
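A sketch of building an options dictionary, e.g. to pass to avformat_open_input(); the keys shown are common format options but are only examples.
#include <libavutil/dict.h>
AVDictionary *opts = NULL;
av_dict_set(&opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE); /* string value */
av_dict_set_int(&opts, "probesize", 32 * 1024 * 1024, 0);         /* integer convenience wrapper */
/* ... pass &opts to avformat_open_input(); unrecognized keys are returned in opts ... */
av_dict_free(&opts);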
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
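A sketch of the grow-only buffer pattern this enables; buf, buf_size and ensure_buffer are hypothetical names.
#include <stdint.h>
#include <libavutil/mem.h>
#include <libavutil/error.h>
static uint8_t *buf;          /* reusable buffer */
static unsigned int buf_size; /* current capacity, updated by av_fast_malloc() */
static int ensure_buffer(size_t needed)
{
    /* Reallocates only when needed > buf_size; existing contents are not preserved. */
    av_fast_malloc(&buf, &buf_size, needed);
    if (!buf)
        return AVERROR(ENOMEM); /* on failure the old buffer is freed and buf_size is reset to 0 */
    return 0;
}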
static void stream_component_close(VideoState *is, int stream_index)
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
unsigned int audio_buf_size
#define flags(name, subs,...)
void av_rdft_end(RDFTContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
struct MyAVPacketList * next
The exact code depends on how similar the blocks are and how related they are to the block
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
static Frame * frame_queue_peek_last(FrameQueue *f)
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
static int decoder_reorder_pts
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
struct AudioParams audio_tgt
AVRational time_base
time base in which the start/end timestamps are specified
static AVStream * audio_stream
const AVClass * avfilter_get_class(void)
A linked-list of the inputs/outputs of the filter chain.
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, so just let it be. vf: default, minimum, maximum, flags; the name is the option name (keep it simple and lowercase) and descriptions are short.
static void video_audio_display(VideoState *s)
#define AV_SYNC_THRESHOLD_MIN
static void check_external_clock_speed(VideoState *is)
uint32_t start_display_time
#define SAMPLE_CORRECTION_PERCENT_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
static int is_realtime(AVFormatContext *s)
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
static int decode_interrupt_cb(void *ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
#define AV_PIX_FMT_RGB444
static int exit_on_keydown