00022 #include "config.h"
00023 #include <inttypes.h>
00024 #include <math.h>
00025 #include <limits.h>
00026 #include "libavutil/avstring.h"
00027 #include "libavutil/pixdesc.h"
00028 #include "libavformat/avformat.h"
00029 #include "libavdevice/avdevice.h"
00030 #include "libswscale/swscale.h"
00031 #include "libavcodec/audioconvert.h"
00032 #include "libavcodec/colorspace.h"
00033 #include "libavcodec/opt.h"
00034 #include "libavcodec/avfft.h"
00035
00036 #if CONFIG_AVFILTER
00037 # include "libavfilter/avfilter.h"
00038 # include "libavfilter/avfiltergraph.h"
00039 # include "libavfilter/graphparser.h"
00040 #endif
00041
00042 #include "cmdutils.h"
00043
00044 #include <SDL.h>
00045 #include <SDL_thread.h>
00046
00047 #ifdef __MINGW32__
00048 #undef main
00049 #endif
00050
00051 #include <unistd.h>
00052 #include <assert.h>
00053
00054 const char program_name[] = "FFplay";
00055 const int program_birth_year = 2003;
00056
00057
00058
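/* Packet queue limits used by the demuxing loop (not shown in this section):
   reading pauses once roughly MAX_QUEUE_SIZE bytes of packets are buffered,
   and a queue is considered full enough once it holds MIN_AUDIOQ_SIZE bytes
   or MIN_FRAMES packets. */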
00059 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
00060 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
00061 #define MIN_FRAMES 5
00062
00063
00064
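/* SDL audio buffer size, in samples. Kept small so A/V sync stays precise,
   since SDL does not report hardware buffer fullness. */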
00065 #define SDL_AUDIO_BUFFER_SIZE 1024
00066
00067
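/* A/V sync tuning: no correction is applied while the clock error stays below
   AV_SYNC_THRESHOLD, correction is abandoned entirely above
   AV_NOSYNC_THRESHOLD, and FRAME_SKIP_FACTOR controls how quickly the
   frame-dropping rate adapts when video falls behind. */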
00068 #define AV_SYNC_THRESHOLD 0.01
00069
00070 #define AV_NOSYNC_THRESHOLD 10.0
00071
00072 #define FRAME_SKIP_FACTOR 0.05
00073
00074
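/* maximum audio speed change, in percent, applied to regain sync */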
00075 #define SAMPLE_CORRECTION_PERCENT_MAX 10
00076
00077
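/* number of measurements averaged (via audio_diff_avg_coef) when estimating
   the audio clock drift */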
00078 #define AUDIO_DIFF_AVG_NB 20
00079
00080
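/* size of the ring buffer of recent samples used by the audio visualization;
   must be large enough to cover the hardware audio buffer */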
00081 #define SAMPLE_ARRAY_SIZE (2*65536)
00082
00083 #if !CONFIG_AVFILTER
00084 static int sws_flags = SWS_BICUBIC;
00085 #endif
00086
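/* FIFO of demuxed packets shared between the read (producer) thread and a
   decoder (consumer) thread, protected by an SDL mutex/condition pair.
   Typical usage (sketch): the producer calls packet_queue_put() for every
   packet it reads, while the consumer loops on packet_queue_get(q, &pkt, 1),
   stopping when it returns < 0 (abort) and treating a packet whose data
   pointer equals flush_pkt.data as a "flush the codec" marker. */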
00087 typedef struct PacketQueue {
00088 AVPacketList *first_pkt, *last_pkt;
00089 int nb_packets;
00090 int size;
00091 int abort_request;
00092 SDL_mutex *mutex;
00093 SDL_cond *cond;
00094 } PacketQueue;
00095
00096 #define VIDEO_PICTURE_QUEUE_SIZE 2
00097 #define SUBPICTURE_QUEUE_SIZE 4
00098
00099 typedef struct VideoPicture {
00100 double pts;
00101 double target_clock;
00102 int64_t pos;
00103 SDL_Overlay *bmp;
00104 int width, height;
00105 int allocated;
00106 enum PixelFormat pix_fmt;
00107
00108 #if CONFIG_AVFILTER
00109 AVFilterPicRef *picref;
00110 #endif
00111 } VideoPicture;
00112
00113 typedef struct SubPicture {
00114 double pts;
00115 AVSubtitle sub;
00116 } SubPicture;
00117
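/* which clock the other streams are slaved to */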
00118 enum {
00119 AV_SYNC_AUDIO_MASTER,
00120 AV_SYNC_VIDEO_MASTER,
00121 AV_SYNC_EXTERNAL_CLOCK,
00122 };
00123
00124 typedef struct VideoState {
00125 SDL_Thread *parse_tid;
00126 SDL_Thread *video_tid;
00127 SDL_Thread *refresh_tid;
00128 AVInputFormat *iformat;
00129 int no_background;
00130 int abort_request;
00131 int paused;
00132 int last_paused;
00133 int seek_req;
00134 int seek_flags;
00135 int64_t seek_pos;
00136 int64_t seek_rel;
00137 int read_pause_return;
00138 AVFormatContext *ic;
00139 int dtg_active_format;
00140
00141 int audio_stream;
00142
00143 int av_sync_type;
00144 double external_clock;
00145 int64_t external_clock_time;
00146
00147 double audio_clock;
00148 double audio_diff_cum;
00149 double audio_diff_avg_coef;
00150 double audio_diff_threshold;
00151 int audio_diff_avg_count;
00152 AVStream *audio_st;
00153 PacketQueue audioq;
00154 int audio_hw_buf_size;
00155
00156
00157 DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00158 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00159 uint8_t *audio_buf;
00160 unsigned int audio_buf_size;
00161 int audio_buf_index;
00162 AVPacket audio_pkt_temp;
00163 AVPacket audio_pkt;
00164 enum SampleFormat audio_src_fmt;
00165 AVAudioConvert *reformat_ctx;
00166
00167 int show_audio;
00168 int16_t sample_array[SAMPLE_ARRAY_SIZE];
00169 int sample_array_index;
00170 int last_i_start;
00171 RDFTContext *rdft;
00172 int rdft_bits;
00173 int xpos;
00174
00175 SDL_Thread *subtitle_tid;
00176 int subtitle_stream;
00177 int subtitle_stream_changed;
00178 AVStream *subtitle_st;
00179 PacketQueue subtitleq;
00180 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
00181 int subpq_size, subpq_rindex, subpq_windex;
00182 SDL_mutex *subpq_mutex;
00183 SDL_cond *subpq_cond;
00184
00185 double frame_timer;
00186 double frame_last_pts;
00187 double frame_last_delay;
00188 double video_clock;
00189 int video_stream;
00190 AVStream *video_st;
00191 PacketQueue videoq;
00192 double video_current_pts;
00193 double video_current_pts_drift;
00194 int64_t video_current_pos;
00195 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
00196 int pictq_size, pictq_rindex, pictq_windex;
00197 SDL_mutex *pictq_mutex;
00198 SDL_cond *pictq_cond;
00199 #if !CONFIG_AVFILTER
00200 struct SwsContext *img_convert_ctx;
00201 #endif
00202
00203
00204 char filename[1024];
00205 int width, height, xleft, ytop;
00206
00207 int64_t faulty_pts;
00208 int64_t faulty_dts;
00209 int64_t last_dts_for_fault_detection;
00210 int64_t last_pts_for_fault_detection;
00211
00212 #if CONFIG_AVFILTER
00213 AVFilterContext *out_video_filter;
00214 #endif
00215
00216 float skip_frames;
00217 float skip_frames_index;
00218 int refresh;
00219 } VideoState;
00220
00221 static void show_help(void);
00222 static int audio_write_get_buf_size(VideoState *is);
00223
00224
00225 static AVInputFormat *file_iformat;
00226 static const char *input_filename;
00227 static const char *window_title;
00228 static int fs_screen_width;
00229 static int fs_screen_height;
00230 static int screen_width = 0;
00231 static int screen_height = 0;
00232 static int frame_width = 0;
00233 static int frame_height = 0;
00234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
00235 static int audio_disable;
00236 static int video_disable;
00237 static int wanted_stream[AVMEDIA_TYPE_NB]={
00238 [AVMEDIA_TYPE_AUDIO]=-1,
00239 [AVMEDIA_TYPE_VIDEO]=-1,
00240 [AVMEDIA_TYPE_SUBTITLE]=-1,
00241 };
00242 static int seek_by_bytes=-1;
00243 static int display_disable;
00244 static int show_status = 1;
00245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
00246 static int64_t start_time = AV_NOPTS_VALUE;
00247 static int64_t duration = AV_NOPTS_VALUE;
00248 static int debug = 0;
00249 static int debug_mv = 0;
00250 static int step = 0;
00251 static int thread_count = 1;
00252 static int workaround_bugs = 1;
00253 static int fast = 0;
00254 static int genpts = 0;
00255 static int lowres = 0;
00256 static int idct = FF_IDCT_AUTO;
00257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
00258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
00259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
00260 static int error_recognition = FF_ER_CAREFUL;
00261 static int error_concealment = 3;
00262 static int decoder_reorder_pts= -1;
00263 static int autoexit;
00264 static int loop=1;
00265 static int framedrop=1;
00266
00267 static int rdftspeed=20;
00268 #if CONFIG_AVFILTER
00269 static char *vfilters = NULL;
00270 #endif
00271
00272
00273 static int is_full_screen;
00274 static VideoState *cur_stream;
00275 static int64_t audio_callback_time;
00276
00277 static AVPacket flush_pkt;
00278
00279 #define FF_ALLOC_EVENT (SDL_USEREVENT)
00280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
00281 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
00282
00283 static SDL_Surface *screen;
00284
00285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
00286
00287
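/* packet queue handling: a freshly initialized queue already contains the
   global flush packet, so a decoder resets its state before the first real
   packet arrives */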
00288 static void packet_queue_init(PacketQueue *q)
00289 {
00290 memset(q, 0, sizeof(PacketQueue));
00291 q->mutex = SDL_CreateMutex();
00292 q->cond = SDL_CreateCond();
00293 packet_queue_put(q, &flush_pkt);
00294 }
00295
00296 static void packet_queue_flush(PacketQueue *q)
00297 {
00298 AVPacketList *pkt, *pkt1;
00299
00300 SDL_LockMutex(q->mutex);
00301 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00302 pkt1 = pkt->next;
00303 av_free_packet(&pkt->pkt);
00304 av_freep(&pkt);
00305 }
00306 q->last_pkt = NULL;
00307 q->first_pkt = NULL;
00308 q->nb_packets = 0;
00309 q->size = 0;
00310 SDL_UnlockMutex(q->mutex);
00311 }
00312
00313 static void packet_queue_end(PacketQueue *q)
00314 {
00315 packet_queue_flush(q);
00316 SDL_DestroyMutex(q->mutex);
00317 SDL_DestroyCond(q->cond);
00318 }
00319
00320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00321 {
00322 AVPacketList *pkt1;
00323
00324
00325 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00326 return -1;
00327
00328 pkt1 = av_malloc(sizeof(AVPacketList));
00329 if (!pkt1)
00330 return -1;
00331 pkt1->pkt = *pkt;
00332 pkt1->next = NULL;
00333
00334
00335 SDL_LockMutex(q->mutex);
00336
00337 if (!q->last_pkt)
00338
00339 q->first_pkt = pkt1;
00340 else
00341 q->last_pkt->next = pkt1;
00342 q->last_pkt = pkt1;
00343 q->nb_packets++;
00344 q->size += pkt1->pkt.size + sizeof(*pkt1);
00345
00346 SDL_CondSignal(q->cond);
00347
00348 SDL_UnlockMutex(q->mutex);
00349 return 0;
00350 }
00351
00352 static void packet_queue_abort(PacketQueue *q)
00353 {
00354 SDL_LockMutex(q->mutex);
00355
00356 q->abort_request = 1;
00357
00358 SDL_CondSignal(q->cond);
00359
00360 SDL_UnlockMutex(q->mutex);
00361 }
00362
00363
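/* return < 0 if aborted, 0 if no packet was available (non-blocking mode)
   and > 0 if a packet was returned */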
00364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00365 {
00366 AVPacketList *pkt1;
00367 int ret;
00368
00369 SDL_LockMutex(q->mutex);
00370
00371 for(;;) {
00372 if (q->abort_request) {
00373 ret = -1;
00374 break;
00375 }
00376
00377 pkt1 = q->first_pkt;
00378 if (pkt1) {
00379 q->first_pkt = pkt1->next;
00380 if (!q->first_pkt)
00381 q->last_pkt = NULL;
00382 q->nb_packets--;
00383 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00384 *pkt = pkt1->pkt;
00385 av_free(pkt1);
00386 ret = 1;
00387 break;
00388 } else if (!block) {
00389 ret = 0;
00390 break;
00391 } else {
00392 SDL_CondWait(q->cond, q->mutex);
00393 }
00394 }
00395 SDL_UnlockMutex(q->mutex);
00396 return ret;
00397 }
00398
00399 static inline void fill_rectangle(SDL_Surface *screen,
00400 int x, int y, int w, int h, int color)
00401 {
00402 SDL_Rect rect;
00403 rect.x = x;
00404 rect.y = y;
00405 rect.w = w;
00406 rect.h = h;
00407 SDL_FillRect(screen, &rect, color);
00408 }
00409
00410 #if 0
00411
00412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
00413 {
00414 int w1, w2, h1, h2;
00415
00416
00417 w1 = x;
00418 if (w1 < 0)
00419 w1 = 0;
00420 w2 = s->width - (x + w);
00421 if (w2 < 0)
00422 w2 = 0;
00423 h1 = y;
00424 if (h1 < 0)
00425 h1 = 0;
00426 h2 = s->height - (y + h);
00427 if (h2 < 0)
00428 h2 = 0;
00429 fill_rectangle(screen,
00430 s->xleft, s->ytop,
00431 w1, s->height,
00432 color);
00433 fill_rectangle(screen,
00434 s->xleft + s->width - w2, s->ytop,
00435 w2, s->height,
00436 color);
00437 fill_rectangle(screen,
00438 s->xleft + w1, s->ytop,
00439 s->width - w1 - w2, h1,
00440 color);
00441 fill_rectangle(screen,
00442 s->xleft + w1, s->ytop + s->height - h2,
00443 s->width - w1 - w2, h2,
00444 color);
00445 }
00446 #endif
00447
00448 #define ALPHA_BLEND(a, oldp, newp, s)\
00449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
00450
00451 #define RGBA_IN(r, g, b, a, s)\
00452 {\
00453 unsigned int v = ((const uint32_t *)(s))[0];\
00454 a = (v >> 24) & 0xff;\
00455 r = (v >> 16) & 0xff;\
00456 g = (v >> 8) & 0xff;\
00457 b = v & 0xff;\
00458 }
00459
00460 #define YUVA_IN(y, u, v, a, s, pal)\
00461 {\
00462 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
00463 a = (val >> 24) & 0xff;\
00464 y = (val >> 16) & 0xff;\
00465 u = (val >> 8) & 0xff;\
00466 v = val & 0xff;\
00467 }
00468
00469 #define YUVA_OUT(d, y, u, v, a)\
00470 {\
00471 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
00472 }
00473
00474
00475 #define BPP 1
00476
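/* Blend one palettized subtitle rectangle onto a YUV420P picture in place:
   luma is blended per pixel, chroma at half resolution using the averaged
   alpha of the contributing pixels. */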
00477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00478 {
00479 int wrap, wrap3, width2, skip2;
00480 int y, u, v, a, u1, v1, a1, w, h;
00481 uint8_t *lum, *cb, *cr;
00482 const uint8_t *p;
00483 const uint32_t *pal;
00484 int dstx, dsty, dstw, dsth;
00485
00486 dstw = av_clip(rect->w, 0, imgw);
00487 dsth = av_clip(rect->h, 0, imgh);
00488 dstx = av_clip(rect->x, 0, imgw - dstw);
00489 dsty = av_clip(rect->y, 0, imgh - dsth);
00490 lum = dst->data[0] + dsty * dst->linesize[0];
00491 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00492 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00493
00494 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00495 skip2 = dstx >> 1;
00496 wrap = dst->linesize[0];
00497 wrap3 = rect->pict.linesize[0];
00498 p = rect->pict.data[0];
00499 pal = (const uint32_t *)rect->pict.data[1];
00500
00501 if (dsty & 1) {
00502 lum += dstx;
00503 cb += skip2;
00504 cr += skip2;
00505
00506 if (dstx & 1) {
00507 YUVA_IN(y, u, v, a, p, pal);
00508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00509 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00510 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00511 cb++;
00512 cr++;
00513 lum++;
00514 p += BPP;
00515 }
00516 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00517 YUVA_IN(y, u, v, a, p, pal);
00518 u1 = u;
00519 v1 = v;
00520 a1 = a;
00521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00522
00523 YUVA_IN(y, u, v, a, p + BPP, pal);
00524 u1 += u;
00525 v1 += v;
00526 a1 += a;
00527 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00528 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00529 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00530 cb++;
00531 cr++;
00532 p += 2 * BPP;
00533 lum += 2;
00534 }
00535 if (w) {
00536 YUVA_IN(y, u, v, a, p, pal);
00537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00538 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00539 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00540 p++;
00541 lum++;
00542 }
00543 p += wrap3 - dstw * BPP;
00544 lum += wrap - dstw - dstx;
00545 cb += dst->linesize[1] - width2 - skip2;
00546 cr += dst->linesize[2] - width2 - skip2;
00547 }
00548 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00549 lum += dstx;
00550 cb += skip2;
00551 cr += skip2;
00552
00553 if (dstx & 1) {
00554 YUVA_IN(y, u, v, a, p, pal);
00555 u1 = u;
00556 v1 = v;
00557 a1 = a;
00558 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00559 p += wrap3;
00560 lum += wrap;
00561 YUVA_IN(y, u, v, a, p, pal);
00562 u1 += u;
00563 v1 += v;
00564 a1 += a;
00565 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00566 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00567 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00568 cb++;
00569 cr++;
00570 p += -wrap3 + BPP;
00571 lum += -wrap + 1;
00572 }
00573 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00574 YUVA_IN(y, u, v, a, p, pal);
00575 u1 = u;
00576 v1 = v;
00577 a1 = a;
00578 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00579
00580 YUVA_IN(y, u, v, a, p + BPP, pal);
00581 u1 += u;
00582 v1 += v;
00583 a1 += a;
00584 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00585 p += wrap3;
00586 lum += wrap;
00587
00588 YUVA_IN(y, u, v, a, p, pal);
00589 u1 += u;
00590 v1 += v;
00591 a1 += a;
00592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00593
00594 YUVA_IN(y, u, v, a, p + BPP, pal);
00595 u1 += u;
00596 v1 += v;
00597 a1 += a;
00598 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00599
00600 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00601 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00602
00603 cb++;
00604 cr++;
00605 p += -wrap3 + 2 * BPP;
00606 lum += -wrap + 2;
00607 }
00608 if (w) {
00609 YUVA_IN(y, u, v, a, p, pal);
00610 u1 = u;
00611 v1 = v;
00612 a1 = a;
00613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00614 p += wrap3;
00615 lum += wrap;
00616 YUVA_IN(y, u, v, a, p, pal);
00617 u1 += u;
00618 v1 += v;
00619 a1 += a;
00620 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00621 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00622 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00623 cb++;
00624 cr++;
00625 p += -wrap3 + BPP;
00626 lum += -wrap + 1;
00627 }
00628 p += wrap3 + (wrap3 - dstw * BPP);
00629 lum += wrap + (wrap - dstw - dstx);
00630 cb += dst->linesize[1] - width2 - skip2;
00631 cr += dst->linesize[2] - width2 - skip2;
00632 }
00633
00634 if (h) {
00635 lum += dstx;
00636 cb += skip2;
00637 cr += skip2;
00638
00639 if (dstx & 1) {
00640 YUVA_IN(y, u, v, a, p, pal);
00641 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00642 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00643 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00644 cb++;
00645 cr++;
00646 lum++;
00647 p += BPP;
00648 }
00649 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00650 YUVA_IN(y, u, v, a, p, pal);
00651 u1 = u;
00652 v1 = v;
00653 a1 = a;
00654 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00655
00656 YUVA_IN(y, u, v, a, p + BPP, pal);
00657 u1 += u;
00658 v1 += v;
00659 a1 += a;
00660 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00661 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00662 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00663 cb++;
00664 cr++;
00665 p += 2 * BPP;
00666 lum += 2;
00667 }
00668 if (w) {
00669 YUVA_IN(y, u, v, a, p, pal);
00670 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00671 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00672 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00673 }
00674 }
00675 }
00676
00677 static void free_subpicture(SubPicture *sp)
00678 {
00679 int i;
00680
00681 for (i = 0; i < sp->sub.num_rects; i++)
00682 {
00683 av_freep(&sp->sub.rects[i]->pict.data[0]);
00684 av_freep(&sp->sub.rects[i]->pict.data[1]);
00685 av_freep(&sp->sub.rects[i]);
00686 }
00687
00688 av_free(sp->sub.rects);
00689
00690 memset(&sp->sub, 0, sizeof(AVSubtitle));
00691 }
00692
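/* Display the picture at the read index of the picture queue, blending any
   pending subtitle into it and letterboxing it into the window while keeping
   the source aspect ratio. */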
00693 static void video_image_display(VideoState *is)
00694 {
00695 VideoPicture *vp;
00696 SubPicture *sp;
00697 AVPicture pict;
00698 float aspect_ratio;
00699 int width, height, x, y;
00700 SDL_Rect rect;
00701 int i;
00702
00703 vp = &is->pictq[is->pictq_rindex];
00704 if (vp->bmp) {
00705 #if CONFIG_AVFILTER
00706 if (vp->picref->pixel_aspect.num == 0)
00707 aspect_ratio = 0;
00708 else
00709 aspect_ratio = av_q2d(vp->picref->pixel_aspect);
00710 #else
00711
00712
00713 if (is->video_st->sample_aspect_ratio.num)
00714 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
00715 else if (is->video_st->codec->sample_aspect_ratio.num)
00716 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
00717 else
00718 aspect_ratio = 0;
00719 #endif
00720 if (aspect_ratio <= 0.0)
00721 aspect_ratio = 1.0;
00722 aspect_ratio *= (float)vp->width / (float)vp->height;
00723
00724
00725 #if 0
00726 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
00727 is->dtg_active_format = is->video_st->codec->dtg_active_format;
00728 printf("dtg_active_format=%d\n", is->dtg_active_format);
00729 }
00730 #endif
00731 #if 0
00732 switch(is->video_st->codec->dtg_active_format) {
00733 case FF_DTG_AFD_SAME:
00734 default:
00735
00736 break;
00737 case FF_DTG_AFD_4_3:
00738 aspect_ratio = 4.0 / 3.0;
00739 break;
00740 case FF_DTG_AFD_16_9:
00741 aspect_ratio = 16.0 / 9.0;
00742 break;
00743 case FF_DTG_AFD_14_9:
00744 aspect_ratio = 14.0 / 9.0;
00745 break;
00746 case FF_DTG_AFD_4_3_SP_14_9:
00747 aspect_ratio = 14.0 / 9.0;
00748 break;
00749 case FF_DTG_AFD_16_9_SP_14_9:
00750 aspect_ratio = 14.0 / 9.0;
00751 break;
00752 case FF_DTG_AFD_SP_4_3:
00753 aspect_ratio = 4.0 / 3.0;
00754 break;
00755 }
00756 #endif
00757
00758 if (is->subtitle_st)
00759 {
00760 if (is->subpq_size > 0)
00761 {
00762 sp = &is->subpq[is->subpq_rindex];
00763
00764 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
00765 {
00766 SDL_LockYUVOverlay (vp->bmp);
00767
00768 pict.data[0] = vp->bmp->pixels[0];
00769 pict.data[1] = vp->bmp->pixels[2];
00770 pict.data[2] = vp->bmp->pixels[1];
00771
00772 pict.linesize[0] = vp->bmp->pitches[0];
00773 pict.linesize[1] = vp->bmp->pitches[2];
00774 pict.linesize[2] = vp->bmp->pitches[1];
00775
00776 for (i = 0; i < sp->sub.num_rects; i++)
00777 blend_subrect(&pict, sp->sub.rects[i],
00778 vp->bmp->w, vp->bmp->h);
00779
00780 SDL_UnlockYUVOverlay (vp->bmp);
00781 }
00782 }
00783 }
00784
00785
00786
00787 height = is->height;
00788 width = ((int)rint(height * aspect_ratio)) & ~1;
00789 if (width > is->width) {
00790 width = is->width;
00791 height = ((int)rint(width / aspect_ratio)) & ~1;
00792 }
00793 x = (is->width - width) / 2;
00794 y = (is->height - height) / 2;
00795 if (!is->no_background) {
00796
00797
00798 } else {
00799 is->no_background = 0;
00800 }
00801 rect.x = is->xleft + x;
00802 rect.y = is->ytop + y;
00803 rect.w = width;
00804 rect.h = height;
00805 SDL_DisplayYUVOverlay(vp->bmp, &rect);
00806 } else {
00807 #if 0
00808 fill_rectangle(screen,
00809 is->xleft, is->ytop, is->width, is->height,
00810 QERGB(0x00, 0x00, 0x00));
00811 #endif
00812 }
00813 }
00814
00815 static inline int compute_mod(int a, int b)
00816 {
00817 a = a % b;
00818 if (a >= 0)
00819 return a;
00820 else
00821 return a + b;
00822 }
00823
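/* Audio-only visualization: draws either a per-channel waveform
   (show_audio == 1) or an RDFT-based spectrogram, column by column, from the
   most recently played samples. */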
00824 static void video_audio_display(VideoState *s)
00825 {
00826 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00827 int ch, channels, h, h2, bgcolor, fgcolor;
00828 int64_t time_diff;
00829 int rdft_bits, nb_freq;
00830
00831 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
00832 ;
00833 nb_freq= 1<<(rdft_bits-1);
00834
00835
00836 channels = s->audio_st->codec->channels;
00837 nb_display_channels = channels;
00838 if (!s->paused) {
00839 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
00840 n = 2 * channels;
00841 delay = audio_write_get_buf_size(s);
00842 delay /= n;
00843
00844
00845
00846 if (audio_callback_time) {
00847 time_diff = av_gettime() - audio_callback_time;
00848 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00849 }
00850
00851 delay += 2*data_used;
00852 if (delay < data_used)
00853 delay = data_used;
00854
00855 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00856 if(s->show_audio==1){
00857 h= INT_MIN;
00858 for(i=0; i<1000; i+=channels){
00859 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00860 int a= s->sample_array[idx];
00861 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00862 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00863 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00864 int score= a-d;
00865 if(h<score && (b^c)<0){
00866 h= score;
00867 i_start= idx;
00868 }
00869 }
00870 }
00871
00872 s->last_i_start = i_start;
00873 } else {
00874 i_start = s->last_i_start;
00875 }
00876
00877 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00878 if(s->show_audio==1){
00879 fill_rectangle(screen,
00880 s->xleft, s->ytop, s->width, s->height,
00881 bgcolor);
00882
00883 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00884
00885
00886 h = s->height / nb_display_channels;
00887
00888 h2 = (h * 9) / 20;
00889 for(ch = 0;ch < nb_display_channels; ch++) {
00890 i = i_start + ch;
00891 y1 = s->ytop + ch * h + (h / 2);
00892 for(x = 0; x < s->width; x++) {
00893 y = (s->sample_array[i] * h2) >> 15;
00894 if (y < 0) {
00895 y = -y;
00896 ys = y1 - y;
00897 } else {
00898 ys = y1;
00899 }
00900 fill_rectangle(screen,
00901 s->xleft + x, ys, 1, y,
00902 fgcolor);
00903 i += channels;
00904 if (i >= SAMPLE_ARRAY_SIZE)
00905 i -= SAMPLE_ARRAY_SIZE;
00906 }
00907 }
00908
00909 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00910
00911 for(ch = 1;ch < nb_display_channels; ch++) {
00912 y = s->ytop + ch * h;
00913 fill_rectangle(screen,
00914 s->xleft, y, s->width, 1,
00915 fgcolor);
00916 }
00917 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00918 }else{
00919 nb_display_channels= FFMIN(nb_display_channels, 2);
00920 if(rdft_bits != s->rdft_bits){
00921 av_rdft_end(s->rdft);
00922 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00923 s->rdft_bits= rdft_bits;
00924 }
00925 {
00926 FFTSample data[2][2*nb_freq];
00927 for(ch = 0;ch < nb_display_channels; ch++) {
00928 i = i_start + ch;
00929 for(x = 0; x < 2*nb_freq; x++) {
00930 double w= (x-nb_freq)*(1.0/nb_freq);
00931 data[ch][x]= s->sample_array[i]*(1.0-w*w);
00932 i += channels;
00933 if (i >= SAMPLE_ARRAY_SIZE)
00934 i -= SAMPLE_ARRAY_SIZE;
00935 }
00936 av_rdft_calc(s->rdft, data[ch]);
00937 }
00938
00939 for(y=0; y<s->height; y++){
00940 double w= 1/sqrt(nb_freq);
00941 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
00942 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
00943 a= FFMIN(a,255);
00944 b= FFMIN(b,255);
00945 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
00946
00947 fill_rectangle(screen,
00948 s->xpos, s->height-y, 1, 1,
00949 fgcolor);
00950 }
00951 }
00952 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00953 s->xpos++;
00954 if(s->xpos >= s->width)
00955 s->xpos= s->xleft;
00956 }
00957 }
00958
00959 static int video_open(VideoState *is){
00960 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
00961 int w,h;
00962
00963 if(is_full_screen) flags |= SDL_FULLSCREEN;
00964 else flags |= SDL_RESIZABLE;
00965
00966 if (is_full_screen && fs_screen_width) {
00967 w = fs_screen_width;
00968 h = fs_screen_height;
00969 } else if(!is_full_screen && screen_width){
00970 w = screen_width;
00971 h = screen_height;
00972 #if CONFIG_AVFILTER
00973 }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
00974 w = is->out_video_filter->inputs[0]->w;
00975 h = is->out_video_filter->inputs[0]->h;
00976 #else
00977 }else if (is->video_st && is->video_st->codec->width){
00978 w = is->video_st->codec->width;
00979 h = is->video_st->codec->height;
00980 #endif
00981 } else {
00982 w = 640;
00983 h = 480;
00984 }
00985 if(screen && is->width == screen->w && screen->w == w
00986 && is->height== screen->h && screen->h == h)
00987 return 0;
00988
00989 #ifndef __APPLE__
00990 screen = SDL_SetVideoMode(w, h, 0, flags);
00991 #else
00992
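/* Requesting 24 bits per pixel here works around blank video that has been
   seen on OS X when 0 or 32 bpp is requested. */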
00993 screen = SDL_SetVideoMode(w, h, 24, flags);
00994 #endif
00995 if (!screen) {
00996 fprintf(stderr, "SDL: could not set video mode - exiting\n");
00997 return -1;
00998 }
00999 if (!window_title)
01000 window_title = input_filename;
01001 SDL_WM_SetCaption(window_title, window_title);
01002
01003 is->width = screen->w;
01004 is->height = screen->h;
01005
01006 return 0;
01007 }
01008
01009
01010 static void video_display(VideoState *is)
01011 {
01012 if(!screen)
01013 video_open(cur_stream);
01014 if (is->audio_st && is->show_audio)
01015 video_audio_display(is);
01016 else if (is->video_st)
01017 video_image_display(is);
01018 }
01019
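/* Pushes an FF_REFRESH_EVENT to the main loop so the display gets refreshed,
   avoiding duplicate events via the refresh flag; the polling interval is
   rdftspeed milliseconds while the audio visualization is shown, 5 ms
   otherwise. */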
01020 static int refresh_thread(void *opaque)
01021 {
01022 VideoState *is= opaque;
01023 while(!is->abort_request){
01024 SDL_Event event;
01025 event.type = FF_REFRESH_EVENT;
01026 event.user.data1 = opaque;
01027 if(!is->refresh){
01028 is->refresh=1;
01029 SDL_PushEvent(&event);
01030 }
01031 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000);
01032 }
01033 return 0;
01034 }
01035
01036
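/* get the current audio clock value: the pts of the last decoded audio data,
   minus the portion still sitting in the output buffer */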
01037 static double get_audio_clock(VideoState *is)
01038 {
01039 double pts;
01040 int hw_buf_size, bytes_per_sec;
01041 pts = is->audio_clock;
01042 hw_buf_size = audio_write_get_buf_size(is);
01043 bytes_per_sec = 0;
01044 if (is->audio_st) {
01045 bytes_per_sec = is->audio_st->codec->sample_rate *
01046 2 * is->audio_st->codec->channels;
01047 }
01048 if (bytes_per_sec)
01049 pts -= (double)hw_buf_size / bytes_per_sec;
01050 return pts;
01051 }
01052
01053
01054 static double get_video_clock(VideoState *is)
01055 {
01056 if (is->paused) {
01057 return is->video_current_pts;
01058 } else {
01059 return is->video_current_pts_drift + av_gettime() / 1000000.0;
01060 }
01061 }
01062
01063
01064 static double get_external_clock(VideoState *is)
01065 {
01066 int64_t ti;
01067 ti = av_gettime();
01068 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
01069 }
01070
01071
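/* get the current master clock value, i.e. the clock selected by
   av_sync_type (audio, video or external), falling back to whichever
   stream actually exists */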
01072 static double get_master_clock(VideoState *is)
01073 {
01074 double val;
01075
01076 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
01077 if (is->video_st)
01078 val = get_video_clock(is);
01079 else
01080 val = get_audio_clock(is);
01081 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01082 if (is->audio_st)
01083 val = get_audio_clock(is);
01084 else
01085 val = get_video_clock(is);
01086 } else {
01087 val = get_external_clock(is);
01088 }
01089 return val;
01090 }
01091
01092
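/* request a seek: the position, offset and flags are only recorded here; the
   thread that owns the demuxer performs the actual seek later */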
01093 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01094 {
01095 if (!is->seek_req) {
01096 is->seek_pos = pos;
01097 is->seek_rel = rel;
01098 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01099 if (seek_by_bytes)
01100 is->seek_flags |= AVSEEK_FLAG_BYTE;
01101 is->seek_req = 1;
01102 }
01103 }
01104
01105
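/* pause or resume the stream, re-basing the frame timer and video clock so
   playback resumes without a jump */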
01106 static void stream_pause(VideoState *is)
01107 {
01108 if (is->paused) {
01109 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
01110 if(is->read_pause_return != AVERROR(ENOSYS)){
01111 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
01112 }
01113 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
01114 }
01115 is->paused = !is->paused;
01116 }
01117
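/* Compute the absolute time at which the given frame should be shown: start
   from the previous frame's delay, then drop it to zero or double it
   depending on how far the video clock has drifted from the master clock. */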
01118 static double compute_target_time(double frame_current_pts, VideoState *is)
01119 {
01120 double delay, sync_threshold, diff;
01121
01122
01123 delay = frame_current_pts - is->frame_last_pts;
01124 if (delay <= 0 || delay >= 10.0) {
01125
01126 delay = is->frame_last_delay;
01127 } else {
01128 is->frame_last_delay = delay;
01129 }
01130 is->frame_last_pts = frame_current_pts;
01131
01132
01133 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01134 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01135
01136
01137 diff = get_video_clock(is) - get_master_clock(is);
01138
01139
01140
01141
01142 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01143 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01144 if (diff <= -sync_threshold)
01145 delay = 0;
01146 else if (diff >= sync_threshold)
01147 delay = 2 * delay;
01148 }
01149 }
01150 is->frame_timer += delay;
01151 #if defined(DEBUG_SYNC)
01152 printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
01153 delay, frame_current_pts, -diff);
01154 #endif
01155
01156 return is->frame_timer;
01157 }
01158
01159
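/* called from the main loop to display each frame: drops late pictures,
   retires expired subtitles and periodically updates the status line */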
01160 static void video_refresh_timer(void *opaque)
01161 {
01162 VideoState *is = opaque;
01163 VideoPicture *vp;
01164
01165 SubPicture *sp, *sp2;
01166
01167 if (is->video_st) {
01168 retry:
01169 if (is->pictq_size == 0) {
01170
01171 } else {
01172 double time= av_gettime()/1000000.0;
01173 double next_target;
01174
01175 vp = &is->pictq[is->pictq_rindex];
01176
01177 if(time < vp->target_clock)
01178 return;
01179
01180 is->video_current_pts = vp->pts;
01181 is->video_current_pts_drift = is->video_current_pts - time;
01182 is->video_current_pos = vp->pos;
01183 if(is->pictq_size > 1){
01184 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
01185 assert(nextvp->target_clock >= vp->target_clock);
01186 next_target= nextvp->target_clock;
01187 }else{
01188 next_target= vp->target_clock + is->video_clock - vp->pts;
01189 }
01190 if(framedrop && time > next_target){
01191 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
01192 if(is->pictq_size > 1 || time > next_target + 0.5){
01193
01194 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01195 is->pictq_rindex = 0;
01196
01197 SDL_LockMutex(is->pictq_mutex);
01198 is->pictq_size--;
01199 SDL_CondSignal(is->pictq_cond);
01200 SDL_UnlockMutex(is->pictq_mutex);
01201 goto retry;
01202 }
01203 }
01204
01205 if(is->subtitle_st) {
01206 if (is->subtitle_stream_changed) {
01207 SDL_LockMutex(is->subpq_mutex);
01208
01209 while (is->subpq_size) {
01210 free_subpicture(&is->subpq[is->subpq_rindex]);
01211
01212
01213 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01214 is->subpq_rindex = 0;
01215
01216 is->subpq_size--;
01217 }
01218 is->subtitle_stream_changed = 0;
01219
01220 SDL_CondSignal(is->subpq_cond);
01221 SDL_UnlockMutex(is->subpq_mutex);
01222 } else {
01223 if (is->subpq_size > 0) {
01224 sp = &is->subpq[is->subpq_rindex];
01225
01226 if (is->subpq_size > 1)
01227 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
01228 else
01229 sp2 = NULL;
01230
01231 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
01232 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
01233 {
01234 free_subpicture(sp);
01235
01236
01237 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01238 is->subpq_rindex = 0;
01239
01240 SDL_LockMutex(is->subpq_mutex);
01241 is->subpq_size--;
01242 SDL_CondSignal(is->subpq_cond);
01243 SDL_UnlockMutex(is->subpq_mutex);
01244 }
01245 }
01246 }
01247 }
01248
01249
01250 video_display(is);
01251
01252
01253 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01254 is->pictq_rindex = 0;
01255
01256 SDL_LockMutex(is->pictq_mutex);
01257 is->pictq_size--;
01258 SDL_CondSignal(is->pictq_cond);
01259 SDL_UnlockMutex(is->pictq_mutex);
01260 }
01261 } else if (is->audio_st) {
01262
01263
01264
01265
01266
01267
01268 video_display(is);
01269 }
01270 if (show_status) {
01271 static int64_t last_time;
01272 int64_t cur_time;
01273 int aqsize, vqsize, sqsize;
01274 double av_diff;
01275
01276 cur_time = av_gettime();
01277 if (!last_time || (cur_time - last_time) >= 30000) {
01278 aqsize = 0;
01279 vqsize = 0;
01280 sqsize = 0;
01281 if (is->audio_st)
01282 aqsize = is->audioq.size;
01283 if (is->video_st)
01284 vqsize = is->videoq.size;
01285 if (is->subtitle_st)
01286 sqsize = is->subtitleq.size;
01287 av_diff = 0;
01288 if (is->audio_st && is->video_st)
01289 av_diff = get_audio_clock(is) - get_video_clock(is);
01290 printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
01291 get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
01292 fflush(stdout);
01293 last_time = cur_time;
01294 }
01295 }
01296 }
01297
01298
01299
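/* allocate an SDL overlay for the picture at the write index; this must run
   in the main (event) thread, which is why queue_picture() requests it via an
   FF_ALLOC_EVENT instead of calling it directly */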
01300 static void alloc_picture(void *opaque)
01301 {
01302 VideoState *is = opaque;
01303 VideoPicture *vp;
01304
01305 vp = &is->pictq[is->pictq_windex];
01306
01307 if (vp->bmp)
01308 SDL_FreeYUVOverlay(vp->bmp);
01309
01310 #if CONFIG_AVFILTER
01311 if (vp->picref)
01312 avfilter_unref_pic(vp->picref);
01313 vp->picref = NULL;
01314
01315 vp->width = is->out_video_filter->inputs[0]->w;
01316 vp->height = is->out_video_filter->inputs[0]->h;
01317 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01318 #else
01319 vp->width = is->video_st->codec->width;
01320 vp->height = is->video_st->codec->height;
01321 vp->pix_fmt = is->video_st->codec->pix_fmt;
01322 #endif
01323
01324 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01325 SDL_YV12_OVERLAY,
01326 screen);
01327
01328 SDL_LockMutex(is->pictq_mutex);
01329 vp->allocated = 1;
01330 SDL_CondSignal(is->pictq_cond);
01331 SDL_UnlockMutex(is->pictq_mutex);
01332 }
01333
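/* Queue a decoded frame for display: wait until a slot is free in the picture
   queue, have the main thread (re)allocate the overlay if its size changed,
   then copy or convert the frame into the overlay and stamp it with its pts,
   file position and target display time. */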
01338 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
01339 {
01340 VideoPicture *vp;
01341 int dst_pix_fmt;
01342 #if CONFIG_AVFILTER
01343 AVPicture pict_src;
01344 #endif
01345
01346 SDL_LockMutex(is->pictq_mutex);
01347
01348 if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
01349 is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
01350
01351 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
01352 !is->videoq.abort_request) {
01353 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01354 }
01355 SDL_UnlockMutex(is->pictq_mutex);
01356
01357 if (is->videoq.abort_request)
01358 return -1;
01359
01360 vp = &is->pictq[is->pictq_windex];
01361
01362
01363 if (!vp->bmp ||
01364 #if CONFIG_AVFILTER
01365 vp->width != is->out_video_filter->inputs[0]->w ||
01366 vp->height != is->out_video_filter->inputs[0]->h) {
01367 #else
01368 vp->width != is->video_st->codec->width ||
01369 vp->height != is->video_st->codec->height) {
01370 #endif
01371 SDL_Event event;
01372
01373 vp->allocated = 0;
01374
01375
01376
01377 event.type = FF_ALLOC_EVENT;
01378 event.user.data1 = is;
01379 SDL_PushEvent(&event);
01380
01381
01382 SDL_LockMutex(is->pictq_mutex);
01383 while (!vp->allocated && !is->videoq.abort_request) {
01384 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01385 }
01386 SDL_UnlockMutex(is->pictq_mutex);
01387
01388 if (is->videoq.abort_request)
01389 return -1;
01390 }
01391
01392
01393 if (vp->bmp) {
01394 AVPicture pict;
01395 #if CONFIG_AVFILTER
01396 if(vp->picref)
01397 avfilter_unref_pic(vp->picref);
01398 vp->picref = src_frame->opaque;
01399 #endif
01400
01401
01402 SDL_LockYUVOverlay (vp->bmp);
01403
01404 dst_pix_fmt = PIX_FMT_YUV420P;
01405 memset(&pict,0,sizeof(AVPicture));
01406 pict.data[0] = vp->bmp->pixels[0];
01407 pict.data[1] = vp->bmp->pixels[2];
01408 pict.data[2] = vp->bmp->pixels[1];
01409
01410 pict.linesize[0] = vp->bmp->pitches[0];
01411 pict.linesize[1] = vp->bmp->pitches[2];
01412 pict.linesize[2] = vp->bmp->pitches[1];
01413
01414 #if CONFIG_AVFILTER
01415 pict_src.data[0] = src_frame->data[0];
01416 pict_src.data[1] = src_frame->data[1];
01417 pict_src.data[2] = src_frame->data[2];
01418
01419 pict_src.linesize[0] = src_frame->linesize[0];
01420 pict_src.linesize[1] = src_frame->linesize[1];
01421 pict_src.linesize[2] = src_frame->linesize[2];
01422
01423
01424 av_picture_copy(&pict, &pict_src,
01425 vp->pix_fmt, vp->width, vp->height);
01426 #else
01427 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
01428 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
01429 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
01430 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
01431 if (is->img_convert_ctx == NULL) {
01432 fprintf(stderr, "Cannot initialize the conversion context\n");
01433 exit(1);
01434 }
01435 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
01436 0, vp->height, pict.data, pict.linesize);
01437 #endif
01438
01439 SDL_UnlockYUVOverlay(vp->bmp);
01440
01441 vp->pts = pts;
01442 vp->pos = pos;
01443
01444
01445 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
01446 is->pictq_windex = 0;
01447 SDL_LockMutex(is->pictq_mutex);
01448 vp->target_clock= compute_target_time(vp->pts, is);
01449
01450 is->pictq_size++;
01451 SDL_UnlockMutex(is->pictq_mutex);
01452 }
01453 return 0;
01454 }
01455
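/* Derive the presentation time of the decoded frame: use its pts when
   present, otherwise extrapolate from the running video clock, then advance
   the video clock by the frame duration (including repeat_pict) and hand the
   frame to queue_picture(). */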
01460 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01461 {
01462 double frame_delay, pts;
01463
01464 pts = pts1;
01465
01466 if (pts != 0) {
01467
01468 is->video_clock = pts;
01469 } else {
01470 pts = is->video_clock;
01471 }
01472
01473 frame_delay = av_q2d(is->video_st->codec->time_base);
01474
01475
01476 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01477 is->video_clock += frame_delay;
01478
01479 #if defined(DEBUG_SYNC) && 0
01480 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
01481 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
01482 #endif
01483 return queue_picture(is, src_frame, pts, pos);
01484 }
01485
01486 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01487 {
01488 int len1, got_picture, i;
01489
01490 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01491 return -1;
01492
01493 if(pkt->data == flush_pkt.data){
01494 avcodec_flush_buffers(is->video_st->codec);
01495
01496 SDL_LockMutex(is->pictq_mutex);
01497
01498 for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
01499 is->pictq[i].target_clock= 0;
01500 }
01501 while (is->pictq_size && !is->videoq.abort_request) {
01502 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01503 }
01504 is->video_current_pos= -1;
01505 SDL_UnlockMutex(is->pictq_mutex);
01506
01507 is->last_dts_for_fault_detection=
01508 is->last_pts_for_fault_detection= INT64_MIN;
01509 is->frame_last_pts= AV_NOPTS_VALUE;
01510 is->frame_last_delay = 0;
01511 is->frame_timer = (double)av_gettime() / 1000000.0;
01512 is->skip_frames= 1;
01513 is->skip_frames_index= 0;
01514 return 0;
01515 }
01516
01517
01518
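/* The packet pts is routed through reordered_opaque so that the decoder
   returns it attached to the matching (possibly reordered) output frame. */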
01519 is->video_st->codec->reordered_opaque= pkt->pts;
01520 len1 = avcodec_decode_video2(is->video_st->codec,
01521 frame, &got_picture,
01522 pkt);
01523
01524 if (got_picture) {
01525 if(pkt->dts != AV_NOPTS_VALUE){
01526 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
01527 is->last_dts_for_fault_detection= pkt->dts;
01528 }
01529 if(frame->reordered_opaque != AV_NOPTS_VALUE){
01530 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
01531 is->last_pts_for_fault_detection= frame->reordered_opaque;
01532 }
01533 }
01534
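/* Pick the timestamp to trust for this frame: the reordered pts when pts
   reordering is forced, when the pts stream has shown fewer faults than the
   dts stream, or when no dts is available; otherwise the packet dts, and 0
   as a last resort. */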
01535 if( ( decoder_reorder_pts==1
01536 || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
01537 || pkt->dts == AV_NOPTS_VALUE)
01538 && frame->reordered_opaque != AV_NOPTS_VALUE)
01539 *pts= frame->reordered_opaque;
01540 else if(pkt->dts != AV_NOPTS_VALUE)
01541 *pts= pkt->dts;
01542 else
01543 *pts= 0;
01544
01545
01546
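/* Frame dropping: only roughly every skip_frames-th decoded picture is
   returned for display; skip_frames is adjusted by the refresh and queueing
   code based on how late the video is running. */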
01547 if (got_picture){
01548 is->skip_frames_index += 1;
01549 if(is->skip_frames_index >= is->skip_frames){
01550 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
01551 return 1;
01552 }
01553
01554 }
01555 return 0;
01556 }
01557
01558 #if CONFIG_AVFILTER
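/* Glue between the decoder and libavfilter when built with CONFIG_AVFILTER:
   "ffplay_input" is a source filter that pulls decoded frames from
   get_video_frame(), and "ffplay_output" is a sink the video thread reads
   filtered frames from. */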
01559 typedef struct {
01560 VideoState *is;
01561 AVFrame *frame;
01562 int use_dr1;
01563 } FilterPriv;
01564
01565 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
01566 {
01567 AVFilterContext *ctx = codec->opaque;
01568 AVFilterPicRef *ref;
01569 int perms = AV_PERM_WRITE;
01570 int w, h, stride[4];
01571 unsigned edge;
01572
01573 if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
01574 if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
01575 if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
01576 if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
01577 }
01578 if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
01579
01580 w = codec->width;
01581 h = codec->height;
01582 avcodec_align_dimensions2(codec, &w, &h, stride);
01583 edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
01584 w += edge << 1;
01585 h += edge << 1;
01586
01587 if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
01588 return -1;
01589
01590 ref->w = codec->width;
01591 ref->h = codec->height;
01592 for(int i = 0; i < 3; i ++) {
01593 unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
01594 unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;
01595
01596 if (ref->data[i]) {
01597 ref->data[i] += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
01598 }
01599 pic->data[i] = ref->data[i];
01600 pic->linesize[i] = ref->linesize[i];
01601 }
01602 pic->opaque = ref;
01603 pic->age = INT_MAX;
01604 pic->type = FF_BUFFER_TYPE_USER;
01605 return 0;
01606 }
01607
01608 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01609 {
01610 memset(pic->data, 0, sizeof(pic->data));
01611 avfilter_unref_pic(pic->opaque);
01612 }
01613
01614 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01615 {
01616 FilterPriv *priv = ctx->priv;
01617 AVCodecContext *codec;
01618 if(!opaque) return -1;
01619
01620 priv->is = opaque;
01621 codec = priv->is->video_st->codec;
01622 codec->opaque = ctx;
01623 if(codec->codec->capabilities & CODEC_CAP_DR1) {
01624 priv->use_dr1 = 1;
01625 codec->get_buffer = input_get_buffer;
01626 codec->release_buffer = input_release_buffer;
01627 }
01628
01629 priv->frame = avcodec_alloc_frame();
01630
01631 return 0;
01632 }
01633
01634 static void input_uninit(AVFilterContext *ctx)
01635 {
01636 FilterPriv *priv = ctx->priv;
01637 av_free(priv->frame);
01638 }
01639
01640 static int input_request_frame(AVFilterLink *link)
01641 {
01642 FilterPriv *priv = link->src->priv;
01643 AVFilterPicRef *picref;
01644 int64_t pts = 0;
01645 AVPacket pkt;
01646 int ret;
01647
01648 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
01649 av_free_packet(&pkt);
01650 if (ret < 0)
01651 return -1;
01652
01653 if(priv->use_dr1) {
01654 picref = avfilter_ref_pic(priv->frame->opaque, ~0);
01655 } else {
01656 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
01657 av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
01658 picref->pic->format, link->w, link->h);
01659 }
01660 av_free_packet(&pkt);
01661
01662 picref->pts = pts;
01663 picref->pos = pkt.pos;
01664 picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
01665 avfilter_start_frame(link, picref);
01666 avfilter_draw_slice(link, 0, link->h, 1);
01667 avfilter_end_frame(link);
01668
01669 return 0;
01670 }
01671
01672 static int input_query_formats(AVFilterContext *ctx)
01673 {
01674 FilterPriv *priv = ctx->priv;
01675 enum PixelFormat pix_fmts[] = {
01676 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01677 };
01678
01679 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01680 return 0;
01681 }
01682
01683 static int input_config_props(AVFilterLink *link)
01684 {
01685 FilterPriv *priv = link->src->priv;
01686 AVCodecContext *c = priv->is->video_st->codec;
01687
01688 link->w = c->width;
01689 link->h = c->height;
01690
01691 return 0;
01692 }
01693
01694 static AVFilter input_filter =
01695 {
01696 .name = "ffplay_input",
01697
01698 .priv_size = sizeof(FilterPriv),
01699
01700 .init = input_init,
01701 .uninit = input_uninit,
01702
01703 .query_formats = input_query_formats,
01704
01705 .inputs = (AVFilterPad[]) {{ .name = NULL }},
01706 .outputs = (AVFilterPad[]) {{ .name = "default",
01707 .type = AVMEDIA_TYPE_VIDEO,
01708 .request_frame = input_request_frame,
01709 .config_props = input_config_props, },
01710 { .name = NULL }},
01711 };
01712
01713 static void output_end_frame(AVFilterLink *link)
01714 {
01715 }
01716
01717 static int output_query_formats(AVFilterContext *ctx)
01718 {
01719 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
01720
01721 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01722 return 0;
01723 }
01724
01725 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
01726 int64_t *pts, int64_t *pos)
01727 {
01728 AVFilterPicRef *pic;
01729
01730 if(avfilter_request_frame(ctx->inputs[0]))
01731 return -1;
01732 if(!(pic = ctx->inputs[0]->cur_pic))
01733 return -1;
01734 ctx->inputs[0]->cur_pic = NULL;
01735
01736 frame->opaque = pic;
01737 *pts = pic->pts;
01738 *pos = pic->pos;
01739
01740 memcpy(frame->data, pic->data, sizeof(frame->data));
01741 memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
01742
01743 return 1;
01744 }
01745
01746 static AVFilter output_filter =
01747 {
01748 .name = "ffplay_output",
01749
01750 .query_formats = output_query_formats,
01751
01752 .inputs = (AVFilterPad[]) {{ .name = "default",
01753 .type = AVMEDIA_TYPE_VIDEO,
01754 .end_frame = output_end_frame,
01755 .min_perms = AV_PERM_READ, },
01756 { .name = NULL }},
01757 .outputs = (AVFilterPad[]) {{ .name = NULL }},
01758 };
01759 #endif
01760
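/* video decoding thread: pulls frames (directly or through the filter
   graph), converts their pts to seconds and queues them for display */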
01761 static int video_thread(void *arg)
01762 {
01763 VideoState *is = arg;
01764 AVFrame *frame= avcodec_alloc_frame();
01765 int64_t pts_int;
01766 double pts;
01767 int ret;
01768
01769 #if CONFIG_AVFILTER
01770 int64_t pos;
01771 AVFilterContext *filt_src = NULL, *filt_out = NULL;
01772 AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
01773 graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
01774
01775 if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
01776 if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;
01777
01778 if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
01779 if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;
01780
01781
01782 if(vfilters) {
01783 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
01784 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
01785
01786 outputs->name = av_strdup("in");
01787 outputs->filter = filt_src;
01788 outputs->pad_idx = 0;
01789 outputs->next = NULL;
01790
01791 inputs->name = av_strdup("out");
01792 inputs->filter = filt_out;
01793 inputs->pad_idx = 0;
01794 inputs->next = NULL;
01795
01796 if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
01797 goto the_end;
01798 av_freep(&vfilters);
01799 } else {
01800 if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
01801 }
01802 avfilter_graph_add_filter(graph, filt_src);
01803 avfilter_graph_add_filter(graph, filt_out);
01804
01805 if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
01806 if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
01807 if(avfilter_graph_config_links(graph, NULL)) goto the_end;
01808
01809 is->out_video_filter = filt_out;
01810 #endif
01811
01812 for(;;) {
01813 #if !CONFIG_AVFILTER
01814 AVPacket pkt;
01815 #endif
01816 while (is->paused && !is->videoq.abort_request)
01817 SDL_Delay(10);
01818 #if CONFIG_AVFILTER
01819 ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
01820 #else
01821 ret = get_video_frame(is, frame, &pts_int, &pkt);
01822 #endif
01823
01824 if (ret < 0) goto the_end;
01825
01826 if (!ret)
01827 continue;
01828
01829 pts = pts_int*av_q2d(is->video_st->time_base);
01830
01831 #if CONFIG_AVFILTER
01832 ret = output_picture2(is, frame, pts, pos);
01833 #else
01834 ret = output_picture2(is, frame, pts, pkt.pos);
01835 av_free_packet(&pkt);
01836 #endif
01837 if (ret < 0)
01838 goto the_end;
01839
01840 if (step)
01841 if (cur_stream)
01842 stream_pause(cur_stream);
01843 }
01844 the_end:
01845 #if CONFIG_AVFILTER
01846 avfilter_graph_destroy(graph);
01847 av_freep(&graph);
01848 #endif
01849 av_free(frame);
01850 return 0;
01851 }
01852
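/* subtitle decoding thread: decodes subtitle packets, converts their RGBA
   palettes to YUVA for later blending, and queues them for display */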
01853 static int subtitle_thread(void *arg)
01854 {
01855 VideoState *is = arg;
01856 SubPicture *sp;
01857 AVPacket pkt1, *pkt = &pkt1;
01858 int len1, got_subtitle;
01859 double pts;
01860 int i, j;
01861 int r, g, b, y, u, v, a;
01862
01863 for(;;) {
01864 while (is->paused && !is->subtitleq.abort_request) {
01865 SDL_Delay(10);
01866 }
01867 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01868 break;
01869
01870 if(pkt->data == flush_pkt.data){
01871 avcodec_flush_buffers(is->subtitle_st->codec);
01872 continue;
01873 }
01874 SDL_LockMutex(is->subpq_mutex);
01875 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01876 !is->subtitleq.abort_request) {
01877 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01878 }
01879 SDL_UnlockMutex(is->subpq_mutex);
01880
01881 if (is->subtitleq.abort_request)
01882 goto the_end;
01883
01884 sp = &is->subpq[is->subpq_windex];
01885
01886
01887
01888 pts = 0;
01889 if (pkt->pts != AV_NOPTS_VALUE)
01890 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
01891
01892 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
01893 &sp->sub, &got_subtitle,
01894 pkt);
01895
01896
01897 if (got_subtitle && sp->sub.format == 0) {
01898 sp->pts = pts;
01899
01900 for (i = 0; i < sp->sub.num_rects; i++)
01901 {
01902 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01903 {
01904 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01905 y = RGB_TO_Y_CCIR(r, g, b);
01906 u = RGB_TO_U_CCIR(r, g, b, 0);
01907 v = RGB_TO_V_CCIR(r, g, b, 0);
01908 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01909 }
01910 }
01911
01912
01913 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01914 is->subpq_windex = 0;
01915 SDL_LockMutex(is->subpq_mutex);
01916 is->subpq_size++;
01917 SDL_UnlockMutex(is->subpq_mutex);
01918 }
01919 av_free_packet(pkt);
01920
01921
01922
01923 }
01924 the_end:
01925 return 0;
01926 }
01927
01928
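/* copy the freshly played samples into the ring buffer used by the audio
   visualization */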
01929 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01930 {
01931 int size, len, channels;
01932
01933 channels = is->audio_st->codec->channels;
01934
01935 size = samples_size / sizeof(short);
01936 while (size > 0) {
01937 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01938 if (len > size)
01939 len = size;
01940 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01941 samples += len;
01942 is->sample_array_index += len;
01943 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01944 is->sample_array_index = 0;
01945 size -= len;
01946 }
01947 }
01948
01949
01950
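/* Return the new audio buffer size: when audio is not the master clock,
   samples are removed or duplicated (within SAMPLE_CORRECTION_PERCENT_MAX)
   so the audio clock tracks the master clock. */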
01951 static int synchronize_audio(VideoState *is, short *samples,
01952 int samples_size1, double pts)
01953 {
01954 int n, samples_size;
01955 double ref_clock;
01956
01957 n = 2 * is->audio_st->codec->channels;
01958 samples_size = samples_size1;
01959
01960
01961 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01962 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01963 double diff, avg_diff;
01964 int wanted_size, min_size, max_size, nb_samples;
01965
01966 ref_clock = get_master_clock(is);
01967 diff = get_audio_clock(is) - ref_clock;
01968
01969 if (diff < AV_NOSYNC_THRESHOLD) {
01970 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01971 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01972
01973 is->audio_diff_avg_count++;
01974 } else {
01975
01976 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01977
01978 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01979 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01980 nb_samples = samples_size / n;
01981
01982 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01983 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01984 if (wanted_size < min_size)
01985 wanted_size = min_size;
01986 else if (wanted_size > max_size)
01987 wanted_size = max_size;
01988
01989
01990 if (wanted_size < samples_size) {
01991
01992 samples_size = wanted_size;
01993 } else if (wanted_size > samples_size) {
01994 uint8_t *samples_end, *q;
01995 int nb;
01996
01997
01998 nb = (wanted_size - samples_size); /* bytes of samples to append */
01999 samples_end = (uint8_t *)samples + samples_size - n;
02000 q = samples_end + n;
02001 while (nb > 0) {
02002 memcpy(q, samples_end, n);
02003 q += n;
02004 nb -= n;
02005 }
02006 samples_size = wanted_size;
02007 }
02008 }
02009 #if 0
02010 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
02011 diff, avg_diff, samples_size - samples_size1,
02012 is->audio_clock, is->video_clock, is->audio_diff_threshold);
02013 #endif
02014 }
02015 } else {
02016
02017
02018 is->audio_diff_avg_count = 0;
02019 is->audio_diff_cum = 0;
02020 }
02021 }
02022
02023 return samples_size;
02024 }
02025
02026
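/* decode one audio frame and return its uncompressed size in bytes,
   converting to signed 16-bit samples when the codec outputs another sample
   format */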
02027 static int audio_decode_frame(VideoState *is, double *pts_ptr)
02028 {
02029 AVPacket *pkt_temp = &is->audio_pkt_temp;
02030 AVPacket *pkt = &is->audio_pkt;
02031 AVCodecContext *dec= is->audio_st->codec;
02032 int n, len1, data_size;
02033 double pts;
02034
02035 for(;;) {
02036
02037 while (pkt_temp->size > 0) {
02038 data_size = sizeof(is->audio_buf1);
02039 len1 = avcodec_decode_audio3(dec,
02040 (int16_t *)is->audio_buf1, &data_size,
02041 pkt_temp);
02042 if (len1 < 0) {
02043
02044 pkt_temp->size = 0;
02045 break;
02046 }
02047
02048 pkt_temp->data += len1;
02049 pkt_temp->size -= len1;
02050 if (data_size <= 0)
02051 continue;
02052
02053 if (dec->sample_fmt != is->audio_src_fmt) {
02054 if (is->reformat_ctx)
02055 av_audio_convert_free(is->reformat_ctx);
02056 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
02057 dec->sample_fmt, 1, NULL, 0);
02058 if (!is->reformat_ctx) {
02059 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
02060 avcodec_get_sample_fmt_name(dec->sample_fmt),
02061 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
02062 break;
02063 }
02064 is->audio_src_fmt= dec->sample_fmt;
02065 }
02066
02067 if (is->reformat_ctx) {
02068 const void *ibuf[6]= {is->audio_buf1};
02069 void *obuf[6]= {is->audio_buf2};
02070 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
02071 int ostride[6]= {2};
02072 int len= data_size/istride[0];
02073 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
02074 printf("av_audio_convert() failed\n");
02075 break;
02076 }
02077 is->audio_buf= is->audio_buf2;
02078
02079
02080 data_size= len*2;
02081 }else{
02082 is->audio_buf= is->audio_buf1;
02083 }
02084
02085
02086 pts = is->audio_clock;
02087 *pts_ptr = pts;
02088 n = 2 * dec->channels;
02089 is->audio_clock += (double)data_size /
02090 (double)(n * dec->sample_rate);
02091 #if defined(DEBUG_SYNC)
02092 {
02093 static double last_clock;
02094 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
02095 is->audio_clock - last_clock,
02096 is->audio_clock, pts);
02097 last_clock = is->audio_clock;
02098 }
02099 #endif
02100 return data_size;
02101 }
02102
02103
02104 if (pkt->data)
02105 av_free_packet(pkt);
02106
02107 if (is->paused || is->audioq.abort_request) {
02108 return -1;
02109 }
02110
02111
02112 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
02113 return -1;
02114 if(pkt->data == flush_pkt.data){
02115 avcodec_flush_buffers(dec);
02116 continue;
02117 }
02118
02119 pkt_temp->data = pkt->data;
02120 pkt_temp->size = pkt->size;
02121
02122
02123 if (pkt->pts != AV_NOPTS_VALUE) {
02124 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
02125 }
02126 }
02127 }
02128
02129
02130
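/* number of bytes already decoded but not yet consumed by the SDL audio
   callback */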
02131 static int audio_write_get_buf_size(VideoState *is)
02132 {
02133 return is->audio_buf_size - is->audio_buf_index;
02134 }
02135
02136
02137
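/* SDL audio callback: fill 'stream' with 'len' bytes, decoding more audio whenever the local buffer is exhausted */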
02138 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02139 {
02140 VideoState *is = opaque;
02141 int audio_size, len1;
02142 double pts;
02143
02144 audio_callback_time = av_gettime();
02145
02146 while (len > 0) {
02147 if (is->audio_buf_index >= is->audio_buf_size) {
02148 audio_size = audio_decode_frame(is, &pts);
02149 if (audio_size < 0) {
02150
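/* if decoding failed, output a short block of silence */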
02151 is->audio_buf = is->audio_buf1;
02152 is->audio_buf_size = 1024;
02153 memset(is->audio_buf, 0, is->audio_buf_size);
02154 } else {
02155 if (is->show_audio)
02156 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02157 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02158 pts);
02159 is->audio_buf_size = audio_size;
02160 }
02161 is->audio_buf_index = 0;
02162 }
02163 len1 = is->audio_buf_size - is->audio_buf_index;
02164 if (len1 > len)
02165 len1 = len;
02166 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02167 len -= len1;
02168 stream += len1;
02169 is->audio_buf_index += len1;
02170 }
02171 }
02172
02173
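/* open the stream identified by stream_index; return 0 on success, -1 on failure */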
02174 static int stream_component_open(VideoState *is, int stream_index)
02175 {
02176 AVFormatContext *ic = is->ic;
02177 AVCodecContext *avctx;
02178 AVCodec *codec;
02179 SDL_AudioSpec wanted_spec, spec;
02180
02181 if (stream_index < 0 || stream_index >= ic->nb_streams)
02182 return -1;
02183 avctx = ic->streams[stream_index]->codec;
02184
02185
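/* ask the decoder to downmix to at most two channels */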
02186 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02187 if (avctx->channels > 0) {
02188 avctx->request_channels = FFMIN(2, avctx->channels);
02189 } else {
02190 avctx->request_channels = 2;
02191 }
02192 }
02193
02194 codec = avcodec_find_decoder(avctx->codec_id);
02195 avctx->debug_mv = debug_mv;
02196 avctx->debug = debug;
02197 avctx->workaround_bugs = workaround_bugs;
02198 avctx->lowres = lowres;
02199 if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
02200 avctx->idct_algo= idct;
02201 if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
02202 avctx->skip_frame= skip_frame;
02203 avctx->skip_idct= skip_idct;
02204 avctx->skip_loop_filter= skip_loop_filter;
02205 avctx->error_recognition= error_recognition;
02206 avctx->error_concealment= error_concealment;
02207 avcodec_thread_init(avctx, thread_count);
02208
02209 set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
02210
02211 if (!codec ||
02212 avcodec_open(avctx, codec) < 0)
02213 return -1;
02214
02215
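/* prepare the SDL audio output */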
02216 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02217 wanted_spec.freq = avctx->sample_rate;
02218 wanted_spec.format = AUDIO_S16SYS;
02219 wanted_spec.channels = avctx->channels;
02220 wanted_spec.silence = 0;
02221 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
02222 wanted_spec.callback = sdl_audio_callback;
02223 wanted_spec.userdata = is;
02224 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
02225 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
02226 return -1;
02227 }
02228 is->audio_hw_buf_size = spec.size;
02229 is->audio_src_fmt= SAMPLE_FMT_S16;
02230 }
02231
02232 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
02233 switch(avctx->codec_type) {
02234 case AVMEDIA_TYPE_AUDIO:
02235 is->audio_stream = stream_index;
02236 is->audio_st = ic->streams[stream_index];
02237 is->audio_buf_size = 0;
02238 is->audio_buf_index = 0;
02239
02240
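/* init the averaging filter used for audio/video sync correction */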
02241 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
02242 is->audio_diff_avg_count = 0;
02243
02244
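/* only correct the audio sync when the error exceeds this threshold, since the SDL buffer fullness is not known precisely */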
02245 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
02246
02247 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
02248 packet_queue_init(&is->audioq);
02249 SDL_PauseAudio(0);
02250 break;
02251 case AVMEDIA_TYPE_VIDEO:
02252 is->video_stream = stream_index;
02253 is->video_st = ic->streams[stream_index];
02254
02255
02256
02257 packet_queue_init(&is->videoq);
02258 is->video_tid = SDL_CreateThread(video_thread, is);
02259 break;
02260 case AVMEDIA_TYPE_SUBTITLE:
02261 is->subtitle_stream = stream_index;
02262 is->subtitle_st = ic->streams[stream_index];
02263 packet_queue_init(&is->subtitleq);
02264
02265 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
02266 break;
02267 default:
02268 break;
02269 }
02270 return 0;
02271 }
02272
02273 static void stream_component_close(VideoState *is, int stream_index)
02274 {
02275 AVFormatContext *ic = is->ic;
02276 AVCodecContext *avctx;
02277
02278 if (stream_index < 0 || stream_index >= ic->nb_streams)
02279 return;
02280 avctx = ic->streams[stream_index]->codec;
02281
02282 switch(avctx->codec_type) {
02283 case AVMEDIA_TYPE_AUDIO:
02284 packet_queue_abort(&is->audioq);
02285
02286 SDL_CloseAudio();
02287
02288 packet_queue_end(&is->audioq);
02289 if (is->reformat_ctx)
02290 av_audio_convert_free(is->reformat_ctx);
02291 is->reformat_ctx = NULL;
02292 break;
02293 case AVMEDIA_TYPE_VIDEO:
02294 packet_queue_abort(&is->videoq);
02295
02296
02297
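/* signal the picture queue condition so the video thread cannot stay blocked */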
02298 SDL_LockMutex(is->pictq_mutex);
02299 SDL_CondSignal(is->pictq_cond);
02300 SDL_UnlockMutex(is->pictq_mutex);
02301
02302 SDL_WaitThread(is->video_tid, NULL);
02303
02304 packet_queue_end(&is->videoq);
02305 break;
02306 case AVMEDIA_TYPE_SUBTITLE:
02307 packet_queue_abort(&is->subtitleq);
02308
02309
02310
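/* mark the stream change and wake the subtitle thread so it can notice it */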
02311 SDL_LockMutex(is->subpq_mutex);
02312 is->subtitle_stream_changed = 1;
02313
02314 SDL_CondSignal(is->subpq_cond);
02315 SDL_UnlockMutex(is->subpq_mutex);
02316
02317 SDL_WaitThread(is->subtitle_tid, NULL);
02318
02319 packet_queue_end(&is->subtitleq);
02320 break;
02321 default:
02322 break;
02323 }
02324
02325 ic->streams[stream_index]->discard = AVDISCARD_ALL;
02326 avcodec_close(avctx);
02327 switch(avctx->codec_type) {
02328 case AVMEDIA_TYPE_AUDIO:
02329 is->audio_st = NULL;
02330 is->audio_stream = -1;
02331 break;
02332 case AVMEDIA_TYPE_VIDEO:
02333 is->video_st = NULL;
02334 is->video_stream = -1;
02335 break;
02336 case AVMEDIA_TYPE_SUBTITLE:
02337 is->subtitle_st = NULL;
02338 is->subtitle_stream = -1;
02339 break;
02340 default:
02341 break;
02342 }
02343 }
02344
02345
02346
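/* only one decode thread runs at a time, so a global variable is enough for the interrupt callback */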
02347 static VideoState *global_video_state;
02348
02349 static int decode_interrupt_cb(void)
02350 {
02351 return (global_video_state && global_video_state->abort_request);
02352 }
02353
02354
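/* demuxer thread: opens the input, selects the streams and feeds the packet queues */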
02355 static int decode_thread(void *arg)
02356 {
02357 VideoState *is = arg;
02358 AVFormatContext *ic;
02359 int err, i, ret;
02360 int st_index[AVMEDIA_TYPE_NB];
02361 int st_count[AVMEDIA_TYPE_NB]={0};
02362 int st_best_packet_count[AVMEDIA_TYPE_NB];
02363 AVPacket pkt1, *pkt = &pkt1;
02364 AVFormatParameters params, *ap = &params;
02365 int eof=0;
02366 int pkt_in_play_range = 0;
02367
02368 ic = avformat_alloc_context();
02369
02370 memset(st_index, -1, sizeof(st_index));
02371 memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
02372 is->video_stream = -1;
02373 is->audio_stream = -1;
02374 is->subtitle_stream = -1;
02375
02376 global_video_state = is;
02377 url_set_interrupt_cb(decode_interrupt_cb);
02378
02379 memset(ap, 0, sizeof(*ap));
02380
02381 ap->prealloced_context = 1;
02382 ap->width = frame_width;
02383 ap->height= frame_height;
02384 ap->time_base= (AVRational){1, 25};
02385 ap->pix_fmt = frame_pix_fmt;
02386
02387 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
02388
02389 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
02390 if (err < 0) {
02391 print_error(is->filename, err);
02392 ret = -1;
02393 goto fail;
02394 }
02395 is->ic = ic;
02396
02397 if(genpts)
02398 ic->flags |= AVFMT_FLAG_GENPTS;
02399
02400 err = av_find_stream_info(ic);
02401 if (err < 0) {
02402 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02403 ret = -1;
02404 goto fail;
02405 }
02406 if(ic->pb)
02407 ic->pb->eof_reached= 0;
02408
02409 if(seek_by_bytes<0)
02410 seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
02411
02412
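/* if a start position was requested, seek to it (relative to the container start time) */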
02413 if (start_time != AV_NOPTS_VALUE) {
02414 int64_t timestamp;
02415
02416 timestamp = start_time;
02417
02418 if (ic->start_time != AV_NOPTS_VALUE)
02419 timestamp += ic->start_time;
02420 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02421 if (ret < 0) {
02422 fprintf(stderr, "%s: could not seek to position %0.3f\n",
02423 is->filename, (double)timestamp / AV_TIME_BASE);
02424 }
02425 }
02426
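/* for each media type, keep the stream with the most frames seen during probing, honouring any explicit -ast/-vst/-sst selection */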
02427 for(i = 0; i < ic->nb_streams; i++) {
02428 AVStream *st= ic->streams[i];
02429 AVCodecContext *avctx = st->codec;
02430 ic->streams[i]->discard = AVDISCARD_ALL;
02431 if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
02432 continue;
02433 if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
02434 continue;
02435
02436 if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
02437 continue;
02438 st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
02439
02440 switch(avctx->codec_type) {
02441 case AVMEDIA_TYPE_AUDIO:
02442 if (!audio_disable)
02443 st_index[AVMEDIA_TYPE_AUDIO] = i;
02444 break;
02445 case AVMEDIA_TYPE_VIDEO:
02446 case AVMEDIA_TYPE_SUBTITLE:
02447 if (!video_disable)
02448 st_index[avctx->codec_type] = i;
02449 break;
02450 default:
02451 break;
02452 }
02453 }
02454 if (show_status) {
02455 dump_format(ic, 0, is->filename, 0);
02456 }
02457
02458
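/* open the selected streams */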
02459 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02460 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02461 }
02462
02463 ret=-1;
02464 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02465 ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02466 }
02467 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02468 if(ret<0) {
02469 if (!display_disable)
02470 is->show_audio = 2;
02471 }
02472
02473 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02474 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02475 }
02476
02477 if (is->video_stream < 0 && is->audio_stream < 0) {
02478 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02479 ret = -1;
02480 goto fail;
02481 }
02482
02483 for(;;) {
02484 if (is->abort_request)
02485 break;
02486 if (is->paused != is->last_paused) {
02487 is->last_paused = is->paused;
02488 if (is->paused)
02489 is->read_pause_return= av_read_pause(ic);
02490 else
02491 av_read_play(ic);
02492 }
02493 #if CONFIG_RTSP_DEMUXER
02494 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02495
02496
02497 SDL_Delay(10);
02498 continue;
02499 }
02500 #endif
02501 if (is->seek_req) {
02502 int64_t seek_target= is->seek_pos;
02503 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02504 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
02505
02506
02507
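/* perform the pending seek; on success flush the packet queues and queue flush packets so the decoders reset */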
02508 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02509 if (ret < 0) {
02510 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02511 }else{
02512 if (is->audio_stream >= 0) {
02513 packet_queue_flush(&is->audioq);
02514 packet_queue_put(&is->audioq, &flush_pkt);
02515 }
02516 if (is->subtitle_stream >= 0) {
02517 packet_queue_flush(&is->subtitleq);
02518 packet_queue_put(&is->subtitleq, &flush_pkt);
02519 }
02520 if (is->video_stream >= 0) {
02521 packet_queue_flush(&is->videoq);
02522 packet_queue_put(&is->videoq, &flush_pkt);
02523 }
02524 }
02525 is->seek_req = 0;
02526 eof= 0;
02527 }
02528
02529
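/* if the queues are full, no need to read more for now */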
02530 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02531 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
02532 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
02533 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
02534
02535 SDL_Delay(10);
02536 continue;
02537 }
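/* at end of file, queue an empty video packet so the decoder flushes its buffered frames, then loop or exit once the queues drain */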
02538 if(url_feof(ic->pb) || eof) {
02539 if(is->video_stream >= 0){
02540 av_init_packet(pkt);
02541 pkt->data=NULL;
02542 pkt->size=0;
02543 pkt->stream_index= is->video_stream;
02544 packet_queue_put(&is->videoq, pkt);
02545 }
02546 SDL_Delay(10);
02547 if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
02548 if(loop!=1 && (!loop || --loop)){
02549 stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02550 }else if(autoexit){
02551 ret=AVERROR_EOF;
02552 goto fail;
02553 }
02554 }
02555 continue;
02556 }
02557 ret = av_read_frame(ic, pkt);
02558 if (ret < 0) {
02559 if (ret == AVERROR_EOF)
02560 eof=1;
02561 if (url_ferror(ic->pb))
02562 break;
02563 SDL_Delay(100);
02564 continue;
02565 }
02566
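/* keep only packets whose timestamp falls inside the requested -ss/-t play range */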
02567 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02568 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02569 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02570 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
02571 <= ((double)duration/1000000);
02572 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02573 packet_queue_put(&is->audioq, pkt);
02574 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02575 packet_queue_put(&is->videoq, pkt);
02576 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02577 packet_queue_put(&is->subtitleq, pkt);
02578 } else {
02579 av_free_packet(pkt);
02580 }
02581 }
02582
02583 while (!is->abort_request) {
02584 SDL_Delay(100);
02585 }
02586
02587 ret = 0;
02588 fail:
02589
02590 global_video_state = NULL;
02591
02592
02593 if (is->audio_stream >= 0)
02594 stream_component_close(is, is->audio_stream);
02595 if (is->video_stream >= 0)
02596 stream_component_close(is, is->video_stream);
02597 if (is->subtitle_stream >= 0)
02598 stream_component_close(is, is->subtitle_stream);
02599 if (is->ic) {
02600 av_close_input_file(is->ic);
02601 is->ic = NULL;
02602 }
02603 url_set_interrupt_cb(NULL);
02604
02605 if (ret != 0) {
02606 SDL_Event event;
02607
02608 event.type = FF_QUIT_EVENT;
02609 event.user.data1 = is;
02610 SDL_PushEvent(&event);
02611 }
02612 return 0;
02613 }
02614
02615 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02616 {
02617 VideoState *is;
02618
02619 is = av_mallocz(sizeof(VideoState));
02620 if (!is)
02621 return NULL;
02622 av_strlcpy(is->filename, filename, sizeof(is->filename));
02623 is->iformat = iformat;
02624 is->ytop = 0;
02625 is->xleft = 0;
02626
02627
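/* create the mutexes and condition variables for the picture and subtitle queues */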
02628 is->pictq_mutex = SDL_CreateMutex();
02629 is->pictq_cond = SDL_CreateCond();
02630
02631 is->subpq_mutex = SDL_CreateMutex();
02632 is->subpq_cond = SDL_CreateCond();
02633
02634 is->av_sync_type = av_sync_type;
02635 is->parse_tid = SDL_CreateThread(decode_thread, is);
02636 if (!is->parse_tid) {
02637 av_free(is);
02638 return NULL;
02639 }
02640 return is;
02641 }
02642
02643 static void stream_close(VideoState *is)
02644 {
02645 VideoPicture *vp;
02646 int i;
02647
02648 is->abort_request = 1;
02649 SDL_WaitThread(is->parse_tid, NULL);
02650 SDL_WaitThread(is->refresh_tid, NULL);
02651
02652
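/* free all queued pictures */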
02653 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
02654 vp = &is->pictq[i];
02655 #if CONFIG_AVFILTER
02656 if (vp->picref) {
02657 avfilter_unref_pic(vp->picref);
02658 vp->picref = NULL;
02659 }
02660 #endif
02661 if (vp->bmp) {
02662 SDL_FreeYUVOverlay(vp->bmp);
02663 vp->bmp = NULL;
02664 }
02665 }
02666 SDL_DestroyMutex(is->pictq_mutex);
02667 SDL_DestroyCond(is->pictq_cond);
02668 SDL_DestroyMutex(is->subpq_mutex);
02669 SDL_DestroyCond(is->subpq_cond);
02670 #if !CONFIG_AVFILTER
02671 if (is->img_convert_ctx)
02672 sws_freeContext(is->img_convert_ctx);
02673 #endif
02674 av_free(is);
02675 }
02676
02677 static void stream_cycle_channel(VideoState *is, int codec_type)
02678 {
02679 AVFormatContext *ic = is->ic;
02680 int start_index, stream_index;
02681 AVStream *st;
02682
02683 if (codec_type == AVMEDIA_TYPE_VIDEO)
02684 start_index = is->video_stream;
02685 else if (codec_type == AVMEDIA_TYPE_AUDIO)
02686 start_index = is->audio_stream;
02687 else
02688 start_index = is->subtitle_stream;
02689 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02690 return;
02691 stream_index = start_index;
02692 for(;;) {
02693 if (++stream_index >= is->ic->nb_streams)
02694 {
02695 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02696 {
02697 stream_index = -1;
02698 goto the_end;
02699 } else
02700 stream_index = 0;
02701 }
02702 if (stream_index == start_index)
02703 return;
02704 st = ic->streams[stream_index];
02705 if (st->codec->codec_type == codec_type) {
02706
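/* only switch to a stream whose decoding parameters look usable */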
02707 switch(codec_type) {
02708 case AVMEDIA_TYPE_AUDIO:
02709 if (st->codec->sample_rate != 0 &&
02710 st->codec->channels != 0)
02711 goto the_end;
02712 break;
02713 case AVMEDIA_TYPE_VIDEO:
02714 case AVMEDIA_TYPE_SUBTITLE:
02715 goto the_end;
02716 default:
02717 break;
02718 }
02719 }
02720 }
02721 the_end:
02722 stream_component_close(is, start_index);
02723 stream_component_open(is, stream_index);
02724 }
02725
02726
02727 static void toggle_full_screen(void)
02728 {
02729 is_full_screen = !is_full_screen;
02730 if (!fs_screen_width) {
02731
02732
02733 }
02734 video_open(cur_stream);
02735 }
02736
02737 static void toggle_pause(void)
02738 {
02739 if (cur_stream)
02740 stream_pause(cur_stream);
02741 step = 0;
02742 }
02743
02744 static void step_to_next_frame(void)
02745 {
02746 if (cur_stream) {
02747
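/* if the stream is paused, unpause it so the next frame can be displayed */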
02748 if (cur_stream->paused)
02749 stream_pause(cur_stream);
02750 }
02751 step = 1;
02752 }
02753
02754 static void do_exit(void)
02755 {
02756 int i;
02757 if (cur_stream) {
02758 stream_close(cur_stream);
02759 cur_stream = NULL;
02760 }
02761 for (i = 0; i < AVMEDIA_TYPE_NB; i++)
02762 av_free(avcodec_opts[i]);
02763 av_free(avformat_opts);
02764 av_free(sws_opts);
02765 #if CONFIG_AVFILTER
02766 avfilter_uninit();
02767 #endif
02768 if (show_status)
02769 printf("\n");
02770 SDL_Quit();
02771 exit(0);
02772 }
02773
02774 static void toggle_audio_display(void)
02775 {
02776 if (cur_stream) {
02777 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02778 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
02779 fill_rectangle(screen,
02780 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02781 bgcolor);
02782 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02783 }
02784 }
02785
02786
02787 static void event_loop(void)
02788 {
02789 SDL_Event event;
02790 double incr, pos, frac;
02791
02792 for(;;) {
02793 double x;
02794 SDL_WaitEvent(&event);
02795 switch(event.type) {
02796 case SDL_KEYDOWN:
02797 switch(event.key.keysym.sym) {
02798 case SDLK_ESCAPE:
02799 case SDLK_q:
02800 do_exit();
02801 break;
02802 case SDLK_f:
02803 toggle_full_screen();
02804 break;
02805 case SDLK_p:
02806 case SDLK_SPACE:
02807 toggle_pause();
02808 break;
02809 case SDLK_s:
02810 step_to_next_frame();
02811 break;
02812 case SDLK_a:
02813 if (cur_stream)
02814 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
02815 break;
02816 case SDLK_v:
02817 if (cur_stream)
02818 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
02819 break;
02820 case SDLK_t:
02821 if (cur_stream)
02822 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
02823 break;
02824 case SDLK_w:
02825 toggle_audio_display();
02826 break;
02827 case SDLK_LEFT:
02828 incr = -10.0;
02829 goto do_seek;
02830 case SDLK_RIGHT:
02831 incr = 10.0;
02832 goto do_seek;
02833 case SDLK_UP:
02834 incr = 60.0;
02835 goto do_seek;
02836 case SDLK_DOWN:
02837 incr = -60.0;
02838 do_seek:
02839 if (cur_stream) {
02840 if (seek_by_bytes) {
02841 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
02842 pos= cur_stream->video_current_pos;
02843 }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
02844 pos= cur_stream->audio_pkt.pos;
02845 }else
02846 pos = url_ftell(cur_stream->ic->pb);
02847 if (cur_stream->ic->bit_rate)
02848 incr *= cur_stream->ic->bit_rate / 8.0;
02849 else
02850 incr *= 180000.0;
02851 pos += incr;
02852 stream_seek(cur_stream, pos, incr, 1);
02853 } else {
02854 pos = get_master_clock(cur_stream);
02855 pos += incr;
02856 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
02857 }
02858 }
02859 break;
02860 default:
02861 break;
02862 }
02863 break;
02864 case SDL_MOUSEBUTTONDOWN:
02865 case SDL_MOUSEMOTION:
02866 if(event.type ==SDL_MOUSEBUTTONDOWN){
02867 x= event.button.x;
02868 }else{
02869 if(event.motion.state != SDL_PRESSED)
02870 break;
02871 x= event.motion.x;
02872 }
02873 if (cur_stream) {
02874 if(seek_by_bytes || cur_stream->ic->duration<=0){
02875 uint64_t size= url_fsize(cur_stream->ic->pb);
02876 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
02877 }else{
02878 int64_t ts;
02879 int ns, hh, mm, ss;
02880 int tns, thh, tmm, tss;
02881 tns = cur_stream->ic->duration/1000000LL;
02882 thh = tns/3600;
02883 tmm = (tns%3600)/60;
02884 tss = (tns%60);
02885 frac = x/cur_stream->width;
02886 ns = frac*tns;
02887 hh = ns/3600;
02888 mm = (ns%3600)/60;
02889 ss = (ns%60);
02890 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
02891 hh, mm, ss, thh, tmm, tss);
02892 ts = frac*cur_stream->ic->duration;
02893 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
02894 ts += cur_stream->ic->start_time;
02895 stream_seek(cur_stream, ts, 0, 0);
02896 }
02897 }
02898 break;
02899 case SDL_VIDEORESIZE:
02900 if (cur_stream) {
02901 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02902 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02903 screen_width = cur_stream->width = event.resize.w;
02904 screen_height= cur_stream->height= event.resize.h;
02905 }
02906 break;
02907 case SDL_QUIT:
02908 case FF_QUIT_EVENT:
02909 do_exit();
02910 break;
02911 case FF_ALLOC_EVENT:
02912 video_open(event.user.data1);
02913 alloc_picture(event.user.data1);
02914 break;
02915 case FF_REFRESH_EVENT:
02916 video_refresh_timer(event.user.data1);
02917 cur_stream->refresh=0;
02918 break;
02919 default:
02920 break;
02921 }
02922 }
02923 }
02924
02925 static void opt_frame_size(const char *arg)
02926 {
02927 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
02928 fprintf(stderr, "Incorrect frame size\n");
02929 exit(1);
02930 }
02931 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02932 fprintf(stderr, "Frame size must be a multiple of 2\n");
02933 exit(1);
02934 }
02935 }
02936
02937 static int opt_width(const char *opt, const char *arg)
02938 {
02939 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02940 return 0;
02941 }
02942
02943 static int opt_height(const char *opt, const char *arg)
02944 {
02945 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02946 return 0;
02947 }
02948
02949 static void opt_format(const char *arg)
02950 {
02951 file_iformat = av_find_input_format(arg);
02952 if (!file_iformat) {
02953 fprintf(stderr, "Unknown input format: %s\n", arg);
02954 exit(1);
02955 }
02956 }
02957
02958 static void opt_frame_pix_fmt(const char *arg)
02959 {
02960 frame_pix_fmt = av_get_pix_fmt(arg);
02961 }
02962
02963 static int opt_sync(const char *opt, const char *arg)
02964 {
02965 if (!strcmp(arg, "audio"))
02966 av_sync_type = AV_SYNC_AUDIO_MASTER;
02967 else if (!strcmp(arg, "video"))
02968 av_sync_type = AV_SYNC_VIDEO_MASTER;
02969 else if (!strcmp(arg, "ext"))
02970 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02971 else {
02972 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02973 exit(1);
02974 }
02975 return 0;
02976 }
02977
02978 static int opt_seek(const char *opt, const char *arg)
02979 {
02980 start_time = parse_time_or_die(opt, arg, 1);
02981 return 0;
02982 }
02983
02984 static int opt_duration(const char *opt, const char *arg)
02985 {
02986 duration = parse_time_or_die(opt, arg, 1);
02987 return 0;
02988 }
02989
02990 static int opt_debug(const char *opt, const char *arg)
02991 {
02992 av_log_set_level(99);
02993 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02994 return 0;
02995 }
02996
02997 static int opt_vismv(const char *opt, const char *arg)
02998 {
02999 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
03000 return 0;
03001 }
03002
03003 static int opt_thread_count(const char *opt, const char *arg)
03004 {
03005 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
03006 #if !HAVE_THREADS
03007 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
03008 #endif
03009 return 0;
03010 }
03011
03012 static const OptionDef options[] = {
03013 #include "cmdutils_common_opts.h"
03014 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
03015 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
03016 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
03017 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
03018 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
03019 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
03020 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
03021 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
03022 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
03023 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
03024 { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
03025 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
03026 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
03027 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
03028 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
03029 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
03030 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
03031 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
03032 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
03033 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
03034 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
03035 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
03036 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
03037 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
03038 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
03039 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
03040 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
03041 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
03042 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
03043 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
03044 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
03045 { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
03046 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
03047 { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
03048 { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
03049 #if CONFIG_AVFILTER
03050 { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
03051 #endif
03052 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
03053 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
03054 { NULL, },
03055 };
03056
03057 static void show_usage(void)
03058 {
03059 printf("Simple media player\n");
03060 printf("usage: ffplay [options] input_file\n");
03061 printf("\n");
03062 }
03063
03064 static void show_help(void)
03065 {
03066 show_usage();
03067 show_help_options(options, "Main options:\n",
03068 OPT_EXPERT, 0);
03069 show_help_options(options, "\nAdvanced options:\n",
03070 OPT_EXPERT, OPT_EXPERT);
03071 printf("\nWhile playing:\n"
03072 "q, ESC quit\n"
03073 "f toggle full screen\n"
03074 "p, SPC pause\n"
03075 "a cycle audio channel\n"
03076 "v cycle video channel\n"
03077 "t cycle subtitle channel\n"
03078 "w show audio waves\n"
03079 "s activate frame-step mode\n"
03080 "left/right seek backward/forward 10 seconds\n"
03081 "down/up seek backward/forward 1 minute\n"
03082 "mouse click seek to percentage in file corresponding to fraction of width\n"
03083 );
03084 }
03085
03086 static void opt_input_file(const char *filename)
03087 {
03088 if (input_filename) {
03089 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03090 filename, input_filename);
03091 exit(1);
03092 }
03093 if (!strcmp(filename, "-"))
03094 filename = "pipe:";
03095 input_filename = filename;
03096 }
03097
03098
03099 int main(int argc, char **argv)
03100 {
03101 int flags, i;
03102
03103
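/* register all codecs, devices, filters and (de)muxers */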
03104 avcodec_register_all();
03105 #if CONFIG_AVDEVICE
03106 avdevice_register_all();
03107 #endif
03108 #if CONFIG_AVFILTER
03109 avfilter_register_all();
03110 #endif
03111 av_register_all();
03112
03113 for(i=0; i<AVMEDIA_TYPE_NB; i++){
03114 avcodec_opts[i]= avcodec_alloc_context2(i);
03115 }
03116 avformat_opts = avformat_alloc_context();
03117 #if !CONFIG_AVFILTER
03118 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
03119 #endif
03120
03121 show_banner();
03122
03123 parse_options(argc, argv, options, opt_input_file);
03124
03125 if (!input_filename) {
03126 show_usage();
03127 fprintf(stderr, "An input file must be specified\n");
03128 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
03129 exit(1);
03130 }
03131
03132 if (display_disable) {
03133 video_disable = 1;
03134 }
03135 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03136 #if !defined(__MINGW32__) && !defined(__APPLE__)
03137 flags |= SDL_INIT_EVENTTHREAD;
03138 #endif
03139 if (SDL_Init (flags)) {
03140 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03141 exit(1);
03142 }
03143
03144 if (!display_disable) {
03145 #if HAVE_SDL_VIDEO_SIZE
03146 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03147 fs_screen_width = vi->current_w;
03148 fs_screen_height = vi->current_h;
03149 #endif
03150 }
03151
03152 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03153 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03154 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03155
03156 av_init_packet(&flush_pkt);
03157 flush_pkt.data= "FLUSH";
03158
03159 cur_stream = stream_open(input_filename, file_iformat);
03160
03161 event_loop();
03162
03163
03164
03165 return 0;
03166 }