/*
 * libavformat API example: generate a synthetic audio stream (a rising
 * sine tone) and a synthetic video stream (a moving YUV test pattern),
 * encode them and mux them into an output file whose container format
 * is guessed from the file extension.
 *
 * NOTE(review): the original copyright/license header was lost in
 * extraction (only line-number residue remained here) — restore it from
 * the upstream FFmpeg doc/examples/muxing.c before redistribution.
 */
00031 #include <stdlib.h>
00032 #include <stdio.h>
00033 #include <string.h>
00034 #include <math.h>
00035
00036 #include <libavutil/mathematics.h>
00037 #include <libavformat/avformat.h>
00038 #include <libswscale/swscale.h>
00039
00040
/* Total output duration in seconds and the fixed video frame rate. */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pixel format of the generated pattern */

/* Scaler algorithm used when converting to the encoder's pixel format. */
static int sws_flags = SWS_BICUBIC;

/* Audio generator state (single-threaded example, file-scope statics):
 * t = sine phase, tincr = phase step per sample, tincr2 = increment
 * applied to tincr each sample (makes the tone's frequency rise). */
static float t, tincr, tincr2;
static int16_t *samples;            /* interleaved S16 sample buffer */
static int audio_input_frame_size;  /* samples per channel per encoded frame */
00054
00055
00056
00057
00058 static AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
00059 enum AVCodecID codec_id)
00060 {
00061 AVCodecContext *c;
00062 AVStream *st;
00063
00064
00065 *codec = avcodec_find_encoder(codec_id);
00066 if (!(*codec)) {
00067 fprintf(stderr, "Could not find codec\n");
00068 exit(1);
00069 }
00070
00071 st = avformat_new_stream(oc, *codec);
00072 if (!st) {
00073 fprintf(stderr, "Could not allocate stream\n");
00074 exit(1);
00075 }
00076 st->id = 1;
00077
00078 c = st->codec;
00079
00080
00081 c->sample_fmt = AV_SAMPLE_FMT_S16;
00082 c->bit_rate = 64000;
00083 c->sample_rate = 44100;
00084 c->channels = 2;
00085
00086
00087 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
00088 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
00089
00090 return st;
00091 }
00092
00093 static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
00094 {
00095 AVCodecContext *c;
00096
00097 c = st->codec;
00098
00099
00100 if (avcodec_open2(c, codec, NULL) < 0) {
00101 fprintf(stderr, "could not open codec\n");
00102 exit(1);
00103 }
00104
00105
00106 t = 0;
00107 tincr = 2 * M_PI * 110.0 / c->sample_rate;
00108
00109 tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
00110
00111 if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
00112 audio_input_frame_size = 10000;
00113 else
00114 audio_input_frame_size = c->frame_size;
00115 samples = av_malloc(audio_input_frame_size *
00116 av_get_bytes_per_sample(c->sample_fmt) *
00117 c->channels);
00118 }
00119
00120
00121
00122 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
00123 {
00124 int j, i, v;
00125 int16_t *q;
00126
00127 q = samples;
00128 for (j = 0; j < frame_size; j++) {
00129 v = (int)(sin(t) * 10000);
00130 for (i = 0; i < nb_channels; i++)
00131 *q++ = v;
00132 t += tincr;
00133 tincr += tincr2;
00134 }
00135 }
00136
00137 static void write_audio_frame(AVFormatContext *oc, AVStream *st)
00138 {
00139 AVCodecContext *c;
00140 AVPacket pkt = { 0 };
00141 AVFrame *frame = avcodec_alloc_frame();
00142 int got_packet;
00143
00144 av_init_packet(&pkt);
00145 c = st->codec;
00146
00147 get_audio_frame(samples, audio_input_frame_size, c->channels);
00148 frame->nb_samples = audio_input_frame_size;
00149 avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
00150 (uint8_t *)samples,
00151 audio_input_frame_size *
00152 av_get_bytes_per_sample(c->sample_fmt) *
00153 c->channels, 1);
00154
00155 avcodec_encode_audio2(c, &pkt, frame, &got_packet);
00156 if (!got_packet)
00157 return;
00158
00159 pkt.stream_index = st->index;
00160
00161
00162 if (av_interleaved_write_frame(oc, &pkt) != 0) {
00163 fprintf(stderr, "Error while writing audio frame\n");
00164 exit(1);
00165 }
00166 avcodec_free_frame(&frame);
00167 }
00168
00169 static void close_audio(AVFormatContext *oc, AVStream *st)
00170 {
00171 avcodec_close(st->codec);
00172
00173 av_free(samples);
00174 }
00175
00176
00177
00178
/* Video output state (single-threaded example, file-scope statics). */
static AVFrame *frame;                     /* wraps dst_picture's planes (see open_video) */
static AVPicture src_picture, dst_picture; /* src: YUV420P pattern; dst: encoder pix_fmt */
static uint8_t *video_outbuf;              /* encode buffer, non-RAWPICTURE path only */
static int frame_count, video_outbuf_size;
00183
00184
/*
 * Add a video output stream to "oc" using the encoder for codec_id.
 * The encoder is stored through *codec; the new stream is returned.
 * Exits the process on failure.
 */
static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
                                  enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the video encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    avcodec_get_context_defaults3(c, *codec);

    c->codec_id = codec_id;

    /* sample parameters */
    c->bit_rate = 400000;
    /* resolution (352x288 = CIF; must be a multiple of two) */
    c->width = 352;
    c->height = 288;
    /* timebase: the fundamental unit of time (in seconds) in which
     * frame timestamps are represented; for fixed-fps content this is
     * 1/framerate with timestamp increments of 1 */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit at most one intra frame every 12 frames */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* mb_decision=2 avoids macroblocks in which some coefficients
         * overflow; harmless for normal video, needed here because the
         * chroma planes of the test pattern move differently from the
         * luma plane */
        c->mb_decision = 2;
    }

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
00239
00240 static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
00241 {
00242 int ret;
00243 AVCodecContext *c = st->codec;
00244
00245
00246 if (avcodec_open2(c, codec, NULL) < 0) {
00247 fprintf(stderr, "Could not open codec\n");
00248 exit(1);
00249 }
00250
00251 video_outbuf = NULL;
00252 if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
00253
00254
00255
00256
00257
00258
00259 video_outbuf_size = 200000;
00260 video_outbuf = av_malloc(video_outbuf_size);
00261 }
00262
00263
00264 frame = avcodec_alloc_frame();
00265 if (!frame) {
00266 fprintf(stderr, "Could not allocate video frame\n");
00267 exit(1);
00268 }
00269
00270
00271 ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
00272 if (ret < 0) {
00273 fprintf(stderr, "Could not allocate picture\n");
00274 exit(1);
00275 }
00276
00277
00278
00279
00280 if (c->pix_fmt != PIX_FMT_YUV420P) {
00281 ret = avpicture_alloc(&src_picture, PIX_FMT_YUV420P, c->width, c->height);
00282 if (ret < 0) {
00283 fprintf(stderr, "Could not allocate temporary picture\n");
00284 exit(1);
00285 }
00286 }
00287
00288
00289 *((AVPicture *)frame) = dst_picture;
00290 }
00291
00292
00293 static void fill_yuv_image(AVPicture *pict, int frame_index,
00294 int width, int height)
00295 {
00296 int x, y, i;
00297
00298 i = frame_index;
00299
00300
00301 for (y = 0; y < height; y++)
00302 for (x = 0; x < width; x++)
00303 pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
00304
00305
00306 for (y = 0; y < height / 2; y++) {
00307 for (x = 0; x < width / 2; x++) {
00308 pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
00309 pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
00310 }
00311 }
00312 }
00313
/*
 * Encode and mux one video frame into "oc"/"st".
 * While frame_count < STREAM_NB_FRAMES a fresh pattern is generated
 * (converted via swscale when the encoder's pix_fmt is not YUV420P);
 * after that the last picture is re-submitted so the encoder can flush
 * its delayed frames. Exits the process on encode/write errors.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx; /* lazily created, reused across calls */
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress: keep feeding the same picture so
         * the encoder emits its remaining delayed packets. */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* Draw in YUV420P then rescale/convert to the encoder's
             * pixel format. */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case: pass the AVPicture structure itself to the
         * muxer, no encoding. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = dst_picture.data[0];
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        AVPacket pkt;
        int got_output;

        av_init_packet(&pkt);
        pkt.data = NULL; /* packet data will be allocated by the encoder */
        pkt.size = 0;

        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame\n");
            exit(1);
        }

        /* got_output == 0 means the image was buffered by the encoder */
        if (got_output) {
            /* rescale the codec pts into the stream time base */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;

            /* write the compressed frame to the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
00396
00397 static void close_video(AVFormatContext *oc, AVStream *st)
00398 {
00399 avcodec_close(st->codec);
00400 av_free(src_picture.data[0]);
00401 av_free(dst_picture.data[0]);
00402 av_free(frame);
00403 av_free(video_outbuf);
00404 }
00405
00406
00407
00408
00409 int main(int argc, char **argv)
00410 {
00411 const char *filename;
00412 AVOutputFormat *fmt;
00413 AVFormatContext *oc;
00414 AVStream *audio_st, *video_st;
00415 AVCodec *audio_codec, *video_codec;
00416 double audio_pts, video_pts;
00417 int i;
00418
00419
00420 av_register_all();
00421
00422 if (argc != 2) {
00423 printf("usage: %s output_file\n"
00424 "API example program to output a media file with libavformat.\n"
00425 "The output format is automatically guessed according to the file extension.\n"
00426 "Raw images can also be output by using '%%d' in the filename\n"
00427 "\n", argv[0]);
00428 return 1;
00429 }
00430
00431 filename = argv[1];
00432
00433
00434 avformat_alloc_output_context2(&oc, NULL, NULL, filename);
00435 if (!oc) {
00436 printf("Could not deduce output format from file extension: using MPEG.\n");
00437 avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
00438 }
00439 if (!oc) {
00440 return 1;
00441 }
00442 fmt = oc->oformat;
00443
00444
00445
00446 video_st = NULL;
00447 audio_st = NULL;
00448 if (fmt->video_codec != AV_CODEC_ID_NONE) {
00449 video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
00450 }
00451 if (fmt->audio_codec != AV_CODEC_ID_NONE) {
00452 audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
00453 }
00454
00455
00456
00457 if (video_st)
00458 open_video(oc, video_codec, video_st);
00459 if (audio_st)
00460 open_audio(oc, audio_codec, audio_st);
00461
00462 av_dump_format(oc, 0, filename, 1);
00463
00464
00465 if (!(fmt->flags & AVFMT_NOFILE)) {
00466 if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
00467 fprintf(stderr, "Could not open '%s'\n", filename);
00468 return 1;
00469 }
00470 }
00471
00472
00473 if (avformat_write_header(oc, NULL) < 0) {
00474 fprintf(stderr, "Error occurred when opening output file\n");
00475 return 1;
00476 }
00477
00478 frame->pts = 0;
00479 for (;;) {
00480
00481 if (audio_st)
00482 audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
00483 else
00484 audio_pts = 0.0;
00485
00486 if (video_st)
00487 video_pts = (double)video_st->pts.val * video_st->time_base.num /
00488 video_st->time_base.den;
00489 else
00490 video_pts = 0.0;
00491
00492 if ((!audio_st || audio_pts >= STREAM_DURATION) &&
00493 (!video_st || video_pts >= STREAM_DURATION))
00494 break;
00495
00496
00497 if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
00498 write_audio_frame(oc, audio_st);
00499 } else {
00500 write_video_frame(oc, video_st);
00501 frame->pts++;
00502 }
00503 }
00504
00505
00506
00507
00508
00509 av_write_trailer(oc);
00510
00511
00512 if (video_st)
00513 close_video(oc, video_st);
00514 if (audio_st)
00515 close_audio(oc, audio_st);
00516
00517
00518 for (i = 0; i < oc->nb_streams; i++) {
00519 av_freep(&oc->streams[i]->codec);
00520 av_freep(&oc->streams[i]);
00521 }
00522
00523 if (!(fmt->flags & AVFMT_NOFILE))
00524
00525 avio_close(oc->pb);
00526
00527
00528 av_free(oc);
00529
00530 return 0;
00531 }