Decode data from an MP2 input file and generate a raw audio file to be played with ffplay.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavutil/frame.h>
#include <libavutil/mem.h>

#include <libavcodec/avcodec.h>

#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}

static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
                   FILE *outfile)
{
    int i, ch;
    int ret, data_size;

    /* send the packet with the compressed data to the decoder */
    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error submitting the packet to the decoder\n");
        exit(1);
    }

    /* read all the output frames (in general there may be any number of them) */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }
        data_size = av_get_bytes_per_sample(dec_ctx->sample_fmt);
        if (data_size < 0) {
            /* This should not occur, checking just for paranoia */
            fprintf(stderr, "Failed to calculate data size\n");
            exit(1);
        }
        for (i = 0; i < frame->nb_samples; i++)
            for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++)
                fwrite(frame->data[ch] + data_size * i, 1, data_size, outfile);
    }
}

int main(int argc, char **argv)
{
    const char *outfilename, *filename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVCodecParserContext *parser = NULL;
    int len, ret;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t data_size;
    AVPacket *pkt;
    AVFrame *decoded_frame = NULL;
    enum AVSampleFormat sfmt;
    int n_channels = 0;
    const char *fmt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];

    pkt = av_packet_alloc();

    /* find the MPEG audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "Parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    data      = inbuf;
    data_size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (data_size > 0) {
        if (!decoded_frame) {
            if (!(decoded_frame = av_frame_alloc())) {
                fprintf(stderr, "Could not allocate audio frame\n");
                exit(1);
            }
        }

        ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                               data, data_size,
                               AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (ret < 0) {
            fprintf(stderr, "Error while parsing\n");
            exit(1);
        }
        data      += ret;
        data_size -= ret;

        if (pkt->size)
            decode(c, pkt, decoded_frame, outfile);

        if (data_size < AUDIO_REFILL_THRESH) {
            memmove(inbuf, data, data_size);
            data = inbuf;
            len = fread(data + data_size, 1,
                        AUDIO_INBUF_SIZE - data_size, f);
            if (len > 0)
                data_size += len;
        }
    }

    /* flush the decoder */
    pkt->data = NULL;
    pkt->size = 0;
    decode(c, pkt, decoded_frame, outfile);

    /* print output PCM information; the raw stream carries no metadata */
    sfmt = c->sample_fmt;

    if (av_sample_fmt_is_planar(sfmt)) {
        const char *packed = av_get_sample_fmt_name(sfmt);
        printf("Warning: the sample format the decoder produced is planar "
               "(%s). This example will output the first channel only.\n",
               packed ? packed : "?");
        sfmt = av_get_packed_sample_fmt(sfmt);
    }

    n_channels = c->ch_layout.nb_channels;
    if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
        goto end;

    printf("Play the output audio file with the command:\n"
           "ffplay -f %s -ac %d -ar %d %s\n",
           fmt, n_channels, c->sample_rate,
           outfilename);
end:
    fclose(outfile);
    fclose(f);

    avcodec_free_context(&c);
    av_parser_close(parser);
    av_frame_free(&decoded_frame);
    av_packet_free(&pkt);

    return 0;
}
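The ffplay flags printed at the end follow directly from the raw PCM layout: every second of output occupies sample_rate * channels * bytes_per_sample bytes. The helper below is a minimal sketch of that arithmetic, not part of the example; the function name raw_duration_seconds is illustrative only. It uses av_get_bytes_per_sample() and the decoder context fields shown above to estimate the duration of the written file.

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Sketch only: estimate the playback duration of the raw output file from
 * the decoder parameters. The name is illustrative, not FFmpeg API. */
static double raw_duration_seconds(const AVCodecContext *c, long file_bytes)
{
    int bytes_per_sample = av_get_bytes_per_sample(c->sample_fmt);
    int channels         = c->ch_layout.nb_channels;

    if (bytes_per_sample <= 0 || channels <= 0 || c->sample_rate <= 0)
        return -1.0; /* unknown sample format or uninitialized context */

    /* raw PCM byte rate = sample_rate * channels * bytes_per_sample */
    return (double)file_bytes /
           ((double)c->sample_rate * channels * bytes_per_sample);
}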
#define AVERROR_EOF
End of file.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
int nb_channels
Number of channels in this layout.
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
AVChannelLayout ch_layout
Audio channel layout.
AVCodecParserContext * av_parser_init(int codec_id)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define FF_ARRAY_ELEMS(a)
#define AUDIO_REFILL_THRESH
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used).
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
static int get_format_from_sample_fmt(const char **fmt, enum AVSampleFormat sample_fmt)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
enum AVSampleFormat sample_fmt
audio sample format
#define AV_NOPTS_VALUE
Undefined timestamp value.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
int main(int argc, char **argv)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
AVCodecContext
main external API structure.
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos)
Parse a packet.
This structure stores compressed data.
@ AV_SAMPLE_FMT_DBL
double
@ AV_SAMPLE_FMT_S32
signed 32 bits
void av_parser_close(AVCodecParserContext *s)
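The entries above for avcodec_send_packet() and avcodec_receive_frame() describe the two halves of the decode loop used by decode() in the listing. The sketch below isolates that send/receive pattern; the decode_packet name and the on_frame callback are illustrative, not FFmpeg API.

#include <libavcodec/avcodec.h>

/* Sketch only: the generic send/receive pattern that decode() above follows.
 * "on_frame" is an illustrative callback supplied by the caller. */
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt,
                         AVFrame *frame,
                         void (*on_frame)(const AVFrame *frame, void *opaque),
                         void *opaque)
{
    /* a NULL or empty packet switches the decoder into draining mode */
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    /* one packet may yield zero, one or several frames */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   /* needs more input, or fully drained */
        else if (ret < 0)
            return ret; /* a real decoding error */
        on_frame(frame, opaque);
    }
    return 0;
}

Draining mode is why the example calls decode() one last time with pkt->data set to NULL and pkt->size set to 0: that flushes any frames still buffered inside the decoder after the input file has been consumed.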