46 #define STREAM_DURATION 10.0
47 #define STREAM_FRAME_RATE 25
48 #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P
50 #define SCALE_FLAGS SWS_BICUBIC
74 printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
89 fprintf(stderr, "Error sending a frame to the encoder: %s\n",
101 fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
114 fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
133 fprintf(stderr, "Could not find encoder for '%s'\n",
140 fprintf(stderr, "Could not allocate stream\n");
146 fprintf(stderr, "Could not alloc an encoding context\n");
151 switch ((*codec)->type) {
153 c->sample_fmt = (*codec)->sample_fmts ?
156 c->sample_rate = 44100;
157 if ((*codec)->supported_samplerates) {
158 c->sample_rate = (*codec)->supported_samplerates[0];
159 for (
i = 0; (*codec)->supported_samplerates[
i];
i++) {
160 if ((*codec)->supported_samplerates[
i] == 44100)
161 c->sample_rate = 44100;
166 if ((*codec)->channel_layouts) {
167 c->channel_layout = (*codec)->channel_layouts[0];
168 for (i = 0; (*codec)->channel_layouts[i]; i++) {
180 c->bit_rate = 400000;
218 uint64_t channel_layout,
225 fprintf(stderr, "Error allocating an audio frame\n");
229 frame->format = sample_fmt;
230 frame->channel_layout = channel_layout;
232 frame->nb_samples = nb_samples;
237 fprintf(stderr, "Error allocating an audio buffer\n");
259 fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
265 ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
267 ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
272 nb_samples = c->frame_size;
275 c->sample_rate, nb_samples);
277 c->sample_rate, nb_samples);
282 fprintf(stderr, "Could not copy the stream parameters\n");
289 fprintf(stderr, "Could not allocate resampler context\n");
303 fprintf(stderr, "Failed to initialize the resampling context\n");
314 int16_t *q = (int16_t*)frame->data[0];
321 for (j = 0; j < frame->nb_samples; j++) {
322 v = (int)(sin(ost->t) * 10000);
323 for (i = 0; i < ost->enc->channels; i++)
326 ost->tincr += ost->tincr2;
367 ost->frame->data, dst_nb_samples,
370 fprintf(stderr, "Error while converting\n");
376 ost->samples_count += dst_nb_samples;
401 fprintf(stderr, "Could not allocate frame data.\n");
420 fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
427 fprintf(stderr, "Could not allocate video frame\n");
437 if (!ost->tmp_frame) {
438 fprintf(stderr, "Could not allocate temporary picture\n");
446 fprintf(stderr, "Could not copy the stream parameters\n");
460 for (y = 0; y < height; y++)
498 "Could not initialize the conversion context\n");
504 ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
505 ost->frame->linesize);
510 ost->frame->pts = ost->next_pts++;
537 int main(int argc, char **argv)
540 const char *filename;
543 AVCodec *audio_codec, *video_codec;
545 int have_video = 0, have_audio = 0;
546 int encode_video = 0, encode_audio = 0;
551 printf("usage: %s output_file\n"
552 "API example program to output a media file with libavformat.\n"
553 "This program generates a synthetic audio and video stream, encodes and\n"
554 "muxes them into a file named output_file.\n"
555 "The output format is automatically guessed according to the file extension.\n"
556 "Raw images can also be output by using '%%d' in the filename.\n"
562 for (i = 2; i+1 < argc; i+=2) {
563 if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
570 printf("Could not deduce output format from file extension: using MPEG.\n");
605 fprintf(stderr, "Could not open '%s': %s\n", filename,
614 fprintf(stderr, "Error occurred when opening output file: %s\n",
619 while (encode_video || encode_audio) {
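The loop shown above keeps going as long as either stream still has data to encode, picking whichever stream is furthest behind. A minimal sketch of that interleaving decision using av_compare_ts follows; the MiniStream struct and video_is_due name are illustrative, not part of the listing.

    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    /* Hypothetical per-stream state, mirroring the OutputStream idea in the
     * example: next_pts counts the frames/samples generated so far. */
    struct MiniStream {
        AVCodecContext *enc;
        int64_t next_pts;
    };

    /* Return nonzero when the video stream is not ahead of the audio stream,
     * i.e. when the next frame to encode should be a video frame. */
    static int video_is_due(const struct MiniStream *video, const struct MiniStream *audio)
    {
        return av_compare_ts(video->next_pts, video->enc->time_base,
                             audio->next_pts, audio->enc->time_base) <= 0;
    }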
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
@ AV_SAMPLE_FMT_FLTP
float, planar
AVPixelFormat
Pixel format.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
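A minimal sketch of the send/receive encoding pattern this function belongs to, assuming an already opened encoder context; the sink callback is a stand-in for whatever writes the packet (in the example, the muxer), and error handling is reduced to returning the error code.

    #include <libavcodec/avcodec.h>

    /* Encode one frame and hand every resulting packet to a caller-supplied sink. */
    static int encode(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt,
                      int (*sink)(AVPacket *pkt, void *opaque), void *opaque)
    {
        int ret = avcodec_send_frame(enc, frame);
        if (ret < 0)
            return ret;

        while (ret >= 0) {
            ret = avcodec_receive_packet(enc, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;              /* encoder needs more input, or is drained */
            else if (ret < 0)
                return ret;            /* real encoding error */

            ret = sink(pkt, opaque);   /* e.g. hand the packet to the muxer */
            av_packet_unref(pkt);
            if (ret < 0)
                return ret;
        }
        return 0;
    }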
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
static AVFrame * alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
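The listing allocates its video and audio frames through this call. A self-contained sketch of allocating a writable video frame is shown below; alloc_video_frame is an illustrative name, and passing 0 for the alignment lets the library pick a suitable value.

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Allocate a video AVFrame together with its data buffers; NULL on failure. */
    static AVFrame *alloc_video_frame(enum AVPixelFormat pix_fmt, int width, int height)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;

        frame->format = pix_fmt;
        frame->width  = width;
        frame->height = height;

        if (av_frame_get_buffer(frame, 0) < 0) {  /* 0: default alignment */
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }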
#define AVERROR_EOF
End of file.
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
AVStream ** streams
A list of all streams in the file.
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c, AVStream *st, AVFrame *frame)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
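In the example this is what converts the generated YUV420P picture when the encoder expects a different pixel format. A hedged sketch of the usual sws_getContext/sws_scale pairing follows; convert_frame is an illustrative name and both frames are assumed to already have buffers matching their width, height and format.

    #include <libswscale/swscale.h>
    #include <libavutil/frame.h>
    #include <libavutil/error.h>

    /* Convert one frame from src to dst, lazily creating the scaler context. */
    static int convert_frame(struct SwsContext **sws_ctx,
                             const AVFrame *src, AVFrame *dst)
    {
        if (!*sws_ctx) {
            *sws_ctx = sws_getContext(src->width, src->height,
                                      (enum AVPixelFormat)src->format,
                                      dst->width, dst->height,
                                      (enum AVPixelFormat)dst->format,
                                      SWS_BICUBIC, NULL, NULL, NULL);
            if (!*sws_ctx)
                return AVERROR(EINVAL);
        }
        sws_scale(*sws_ctx, (const uint8_t * const *)src->data, src->linesize,
                  0, src->height, dst->data, dst->linesize);
        return 0;
    }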
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
static AVFrame * get_video_frame(OutputStream *ost)
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
@ AV_ROUND_UP
Round toward +infinity.
#define AV_CH_LAYOUT_STEREO
int64_t swr_get_delay(struct SwrContext *s, int64_t base)
Gets the delay the next input sample will experience relative to the next output sample.
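In the audio path, the samples still buffered inside the resampler are added to the new input samples and rescaled to the output rate to get the number of destination samples to request. A small sketch of that computation, assuming in/out sample rates have already been configured on swr_ctx (output_samples_needed is an illustrative name):

    #include <libswresample/swresample.h>
    #include <libavutil/mathematics.h>

    /* How many output samples swr_convert should be asked for, given
     * in_nb_samples new input samples plus whatever is still buffered. */
    static int64_t output_samples_needed(struct SwrContext *swr_ctx,
                                         int in_rate, int out_rate, int in_nb_samples)
    {
        return av_rescale_rnd(swr_get_delay(swr_ctx, in_rate) + in_nb_samples,
                              out_rate, in_rate, AV_ROUND_UP);
    }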
int main(int argc, char **argv)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
struct SwrContext * swr_ctx
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static AVFrame * alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples)
static AVFrame * get_audio_frame(OutputStream *ost)
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
av_cold struct SwrContext * swr_alloc(void)
Allocate SwrContext.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AVIO_FLAG_WRITE
write-only
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
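Timestamp conversion between time bases is done with this function; for instance, turning the running audio sample count into a pts in the encoder's time base could look like the sketch below (samples_to_pts and its parameters are hypothetical names).

    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    /* Convert a count of samples at sample_rate Hz into a pts in enc_time_base. */
    static int64_t samples_to_pts(int64_t samples_count, int sample_rate,
                                  AVRational enc_time_base)
    {
        AVRational sample_tb = { 1, sample_rate };   /* one tick per sample */
        return av_rescale_q(samples_count, sample_tb, enc_time_base);
    }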
static enum AVPixelFormat pix_fmt
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
The libswresample context.
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
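The overall muxing lifecycle around this call, reduced to its essentials; mux_skeleton is an illustrative name, and the real example also adds streams and opens the codecs before writing the header.

    #include <libavformat/avformat.h>

    /* Allocate the output context, open the file, write header and trailer. */
    static int mux_skeleton(const char *filename)
    {
        AVFormatContext *oc = NULL;
        int ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
        if (ret < 0)
            return ret;

        if (!(oc->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
            if (ret < 0)
                goto end;
        }

        ret = avformat_write_header(oc, NULL);
        if (ret < 0)
            goto end;

        /* ... av_interleaved_write_frame() calls would go here ... */

        ret = av_write_trailer(oc);
    end:
        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_closep(&oc->pb);
        avformat_free_context(oc);
        return ret;
    }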
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static AVFormatContext * fmt_ctx
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
AVIOContext * pb
I/O context.
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
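Encoder options are passed to this call as an AVDictionary. A sketch of the pattern the example uses, copying the caller's options so the open call can consume them without clobbering the original (open_encoder is an illustrative name; opt_arg stands for whatever the user supplied on the command line):

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* Open the encoder with a private copy of the user-supplied options. */
    static int open_encoder(AVCodecContext *c, const AVCodec *codec,
                            AVDictionary *opt_arg)
    {
        AVDictionary *opt = NULL;
        int ret;

        av_dict_copy(&opt, opt_arg, 0);          /* do not modify the caller's dict */
        ret = avcodec_open2(c, codec, &opt);     /* consumed options are removed    */
        av_dict_free(&opt);                      /* free whatever was not consumed  */
        return ret;
    }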
struct SwsContext * sws_ctx
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
ff_const59 struct AVOutputFormat * oformat
The output container format.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
AVCodecID
Identify the syntax and semantics of the bitstream.
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
Rescale a 64-bit integer with specified rounding.
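(See the swr_get_delay sketch above for how this rounding rescale is used to size the resampler output.)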
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
printf("static const uint8_t my_array[100] = {\n")
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
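Packets coming out of the encoder still carry timestamps in the codec time base; they have to be rescaled to the stream time base before muxing. A minimal sketch matching the write_frame() idea in the listing (rescale_and_write is an illustrative name):

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    /* Rescale packet timing from the encoder time base to the stream time base
     * and hand it to the muxer, which takes ownership of the packet data. */
    static int rescale_and_write(AVFormatContext *fmt_ctx, AVCodecContext *c,
                                 AVStream *st, AVPacket *pkt)
    {
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;
        return av_interleaved_write_frame(fmt_ctx, pkt);
    }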
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
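Passing NULL as the frame puts the encoder into draining mode, which is how the example flushes buffered packets at end of stream. A short sketch, assuming pkt is a reusable AVPacket (flush_encoder is an illustrative name):

    #include <libavcodec/avcodec.h>

    /* Send a NULL frame once, then drain packets until AVERROR_EOF. */
    static int flush_encoder(AVCodecContext *enc, AVPacket *pkt)
    {
        int ret = avcodec_send_frame(enc, NULL);   /* enter draining mode */
        while (ret >= 0) {
            ret = avcodec_receive_packet(enc, pkt);
            if (ret == AVERROR_EOF)
                return 0;                          /* everything drained */
            if (ret < 0)
                return ret;
            /* ... mux or otherwise consume pkt here ... */
            av_packet_unref(pkt);
        }
        return ret;
    }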
int id
Format-specific stream ID.
main external API structure.
int index
stream index in AVFormatContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
static void close_stream(AVFormatContext *oc, OutputStream *ost)
This structure stores compressed data.
#define STREAM_FRAME_RATE
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags)
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
static void add_stream(OutputStream *ost, AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)