Source listing excerpts from FFmpeg's ismindex tool (tools/ismindex.c), which indexes fragmented MP4/ISMV files and writes IIS Smooth Streaming manifests.
fprintf(stderr,
        "%s [-split] [-ismf] [-n basename] [-path-prefix prefix] "
        "[-ismc-prefix prefix] [-output dir] file1 [file2] ...\n", argv0);
/* expect_tag(): report a mismatch between the box tag that was read and the
 * one that was expected */
if (got_tag != expected_tag) {
    char got_tag_str[4], expected_tag_str[4];
    AV_WB32(got_tag_str, got_tag);
    AV_WB32(expected_tag_str, expected_tag);
    fprintf(stderr, "wanted tag %.4s, got %.4s\n",
            expected_tag_str, got_tag_str);
}
/* copy_tag(): report a short read while copying a box payload */
fprintf(stderr, "short read, wanted %d, got %d\n", len, got);
/* write_fragment(): report failure to open the output fragment file */
fprintf(stderr, "Unable to open %s: %s\n", filename, errbuf);
/* write_fragments(): write each indexed moof/mdat pair out as
 * QualityLevels(bitrate)/Fragments(type=time) files, plus an optional
 * .ismf index */
static int write_fragments(struct Tracks *tracks, int start_index,
                           AVIOContext *in, const char *basename,
                           int split, int ismf, const char *output_prefix)
{
    char dirname[2048], filename[2048], idxname[2048];
    int i, j, ret = 0, fragment_ret;
    FILE *out = NULL;
    /* ... */
    snprintf(idxname, sizeof(idxname), "%s%s.ismf", output_prefix, basename);
    out = fopen(idxname, "w");
    /* ... */
    snprintf(dirname, sizeof(dirname), "%sQualityLevels(%d)",
             output_prefix, track->bitrate);
    if (mkdir(dirname, 0777) == -1 && errno != EEXIST) {
        /* ... */
    }
    for (j = 0; j < track->chunks; j++) {
        snprintf(filename, sizeof(filename), "%s/Fragments(%s=%"PRId64")",
                 dirname, track->is_audio ? "audio" : "video",
                 track->offsets[j].time);
        /* ... */
        if (fragment_ret != 0) {
            fprintf(stderr, "failed fragment %d in track %d (%s)\n",
                    j, track->track_id, track->name);
            /* ... */
        }
    }
/* read_trun_duration(): walk the sample entries of a trun box and return
 * the fragment duration as max_pts - first_pts */
int64_t first_pts = 0;
/* ... */
fprintf(stderr, "No sample duration in trun flags\n");
/* ... */
for (i = 0; i < entries && pos < end; i++) {
    int sample_duration = default_duration;
    /* ... */
    if (sample_duration < 0) {
        fprintf(stderr, "Negative sample duration %d\n", sample_duration);
        /* ... */
    }
    max_pts = FFMAX(max_pts, pts + sample_duration);
    dts += sample_duration;
    /* ... */
}
return max_pts - first_pts;
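Which per-sample fields exist in each trun entry is governed by the MOV_TRUN_* flag bits (the macros appear in the symbol reference below; they come from libavformat's internal isom.h, which in-tree tools can include). A hedged sketch of that conditional layout, not the tool's exact loop body:

/* Sketch: read one trun sample entry, honouring the optional fields that
 * the trun flags declare present. */
static void read_one_trun_sample(AVIOContext *in, int flags,
                                 int default_duration,
                                 int64_t *dts, int64_t *pts)
{
    int sample_duration = default_duration;

    if (flags & MOV_TRUN_SAMPLE_DURATION)
        sample_duration = avio_rb32(in);  /* per-sample duration */
    if (flags & MOV_TRUN_SAMPLE_SIZE)
        avio_rb32(in);                    /* sample size, not needed here */
    if (flags & MOV_TRUN_SAMPLE_FLAGS)
        avio_rb32(in);                    /* sample flags, not needed here */
    *pts = *dts;
    if (flags & MOV_TRUN_SAMPLE_CTS)
        *pts += avio_rb32(in);            /* composition time offset */

    *dts += sample_duration;
}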
/* read_moof_duration(): walk a moof box, find the traf/trun inside it and
 * return the fragment duration parsed from the trun */
int default_duration = 0;
/* ... */
int64_t traf_pos  = pos;
int64_t traf_size = size;
while (pos < traf_pos + traf_size) {
    /* ... */
}
fprintf(stderr, "Couldn't find trun\n");
/* ... */
fprintf(stderr, "Couldn't find traf\n");
/* read_tfra(): parse one tfra box, match it to a known track and record the
 * chunk start times and offsets it lists */
for (i = start_index; i < tracks->nb_tracks && !track; i++)
    if (tracks->tracks[i]->track_id == track_id)
        track = tracks->tracks[i];
/* ... */
/* the low bits of fieldlength give the byte sizes (minus one) of the
 * traf/trun/sample number fields, which are skipped here */
for (j = 0; j < ((fieldlength >> 4) & 3) + 1; j++)
    avio_r8(f);
for (j = 0; j < ((fieldlength >> 2) & 3) + 1; j++)
    avio_r8(f);
for (j = 0; j < ((fieldlength >> 0) & 3) + 1; j++)
    avio_r8(f);
/* ... */
fprintf(stderr, "Calculated last chunk duration for track %d "
                "was non-positive (%"PRId64"), probably due to missing "
                /* ... */);
fprintf(stderr, "corrected to %"PRId64"\n",
        /* ... */);
/* ... */
fprintf(stderr, "Track duration corrected to %"PRId64"\n",
        /* ... */);
/* read_mfra(): locate the mfra box at the end of the file and read the
 * tfra entries for each track */
static int read_mfra(struct Tracks *tracks, int start_index, const char *file,
                     int split, int ismf, const char *basename,
                     const char *output_prefix)
{
    const char *err_str = "";
    /* ... */
    err_str = "mfra size mismatch";
    /* ... */
    err_str = "mfra tag mismatch";
    /* ... */
    err_str = "error in write_fragments";
    /* ... */
    fprintf(stderr, "Unable to read the MFRA atom in %s (%s)\n",
            file, err_str);
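For orientation, the mfra box is located from the end of the file: the trailing mfro box stores the total mfra size in its last four bytes, so the reader seeks there, reads the size, and seeks back by that amount. A sketch of that lookup, assuming an already opened AVIOContext *f (illustrative, not copied from the tool):

int32_t mfra_size;

avio_seek(f, avio_size(f) - 4, SEEK_SET);
mfra_size = avio_rb32(f);
avio_seek(f, -mfra_size, SEEK_CUR);
if (avio_rb32(f) != mfra_size)                    /* -> "mfra size mismatch" */
    return AVERROR_INVALIDDATA;
if (avio_rb32(f) != MKBETAG('m', 'f', 'r', 'a'))  /* -> "mfra tag mismatch" */
    return AVERROR_INVALIDDATA;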
/* get_video_private_data(): build CodecPrivateData from the SPS/PPS in the
 * H.264 extradata */
uint16_t sps_size, pps_size;
/* handle_file(): open one input file with libavformat, create a Track entry
 * per usable stream, then index its fragments via read_mfra() */
static int handle_file(struct Tracks *tracks, const char *file, int split,
                       int ismf, const char *basename,
                       const char *output_prefix)
{
    int err = 0, i, orig_tracks = tracks->nb_tracks;
    char errbuf[50], *ptr;
    /* ... */
    fprintf(stderr, "Unable to open %s: %s\n", file, errbuf);
    /* ... */
    fprintf(stderr, "Unable to identify %s: %s\n", file, errbuf);
    /* ... */
    fprintf(stderr, "No streams found in %s\n", file);
    /* ... */
    fprintf(stderr, "Skipping track %d in %s as it has zero bitrate\n",
            /* ... */);
    /* ... */
    /* use the part of the path after the last '/' as the track name */
    if ((ptr = strrchr(file, '/')))
        track->name = ptr + 1;
    /* ... */
    fprintf(stderr,
            "Track %d in %s is neither video nor audio, skipping\n",
            /* ... */);
/* output_server_manifest(): write the IIS Smooth Streaming server manifest
 * (<basename>.ism), a small SMIL file referencing every track */
static void output_server_manifest(struct Tracks *tracks, const char *basename,
                                   const char *output_prefix,
                                   const char *path_prefix,
                                   const char *ismc_prefix)
{
    /* ... */
    snprintf(filename, sizeof(filename), "%s%s.ism", output_prefix, basename);
    out = fopen(filename, "w");
    /* ... */
    fprintf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    fprintf(out, "<smil xmlns=\"http://www.w3.org/2001/SMIL20/Language\">\n");
    fprintf(out, "\t<head>\n");
    fprintf(out, "\t\t<meta name=\"clientManifestRelativePath\" "
                 "content=\"%s%s.ismc\" />\n", ismc_prefix, basename);
    fprintf(out, "\t</head>\n");
    fprintf(out, "\t<body>\n");
    fprintf(out, "\t\t<switch>\n");
    /* ... one entry per track ... */
    fprintf(out, "\t\t\t<%s src=\"%s%s\" systemBitrate=\"%d\">\n",
            type, path_prefix, track->name, track->bitrate);
    fprintf(out, "\t\t\t\t<param name=\"trackID\" value=\"%d\" "
                 "valueType=\"data\" />\n", track->track_id);
    fprintf(out, "\t\t\t</%s>\n", type);
    /* ... */
    fprintf(out, "\t\t</switch>\n");
    fprintf(out, "\t</body>\n");
    fprintf(out, "</smil>\n");
/* print_track_chunks(): emit one <c> element per chunk for the client
 * manifest, warning when parallel tracks disagree on chunk timing */
int should_print_time_mismatch = 1;
/* ... */
fprintf(stderr,
        "Mismatched duration of %s chunk %d in %s (%d) and %s (%d)\n",
        /* ... */);
should_print_time_mismatch = 1;
/* ... */
if (should_print_time_mismatch)
    fprintf(stderr,
            "Mismatched (start) time of %s chunk %d in %s (%d) and %s (%d)\n",
            /* ... */);
should_print_time_mismatch = 0;
/* ... */
fprintf(out, "\t\t<c n=\"%d\" d=\"%"PRId64"\" ",
        i, track->offsets[i].duration);
/* ... */
fprintf(out, "/>\n");
/* output_client_manifest(): write the client manifest (<basename>.ismc, or
 * "Manifest" in -split mode) describing quality levels and chunk timing */
static void output_client_manifest(struct Tracks *tracks, const char *basename,
                                   const char *output_prefix, int split)
{
    /* ... */
    if (split)
        snprintf(filename, sizeof(filename), "%sManifest", output_prefix);
    else
        snprintf(filename, sizeof(filename), "%s%s.ismc", output_prefix,
                 basename);
    out = fopen(filename, "w");
    /* ... */
    fprintf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    fprintf(out, "<SmoothStreamingMedia MajorVersion=\"2\" MinorVersion=\"0\" "
                 "Duration=\"%"PRId64"\">\n", tracks->duration * 10);
    /* video stream index */
    struct Track *first_track = track;
    /* ... */
    fprintf(out,
            "\t<StreamIndex Type=\"video\" QualityLevels=\"%d\" "
            /* ... */
            "Url=\"QualityLevels({bitrate})/Fragments(video={start time})\">\n",
            /* ... */);
    fprintf(out,
            "\t\t<QualityLevel Index=\"%d\" Bitrate=\"%d\" "
            "FourCC=\"%s\" MaxWidth=\"%d\" MaxHeight=\"%d\" "
            "CodecPrivateData=\"",
            /* ... */);
    /* ... */
    fprintf(out, "\" />\n");
    /* ... */
    fprintf(stderr,
            "Mismatched number of video chunks in %s (id: %d, chunks %d) "
            "and %s (id: %d, chunks %d)\n",
            /* ... */);
    /* ... */
    fprintf(out, "\t</StreamIndex>\n");

    /* audio stream index */
    struct Track *first_track = track;
    /* ... */
    fprintf(out,
            "\t<StreamIndex Type=\"audio\" QualityLevels=\"%d\" "
            /* ... */
            "Url=\"QualityLevels({bitrate})/Fragments(audio={start time})\">\n",
            /* ... */);
    fprintf(out,
            "\t\t<QualityLevel Index=\"%d\" Bitrate=\"%d\" "
            "FourCC=\"%s\" SamplingRate=\"%d\" Channels=\"%d\" "
            "BitsPerSample=\"16\" PacketSize=\"%d\" "
            "AudioTag=\"%d\" CodecPrivateData=\"",
            /* ... */);
    /* ... */
    fprintf(out, "\" />\n");
    /* ... */
    fprintf(stderr, "Mismatched number of audio chunks in %s and %s\n",
            /* ... */);
    /* ... */
    fprintf(out, "\t</StreamIndex>\n");

    fprintf(out, "</SmoothStreamingMedia>\n");
int main(int argc, char **argv)
{
    const char *basename      = NULL;
    const char *path_prefix   = "", *ismc_prefix = "";
    const char *output_prefix = "";
    char output_prefix_buf[2048];
    int split = 0, ismf = 0, i;
    struct Tracks tracks = { 0, .video_track = -1, .audio_track = -1 };

    for (i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-n")) {
            basename = argv[i + 1];
            i++;
        } else if (!strcmp(argv[i], "-path-prefix")) {
            path_prefix = argv[i + 1];
            i++;
        } else if (!strcmp(argv[i], "-ismc-prefix")) {
            ismc_prefix = argv[i + 1];
            i++;
        } else if (!strcmp(argv[i], "-output")) {
            output_prefix = argv[i + 1];
            i++;
            /* make sure the output prefix ends in a '/' */
            if (output_prefix[strlen(output_prefix) - 1] != '/') {
                snprintf(output_prefix_buf, sizeof(output_prefix_buf),
                         "%s/", output_prefix);
                output_prefix = output_prefix_buf;
            }
        } else if (!strcmp(argv[i], "-split")) {
            split = 1;
        } else if (!strcmp(argv[i], "-ismf")) {
            ismf = 1;
        } else if (argv[i][0] == '-') {
            return usage(argv[0], 1);
        } else {
            if (handle_file(&tracks, argv[i], split, ismf,
                            basename, output_prefix))
                /* ... */;
        }
    }
    if (/* ... */)
        return usage(argv[0], 1);
    /* ... */
    output_server_manifest(&tracks, basename, output_prefix,
                           path_prefix, ismc_prefix);
    /* ... */
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
#define MOV_TRUN_SAMPLE_FLAGS
static int64_t read_moof_duration(AVIOContext *in, int64_t offset)
enum AVMediaType codec_type
General type of the encoded data.
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
This struct describes the properties of an encoded stream.
#define AVERROR_EOF
End of file.
static int write_fragment(const char *filename, AVIOContext *in)
AVStream ** streams
A list of all streams in the file.
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
static void output_server_manifest(struct Tracks *tracks, const char *basename, const char *output_prefix, const char *path_prefix, const char *ismc_prefix)
int64_t avio_size(AVIOContext *s)
Get the filesize.
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
static int read_mfra(struct Tracks *tracks, int start_index, const char *file, int split, int ismf, const char *basename, const char *output_prefix)
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
static int expect_tag(int32_t got_tag, int32_t expected_tag)
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
static void output_client_manifest(struct Tracks *tracks, const char *basename, const char *output_prefix, int split)
static int write_fragments(struct Tracks *tracks, int start_index, AVIOContext *in, const char *basename, int split, int ismf, const char *output_prefix)
#define MOV_TRUN_SAMPLE_DURATION
static void clean_tracks(struct Tracks *tracks)
AV_ROUND_UP
Round toward +infinity.
int64_t duration
Decoding: duration of the stream, in stream time base.
#define MOV_TRUN_DATA_OFFSET
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
Return the written size and a pointer to the buffer.
unsigned int avio_rb32(AVIOContext *s)
static int skip_tag(AVIOContext *in, int32_t tag_name)
int avio_open_dyn_buf(AVIOContext **s)
Open a write only memory stream.
#define MOV_TFHD_DEFAULT_DURATION
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
#define MOV_TRUN_FIRST_SAMPLE_FLAGS
#define AVIO_FLAG_WRITE
write-only
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
static int usage(const char *argv0, int ret)
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
uint64_t avio_rb64(AVIOContext *s)
static int read_tfra(struct Tracks *tracks, int start_index, AVIOContext *f)
#define MOV_TFHD_BASE_DATA_OFFSET
int sample_rate
Audio only.
int extradata_size
Size of the extradata content in bytes.
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
Rescale a 64-bit integer with specified rounding.
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
unsigned int avio_rb24(AVIOContext *s)
int main(int argc, char **argv)
#define MKBETAG(a, b, c, d)
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
void avio_wb32(AVIOContext *s, unsigned int val)
int avio_r8(AVIOContext *s)
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
static void print_track_chunks(FILE *out, struct Tracks *tracks, int main, const char *type)
#define AV_TIME_BASE
Internal time base represented as integer.
int block_align
Audio only.
static int get_video_private_data(struct Track *track, AVCodecParameters *codecpar)
int id
Format-specific stream ID.
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
fseek() equivalent for AVIOContext.
static int get_private_data(struct Track *track, AVCodecParameters *codecpar)
struct MoofOffset * offsets
static int64_t read_trun_duration(AVIOContext *in, int default_duration, int64_t end)
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
static int skip_fragment(AVIOContext *in)
#define AVIO_FLAG_READ
read-only
static int handle_file(struct Tracks *tracks, const char *file, int split, int ismf, const char *basename, const char *output_prefix)
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
#define MOV_TRUN_SAMPLE_SIZE
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define MOV_TRUN_SAMPLE_CTS
static int copy_tag(AVIOContext *in, AVIOContext *out, int32_t tag_name)