static double get_duration(AVFormatContext *s)
{
    /* Returns the longest stream duration (taken from the DURATION metadata tag). */
    /* ... */
    for (i = 0; i < s->nb_streams; i++) {
        /* ... */
    }
    /* ... */
}
static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;

    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(s->pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(s->pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" :
                             "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        /* ... */
        avio_printf(s->pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, " timeShiftBufferDepth=\"PT%gS\"\n",
                    w->time_shift_buffer_depth);
        avio_printf(s->pb, " minimumUpdatePeriod=\"PT%dS\"",
                    w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}
static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as)
{
    /* ... */
        if (!ts || strncmp(gold->value, ts->value, strlen(gold->value)))
            return 0;
    /* ... */
}

static int bitstream_switching(AVFormatContext *s, AdaptationSet *as)
{
    /* ... */
    if (!gold_track_num)
        return 0;
    /* ... */
        if (!track_num ||
            strncmp(gold_track_num->value, track_num->value,
                    strlen(gold_track_num->value)) || /* ... */)
            return 0;
    /* ... */
}
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate)
{
    WebMDashMuxContext *w = s->priv_data;
    /* irange, cues_start, cues_end, filename and bandwidth below are the
     * INITIALIZATION_RANGE, CUES_START, CUES_END, FILENAME and BANDWIDTH
     * metadata entries of the stream. */
    const char *bandwidth_str;
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end ||
                         !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    /* ... */
    if (w->is_live && !bandwidth) {
        /* ... a default bandwidth is used when live input does not provide one */
    } else {
        bandwidth_str = bandwidth->value;
    }
    /* ... */
}
static int check_matching_width(AVFormatContext *s, AdaptationSet *as)
{
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codecpar->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codecpar->width)
            return 0;
    return 1;
}

static int check_matching_height(AVFormatContext *s, AdaptationSet *as)
{
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codecpar->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codecpar->height)
            return 0;
    return 1;
}

static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as)
{
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
            return 0;
    return 1;
}
static void free_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}
/* Split a live header file name of the form <description>_<representation id>.hdr
 * into the representation id and the initialization/media patterns. Any output
 * pointer may be NULL if the caller does not need that value. */
static int parse_filename(char *filename, char **representation_id,
                          char **initialization_pattern, char **media_pattern)
{
    char *underscore_pos = NULL;
    char *period_pos = NULL;
    char *filename_str = av_strdup(filename);
    int ret = 0;

    if (!filename_str) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    underscore_pos = strrchr(filename_str, '_');
    if (!underscore_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    period_pos = strchr(++underscore_pos, '.');
    if (!period_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    *(underscore_pos - 1) = 0;
    if (representation_id) {
        *representation_id = av_malloc(period_pos - underscore_pos + 1);
        if (!(*representation_id)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        av_strlcpy(*representation_id, underscore_pos,
                   period_pos - underscore_pos + 1);
    }
    if (initialization_pattern) {
        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
                                              filename_str);
        if (!(*initialization_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
    if (media_pattern) {
        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
                                     filename_str);
        if (!(*media_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
end:
    av_free(filename_str);
    return ret;
}
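To make the naming convention concrete, here is a small standalone sketch (independent of the muxer, using the hypothetical header name "video_360.hdr") of the same split that parse_filename() performs:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Hypothetical live header name: <description>_<representation id>.hdr */
    char name[] = "video_360.hdr";
    char *underscore = strrchr(name, '_');
    char *period = strchr(underscore + 1, '.');

    printf("representation id: %.*s\n",
           (int)(period - underscore - 1), underscore + 1);  /* 360 */
    *underscore = '\0';
    printf("initialization:    %s_$RepresentationID$.hdr\n", name);
    printf("media:             %s_$RepresentationID$_$Number$.chk\n", name);
    return 0;
}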
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    AVCodecParameters *par = s->streams[as->streams[0]]->codecpar;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;
    /* Width, height and sample rate are written on the AdaptationSet element
     * only when they match across every Representation in the set; otherwise
     * (and always for live) they go on the individual Representation. */
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    /* ... */
    AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[0]]->metadata,
                                        CLUSTER_KEYFRAME, NULL, 0);
    if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1)))
        subsegmentStartsWithSAP = 0;
    avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    /* ... */
    if (w->is_live) {
        /* Derive the SegmentTemplate patterns from the first stream's header
         * file name (see parse_filename() above). */
        char *initialization_pattern = NULL;
        char *media_pattern = NULL;
        /* ... */
        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(s->pb, "<SegmentTemplate");
        /* ... */
        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
        avio_printf(s->pb, " media=\"%s\"/>\n", media_pattern);
        av_free(initialization_pattern);
        av_free(media_pattern);
    }
    for (i = 0; i < as->nb_streams; i++) {
        char *representation_id = NULL;
        int ret;
        if (w->is_live) {
            /* ... the id is parsed out of this stream's header file name ... */
        } else {
            representation_id = av_asprintf("%d", w->representation_id++);
            if (!representation_id)
                return AVERROR(ENOMEM);
        }
        ret = write_representation(s, s->streams[as->streams[i]],
                                   representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        av_free(representation_id);
        if (ret)
            return ret;
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}
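For live output the manifest carries only the $RepresentationID$/$Number$ templates; the DASH client performs the substitution. A standalone sketch (the id and chunk number are made up) of what one expanded chunk name looks like:

#include <stdio.h>

int main(void)
{
    const char *rep_id = "360"; /* hypothetical representation id */
    int chunk = 5;              /* hypothetical chunk number */
    char chunk_name[64];
    /* Expansion of the media pattern "video_$RepresentationID$_$Number$.chk": */
    snprintf(chunk_name, sizeof(chunk_name), "video_%s_%d.chk", rep_id, chunk);
    printf("%s\n", chunk_name); /* video_360_5.chk */
    return 0;
}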
static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;

    if (!w->adaptation_sets) {
        av_log(s, AV_LOG_ERROR, "The 'adaptation_sets' option must be set.\n");
        return AVERROR(EINVAL);
    }

    /* Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on */
    state = new_set;
    while (1) {
        if (*p == '\0') {
            if (state == new_set)
                break;
            else
                return AVERROR(EINVAL);
        } else if (state == new_set && *p == ' ') {
            p++;
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            char *comma;
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; /* consume "id=" */
            q = w->as[w->nb_as - 1].id;
            comma = strchr(p, ',');
            if (!comma || comma - p >= sizeof(w->as[w->nb_as - 1].id)) {
                /* id too long or not followed by ",streams=..." */
                return AVERROR(EINVAL);
            }
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; /* consume "streams=" */
            state = parsing_streams;
        } else if (state == parsing_streams) {
            AdaptationSet *as = &w->as[w->nb_as - 1];
            int64_t num;
            int ret = av_reallocp_array(&as->streams, ++as->nb_streams,
                                        sizeof(*as->streams));
            if (ret < 0)
                return ret;
            num = strtoll(p, &q, 10);
            if (!av_isdigit(*p) || (*q != ' ' && *q != '\0' && *q != ',') ||
                num < 0 || num >= s->nb_streams) {
                /* stream index is not a digit or is out of range */
                return AVERROR(EINVAL);
            }
            as->streams[as->nb_streams - 1] = num;
            if (*q == '\0')
                break;
            if (*q == ' ')
                state = new_set;
            p = ++q;
        } else {
            return AVERROR(EINVAL);
        }
    }
    return 0;
}
static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    int i, ret;

    for (unsigned i = 0; i < s->nb_streams; i++) {
        /* ... */
    }
    /* ... parse the adaptation_sets option, write the MPD header and open the Period ... */
    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0)
            goto fail;
    }
    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
    return 0;
fail:
    free_adaptation_sets(s);
    return ret;
}
#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on",
      OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    /* ... */
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.",
      OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    /* ... */
    { NULL },
};
AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name       = "webm_dash_manifest",
    .long_name  = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type  = "application/xml",
    /* ... */
};
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
enum AVMediaType codec_type
General type of the encoded data.
AVCodecParameters
This struct describes the properties of an encoded stream.
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
#define AVERROR_EOF
End of file.
static int write_representation(AVFormatContext *s, AVStream *stream, char *id, int output_width, int output_height, int output_sample_rate)
static void free_adaptation_sets(AVFormatContext *s)
const char * name
Name of the codec described by this descriptor.
char * av_asprintf(const char *fmt,...)
static int check_matching_width(AVFormatContext *s, AdaptationSet *as)
static av_cold int end(AVCodecContext *avctx)
static void write_footer(AVFormatContext *s)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
static double get_duration(AVFormatContext *s)
static int write_header(AVFormatContext *s)
static const char * get_codec_name(int codec_id)
static int parse_adaptation_sets(AVFormatContext *s)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as)
const char * av_default_item_name(void *ptr)
Return the context name.
int sample_rate
Audio only.
static int webm_dash_manifest_write_header(AVFormatContext *s)
AVCodecID
Identify the syntax and semantics of the bitstream.
int extradata_size
Size of the extradata content in bytes.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define INITIALIZATION_RANGE
static av_const int av_isdigit(int c)
Locale-independent conversion of ASCII isdigit.
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
AVOutputFormat ff_webm_dash_manifest_muxer
static int write_adaptation_set(AVFormatContext *s, int as_index)
static int parse_filename(char *filename, char **representation_id, char **initialization_pattern, char **media_pattern)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static const AVOption options[]
int minimum_update_period
int avio_printf(AVIOContext *s, const char *fmt, ...)
Writes a formatted string to the context.
static const AVClass webm_dash_class
double time_shift_buffer_depth
static int bitstream_switching(AVFormatContext *s, AdaptationSet *as)
static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as)
char * av_strdup(const char *s)
Duplicate a string.
static int check_matching_height(AVFormatContext *s, AdaptationSet *as)
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
This structure stores compressed data.
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)