71 #define EBML_UNKNOWN_LENGTH UINT64_MAX /* EBML unknown length, in uint64_t */
72 #define NEEDS_CHECKING 2 /* indicates that some error checks still need to be performed */
76 #define SKIP_THRESHOLD 1024 * 1024 /* abort if more than this much unknown/damaged data is encountered */
79 #define UNKNOWN_EQUIV 50 * 1024 /* an unknown element counts as this many bytes for the SKIP_THRESHOLD check */
124 typedef struct Ebml {
393 #define CHILD_OF(parent) { .def = { .n = parent } }
779 uint32_t id, int64_t position)
806 "Seek to desired resync point failed. Seeking to "
807 "earliest point available instead.\n");
809 last_pos + 1), SEEK_SET);
845 int max_size, uint64_t *number, int eof_forbidden)
859 if (!total || read > max_size) {
863 "0x00 at pos %"PRId64" (0x%"PRIx64") invalid as first byte "
864 "of an EBML number\n", pos, pos);
867 "Length %d indicated by an EBML number's first byte 0x%02x "
868 "at pos %"PRId64" (0x%"PRIx64") exceeds max length %d.\n",
877 total = (total << 8) | avio_r8(pb);
892 "Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
898 "at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos);
913 if (res > 0 && *number + 1 == 1ULL << (7 * res))
929 *num = (*num << 8) | avio_r8(pb);
949 *num = ((uint64_t)*num << 8) | avio_r8(pb);
1030 uint64_t length, int64_t pos)
1042 level->length = length;
1062 *num = unum - ((1LL << (7 * res - 1)) - 1);
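The ebml_read_num logic above decodes EBML variable-length numbers: the count of leading zero bits in the first byte gives the total byte length, the length-marker bit is masked off, and the remaining bytes are shifted in; an all-ones value in a size field means "unknown length" (EBML_UNKNOWN_LENGTH). A minimal, self-contained sketch of that decoding, independent of the AVIOContext plumbing used here (ebml_vint_decode is a hypothetical helper, not an FFmpeg function):

#include <stdint.h>
#include <stddef.h>

/* Decode an EBML variable-length unsigned integer from a byte buffer.
 * The number of leading zero bits in the first byte gives the total
 * length in bytes; the marker bit itself is stripped from the value.
 * Returns the byte length consumed, or 0 on error. */
static int ebml_vint_decode(const uint8_t *buf, size_t len, uint64_t *out)
{
    if (!len || !buf[0])
        return 0;                       /* 0x00 is not a valid first byte */

    int total_len = 1;
    uint8_t mask = 0x80;
    while (!(buf[0] & mask)) {          /* count leading zero bits */
        total_len++;
        mask >>= 1;
    }
    if (total_len > 8 || (size_t)total_len > len)
        return 0;

    uint64_t value = buf[0] & (mask - 1);   /* strip the length marker */
    for (int i = 1; i < total_len; i++)
        value = (value << 8) | buf[i];

    *out = value;
    return total_len;
}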
1076 for (i = 0; syntax[i].id; i++)
1077 if (id == syntax[i].id)
1089 for (int i = 0; syntax[i].id; i++)
1090 switch (syntax[i].type) {
1095 *(int64_t *) ((char *) data + syntax[i].data_offset) = syntax[i].def.i;
1103 if (syntax[i].def.s) {
1131 return id && (bits + 7) / 8 == (8 - bits % 8);
1139 uint32_t id, int64_t pos)
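Line 1131 is the body of is_ebml_id_valid(): an EBML ID keeps its length-marker bit, so the byte count implied by the position of the highest set bit must match the byte count implied by the value's magnitude. A standalone sketch of the same check (highest_bit and ebml_id_valid are illustrative names, not the FFmpeg helpers):

#include <stdint.h>

/* Position of the highest set bit (0-based); id must be non-zero. */
static int highest_bit(uint32_t id)
{
    int n = -1;
    while (id) {
        id >>= 1;
        n++;
    }
    return n;
}

/* A k-byte EBML ID carries its length marker as bit (8 - k) of its top
 * byte, so the byte count implied by the marker position must match the
 * byte count implied by the magnitude of the stored value. */
static int ebml_id_valid(uint32_t id)
{
    if (!id)
        return 0;
    int bits = highest_bit(id);
    return (bits + 7) / 8 == 8 - bits % 8;
}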
1211 "at pos. %"PRIu64
" (0x%"PRIx64
")\n",
pos,
pos);
1218 pos_alt =
pos + res;
1231 while (syntax->
def.
n) {
1241 "%"PRId64
"\n",
id,
pos);
1254 &
list->alloc_elem_size,
1258 list->elem = newelem;
1275 uint64_t elem_end = pos_alt + length,
1278 if (elem_end < level_end) {
1280 }
else if (elem_end == level_end) {
1284 "Element at 0x%"PRIx64
" ending at 0x%"PRIx64
" exceeds "
1285 "containing master element ending at 0x%"PRIx64
"\n",
1286 pos, elem_end, level_end);
1293 "at 0x%"PRIx64
" inside parent with finite size\n",
pos);
1303 "Found unknown-length element 0x%"PRIX32
" other than "
1304 "a cluster at 0x%"PRIx64
". Spec-incompliant, but "
1305 "parsing will nevertheless be attempted.\n",
id,
pos);
1312 if (max_lengths[syntax->
type] && length > max_lengths[syntax->
type]) {
1315 "Invalid length 0x%"PRIx64
" > 0x%"PRIx64
" for element "
1316 "with ID 0x%"PRIX32
" at 0x%"PRIx64
"\n",
1317 length, max_lengths[syntax->
type],
id,
pos);
1320 "Element with ID 0x%"PRIX32
" at pos. 0x%"PRIx64
" has "
1321 "unknown length, yet the length of an element of its "
1322 "type must be known.\n",
id,
pos);
1325 "Found unknown-length element with ID 0x%"PRIX32
" at "
1326 "pos. 0x%"PRIx64
" for which no syntax for parsing is "
1327 "available.\n",
id,
pos);
1364 "Unknown element %"PRIX32
" at pos. 0x%"PRIx64
" with "
1365 "length 0x%"PRIx64
" considered as invalid data. Last "
1366 "known good position 0x%"PRIx64
", %d unknown elements"
1385 switch (syntax->
type) {
1412 if (!level1_elem->
pos) {
1415 }
else if (level1_elem->
pos !=
pos)
1433 if ((res2 =
avio_skip(pb, length - 1)) >= 0) {
1488 for (i = 0; syntax[i].id; i++) {
1489 void *data_off = (char *) data + syntax[i].data_offset;
1490 switch (syntax[i].type) {
1500 if (syntax[i].list_elem_size) {
1502 char *ptr = list->elem;
1503 for (j = 0; j < list->nb_elem;
1508 list->alloc_elem_size = 0;
1523 int len_mask = 0x80, size = 1, n = 1, i;
1531 while (size <= 8 && !(total & len_mask)) {
1537 total &= (len_mask - 1);
1539 total = (total << 8) | p->buf[4 + n++];
1541 if (total + 1 == 1ULL << (7 * size)) {
1556 if (total < probelen)
1558 for (n = 4 + size; n <= 4 + size + total - probelen; n++)
1574 if (tracks[i].num == num)
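Lines 1523-1558 belong to matroska_probe(): after the 4-byte EBML magic, the first byte of the header size determines how many bytes the size field occupies, exactly like any other EBML number. A hedged, self-contained sketch of that shape of check (looks_like_ebml is a hypothetical helper and omits the doctype scan the real probe performs):

#include <stdint.h>
#include <stddef.h>

#define EBML_MAGIC 0x1A45DFA3

/* Return nonzero if the buffer plausibly starts with an EBML header:
 * the 4-byte EBML magic followed by a well-formed size vint. */
static int looks_like_ebml(const uint8_t *buf, size_t len)
{
    if (len < 5)
        return 0;
    uint32_t magic = (uint32_t)buf[0] << 24 | buf[1] << 16 |
                     (uint32_t)buf[2] << 8  | buf[3];
    if (magic != EBML_MAGIC)
        return 0;

    uint8_t first = buf[4];             /* first byte of the header size */
    int size_len = 1;
    uint8_t mask = 0x80;
    while (size_len <= 8 && !(first & mask)) {
        size_len++;
        mask >>= 1;
    }
    return size_len <= 8;               /* size field length is in range */
}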
1586 int isize = *buf_size;
1589 int pkt_size = isize;
1593 if (pkt_size >= 10000000U)
1596 switch (encodings[0].compression.algo) {
1602 if (header_size && !header) {
1610 pkt_size = isize + header_size;
1615 memcpy(pkt_data, header, header_size);
1616 memcpy(pkt_data + header_size, data, isize);
1623 olen = pkt_size *= 3;
1630 pkt_data = newpktdata;
1643 z_stream zstream = { 0 };
1644 if (inflateInit(&zstream) != Z_OK)
1646 zstream.next_in = data;
1647 zstream.avail_in = isize;
1652 inflateEnd(&zstream);
1656 pkt_data = newpktdata;
1657 zstream.avail_out = pkt_size - zstream.total_out;
1658 zstream.next_out = pkt_data + zstream.total_out;
1660 } while (result == Z_OK && pkt_size < 10000000);
1661 pkt_size = zstream.total_out;
1662 inflateEnd(&zstream);
1663 if (result != Z_STREAM_END) {
1664 if (result == Z_MEM_ERROR)
1676 bz_stream bzstream = { 0 };
1677 if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK)
1679 bzstream.next_in = data;
1680 bzstream.avail_in = isize;
1685 BZ2_bzDecompressEnd(&bzstream);
1689 pkt_data = newpktdata;
1690 bzstream.avail_out = pkt_size - bzstream.total_out_lo32;
1691 bzstream.next_out = pkt_data + bzstream.total_out_lo32;
1692 result = BZ2_bzDecompress(&bzstream);
1693 } while (result == BZ_OK && pkt_size < 10000000);
1694 pkt_size = bzstream.total_out_lo32;
1695 BZ2_bzDecompressEnd(&bzstream);
1696 if (result != BZ_STREAM_END) {
1697 if (result == BZ_MEM_ERROR)
1713 *buf_size = pkt_size;
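The zlib branch of matroska_decode_buffer() above repeatedly grows the output buffer and calls inflate() until Z_STREAM_END, capping the result at roughly 10 MB. A self-contained sketch of the same pattern using plain realloc() instead of av_realloc() (inflate_buffer is an illustrative helper, not the FFmpeg function):

#include <stdint.h>
#include <stdlib.h>
#include <zlib.h>

/* Inflate a zlib-compressed buffer of unknown decompressed size by
 * growing the output buffer until inflate() reports Z_STREAM_END.
 * Returns the decompressed size, or -1 on error. Caller frees *out. */
static long inflate_buffer(const uint8_t *in, size_t in_size, uint8_t **out)
{
    z_stream zs = { 0 };
    *out = NULL;
    if (inflateInit(&zs) != Z_OK)
        return -1;

    size_t   cap = in_size * 3 + 64;    /* initial guess, grown on demand */
    uint8_t *dst = NULL;
    int      ret = Z_OK;

    zs.next_in  = (Bytef *)in;          /* cast drops const for zlib's API */
    zs.avail_in = (uInt)in_size;

    do {
        uint8_t *tmp = realloc(dst, cap);
        if (!tmp) {
            free(dst);
            inflateEnd(&zs);
            return -1;
        }
        dst = tmp;
        zs.next_out  = dst + zs.total_out;
        zs.avail_out = (uInt)(cap - zs.total_out);
        ret = inflate(&zs, Z_NO_FLUSH);
        cap *= 2;                       /* grow for the next pass */
    } while (ret == Z_OK && cap < 10000000);   /* hard cap like the listing */

    long size = (ret == Z_STREAM_END) ? (long)zs.total_out : -1;
    inflateEnd(&zs);
    if (size < 0)
        free(dst);
    else
        *out = dst;
    return size;
}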
1728 for (i = 0; i < list->nb_elem; i++) {
1729 const char *lang = tags[i].lang &&
1730 strcmp(tags[i].lang, "und") ? tags[i].lang : NULL;
1732 if (!tags[i].name) {
1740 if (tags[i].def || !lang) {
1742 if (tags[i].sub.nb_elem)
1749 if (tags[i].sub.nb_elem)
1763 if (tags[i].target.attachuid) {
1767 if (attachment[j].uid == tags[i].target.attachuid &&
1768 attachment[j].stream) {
1770 &attachment[j].stream->metadata, NULL);
1776 "The tags at index %d refer to a "
1777 "non-existent attachment %"PRId64".\n",
1778 i, tags[i].target.attachuid);
1780 } else if (tags[i].target.chapteruid) {
1784 if (chapter[j].uid == tags[i].target.chapteruid &&
1785 chapter[j].chapter) {
1787 &chapter[j].chapter->metadata, NULL);
1793 "The tags at index %d refer to a non-existent chapter "
1795 i, tags[i].target.chapteruid);
1797 } else if (tags[i].target.trackuid) {
1801 if (track[j].uid == tags[i].target.trackuid &&
1804 &track[j].stream->metadata, NULL);
1810 "The tags at index %d refer to a non-existent track "
1812 i, tags[i].target.trackuid);
1816 tags[i].target.type);
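matroska_convert_tag() maps each SimpleTag to a dictionary entry, appending the language to the key when one is present and not the undetermined code "und". A tiny illustration of that key construction (tag_key is a hypothetical helper):

#include <stdio.h>
#include <string.h>

/* Build the dictionary key for a Matroska SimpleTag: the tag name,
 * optionally suffixed with "-<lang>" when a real language is set. */
static void tag_key(char *dst, size_t dst_size,
                    const char *name, const char *lang)
{
    if (lang && strcmp(lang, "und"))
        snprintf(dst, dst_size, "%s-%s", name, lang);
    else
        snprintf(dst, dst_size, "%s", name);
}

/* e.g. tag_key(buf, sizeof(buf), "TITLE", "eng") yields "TITLE-eng" */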
1834 "Max EBML element depth (%d) reached, "
1865 for (i = 0; i < seekhead_list->nb_elem; i++) {
1867 uint32_t id = seekheads[i].id;
1875 if (!elem || elem->parsed)
1898 uint64_t index_scale = 1;
1904 index_list = &matroska->index;
1915 for (j = 0; j < pos_list->nb_elem; j++) {
1918 if (track && track->stream)
1948 static const char *const aac_profiles[] = { "MAIN", "LC", "SSR" };
1994 int block_last, block_type, block_size;
2000 if (block_size > size)
2009 chmask = av_dict_get(dict, "WAVEFORMATEXTENSIBLE_CHANNEL_MASK", NULL, 0);
2014 "Invalid value of WAVEFORMATEXTENSIBLE_CHANNEL_MASK\n");
2035 bttb = (major == 57 && minor >= 36 && minor <= 51 && micro >= 100);
2037 switch (field_order) {
2056 int *h_width, int *h_height)
2058 switch (stereo_mode) {
2083 int has_mastering_primaries, has_mastering_luminance;
2088 mastering_meta = &color->mastering_meta;
2090 has_mastering_primaries =
2091 mastering_meta->r_x > 0 && mastering_meta->r_y > 0 &&
2092 mastering_meta->g_x > 0 && mastering_meta->g_y > 0 &&
2093 mastering_meta->b_x > 0 && mastering_meta->b_y > 0 &&
2095 has_mastering_luminance = mastering_meta->max_luminance > 0;
2114 (color->chroma_siting_vert - 1) << 7);
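The mastering-metadata block above only exports data when it is complete: all three primaries (and the white point) must carry positive chromaticity coordinates, and luminance is exported only when a positive maximum is present. A minimal sketch of that completeness test with illustrative field names:

/* Illustrative container for mastering-display values as parsed from the
 * MasteringMetadata element; field names mirror the listing loosely. */
struct mastering {
    double r_x, r_y, g_x, g_y, b_x, b_y, white_x, white_y;
    double max_luminance, min_luminance;
};

static int has_primaries(const struct mastering *m)
{
    return m->r_x > 0 && m->r_y > 0 &&
           m->g_x > 0 && m->g_y > 0 &&
           m->b_x > 0 && m->b_y > 0 &&
           m->white_x > 0 && m->white_y > 0;
}

static int has_luminance(const struct mastering *m)
{
    return m->max_luminance > 0;
}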
2132 if (has_mastering_primaries || has_mastering_luminance) {
2141 if (has_mastering_primaries) {
2152 if (has_mastering_luminance) {
2166 size_t spherical_size;
2167 uint32_t l = 0, t = 0, r = 0, b = 0;
2168 uint32_t padding = 0;
2175 if (bytestream2_get_byte(&gb) != 0) {
2185 t = bytestream2_get_be32(&gb);
2186 b = bytestream2_get_be32(&gb);
2187 l = bytestream2_get_be32(&gb);
2188 r = bytestream2_get_be32(&gb);
2190 if (b >= UINT_MAX - t || r >= UINT_MAX - l) {
2192 "Invalid bounding rectangle coordinates "
2193 "%"PRIu32",%"PRIu32",%"PRIu32",%"PRIu32"\n",
2202 if (l || t || r || b)
2212 uint32_t layout = bytestream2_get_be32(&gb);
2215 "Unknown spherical cubemap layout %"PRIu32"\n", layout);
2219 padding = bytestream2_get_be32(&gb);
2230 "Unknown spherical metadata type %"PRIu64"\n",
2304 int extradata_size = 0;
2305 int extradata_offset = 0;
2308 char* key_id_base64 = NULL;
2317 "Unknown or unsupported track type %"PRIu64"\n",
2327 "Invalid sample rate %f, defaulting to 8000 instead.\n",
2335 if (default_duration > UINT64_MAX || default_duration < 0) {
2337 "Invalid frame rate %e. Cannot calculate default duration.\n",
2353 if (encodings_list->nb_elem > 1) {
2355 "Multiple combined encodings not supported");
2356 } else if (encodings_list->nb_elem == 1) {
2357 if (encodings[0].type) {
2358 if (encodings[0].encryption.key_id.size > 0) {
2361 const int b64_size = AV_BASE64_SIZE(encodings[0].encryption.key_id.size);
2363 if (key_id_base64 == NULL)
2367 encodings[0].encryption.key_id.data,
2368 encodings[0].encryption.key_id.size);
2370 encodings[0].scope = 0;
2372 "Unsupported encoding type");
2385 encodings[0].scope = 0;
2387 "Unsupported encoding type");
2397 "Failed to decode codec private data\n");
2416 encodings[0].scope & 1 &&
2435 if (key_id_base64) {
2441 if (!strcmp(track->codec_id, "V_MS/VFW/FOURCC") &&
2452 extradata_offset = 40;
2453 } else if (!strcmp(track->codec_id, "A_MS/ACM") &&
2466 } else if (!strcmp(track->codec_id, "A_QUICKTIME")
2470 uint16_t sample_size;
2476 if (sample_size == 8) {
2479 } else if (sample_size == 16) {
2488 } else if (!strcmp(track->codec_id, "V_QUICKTIME") &&
2544 extradata[0] = (profile << 3) | ((sri & 0x0E) >> 1);
2545 extradata[1] = ((sri & 0x01) << 7) | (track->audio.channels << 3);
2546 if (strstr(track->codec_id, "SBR")) {
2548 extradata[2] = 0x56;
2549 extradata[3] = 0xE5;
2550 extradata[4] = 0x80 | (sri << 3);
2563 AV_WB32(extradata, extradata_size);
2564 memcpy(&extradata[4], "alac", 4);
2573 "Too large audio channel number %"PRIu64
2574 " or bitdepth %"PRIu64". Skipping track.\n",
2583 extradata_size = 22;
2588 bytestream_put_be32(&ptr, AV_RB32("TTA1"));
2589 bytestream_put_le16(&ptr, 1);
2600 extradata_offset = 26;
2634 static const int sipr_bit_rate[4] = { 6504, 8496, 5000, 16000 };
2643 extradata_offset = 78;
2655 "in absence of valid CodecPrivate.\n");
2670 extradata_offset = 4;
2676 "Unknown/unsupported AVCodecID %s.\n", track->codec_id);
2681 1000 * 1000 * 1000);
2690 if (strcmp(track->language, "und"))
2714 int display_width_mul = 1;
2715 int display_height_mul = 1;
2745 #if FF_API_R_FRAME_RATE
2765 snprintf(buf, sizeof(buf), "%s_%d",
2768 if (planes[j].uid == tracks[k].uid && tracks[k].stream) {
2770 "stereo_mode", buf, 0);
2804 (AVRational){1, st->codecpar->codec_id == AV_CODEC_ID_OPUS ?
2805 48000 : st->codecpar->sample_rate});
2815 if (!strcmp(track->codec_id, "D_WEBVTT/CAPTIONS")) {
2817 } else if (!strcmp(track->codec_id, "D_WEBVTT/DESCRIPTIONS")) {
2819 } else if (!strcmp(track->codec_id, "D_WEBVTT/METADATA")) {
2837 uint64_t max_start = 0;
2852 ebml.max_size > sizeof(uint64_t) ||
2856 "EBML version %"PRIu64", doctype %s, doc version %"PRIu64,
2862 "EBML header using unsupported features\n"
2863 "(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
2909 attachments = attachments_list->elem;
2910 for (j = 0; j < attachments_list->nb_elem; j++) {
2911 if (!(attachments[j].filename && attachments[j].mime &&
2912 attachments[j].bin.data && attachments[j].bin.size > 0)) {
2932 attachments[j].stream = st;
2952 attachments[j].bin.size);
2965 chapters = chapters_list->elem;
2968 (max_start == 0 || chapters[i].start > max_start)) {
2974 max_start = chapters[i].start;
2994 if (matroska->queue) {
3025 uint32_t lace_size[256], int *laces)
3032 lace_size[0] = size;
3048 for (n = 0; n < *laces - 1; n++) {
3056 lace_size[n] += temp;
3059 } while (temp == 0xff);
3064 lace_size[n] = size - total;
3069 if (size % (*laces))
3071 for (n = 0; n < *laces; n++)
3072 lace_size[n] = size / *laces;
3089 total = lace_size[0] = num;
3091 for (n = 1; n < *laces - 1; n++) {
3097 if (lace_size[n - 1] + snum > (uint64_t)INT_MAX)
3100 lace_size[n] = lace_size[n - 1] + snum;
3101 total += lace_size[n];
3109 lace_size[*laces - 1] = size - total;
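Lines 3069-3072 handle fixed-size lacing, where the block payload must divide evenly into the lace count. A standalone sketch of that case (split_fixed_laces is an illustrative helper; Xiph and EBML lacing need the variable-size paths shown above):

#include <stdint.h>

/* Fixed-size lacing: the block payload is split into `laces` equal
 * frames, so the payload size must be an exact multiple of the lace
 * count. Returns 0 on success, -1 if the sizes do not divide evenly. */
static int split_fixed_laces(int size, int laces, uint32_t lace_size[256])
{
    if (laces <= 0 || laces > 256 || size % laces)
        return -1;
    for (int n = 0; n < laces; n++)
        lace_size[n] = size / laces;
    return 0;
}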
3136 if (size < cfs * h / 2) {
3138 "Corrupt int4 RM-style audio packet size\n");
3141 for (x = 0; x < h / 2; x++)
3142 memcpy(track->audio.buf + x * 2 * w + y * cfs,
3143 data + x * cfs, cfs);
3147 "Corrupt sipr RM-style audio packet size\n");
3154 "Corrupt generic RM-style audio packet size\n");
3159 sps * (h * x + ((h + 1) / 2) * (y & 1) + (y >> 1)),
3218 while (srclen >= 8) {
3228 multiblock = (flags & 0x1800) != 0x1800;
3240 if (blocksize > srclen) {
3251 dstlen += blocksize + 32;
3262 memcpy(dst + offset + 32, src, blocksize);
3265 srclen -= blocksize;
3266 offset += blocksize + 32;
3285 int dstlen = *size + 8;
3293 memcpy(dst + 8, *data, dstlen - 8);
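matroska_parse_prores() re-adds the 8-byte atom header (a big-endian total length followed by the 'icpf' tag) that Matroska stores ProRes frames without, which is what the dstlen = *size + 8 arithmetic above is for. A hedged sketch of that wrapping with plain malloc (wrap_prores_frame is a hypothetical helper, not the FFmpeg function):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void wb32(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* Prepend the 8-byte ProRes frame atom header (big-endian total size
 * plus the 'icpf' tag) to a raw frame. Caller frees the result. */
static uint8_t *wrap_prores_frame(const uint8_t *frame, int size, int *out_size)
{
    int dstlen = size + 8;
    uint8_t *dst = malloc(dstlen);
    if (!dst)
        return NULL;
    wb32(dst, (uint32_t)dstlen);        /* total atom length */
    memcpy(dst + 4, "icpf", 4);         /* atom tag */
    memcpy(dst + 8, frame, size);
    *out_size = dstlen;
    return dst;
}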
3312 int id_len, settings_len, text_len;
3320 q = data + data_len;
3325 if (*p == '\r' || *p == '\n') {
3334 if (p >= q || *p != '\n')
3341 if (*p == '\r' || *p == '\n') {
3342 settings_len = p - settings;
3350 if (p >= q || *p != '\n')
3356 while (text_len > 0) {
3357 const int len = text_len - 1;
3359 if (c != '\r' && c != '\n')
3372 memcpy(pkt->data, text, text_len);
3382 memcpy(buf, id, id_len);
3385 if (settings_len > 0) {
3393 memcpy(buf, settings, settings_len);
3420 uint64_t timecode, uint64_t lace_duration,
3422 uint8_t *additional, uint64_t additional_id, int additional_size,
3423 int64_t discard_padding)
3433 "Error parsing a wavpack block.\n");
3446 "Error parsing a prores block.\n");
3454 if (!pkt_size && !additional_size)
3474 if (additional_size > 0) {
3477 additional_size + 8);
3482 AV_WB64(side_data, additional_id);
3483 memcpy(side_data + 8, additional, additional_size);
3486 if (discard_padding) {
3497 if (discard_padding > 0) {
3498 AV_WL32(side_data + 4, discard_padding);
3500 AV_WL32(side_data, -discard_padding);
3511 #if FF_API_CONVERGENCE_DURATION
3514 pkt->convergence_duration = lace_duration;
3535 int size, int64_t pos, uint64_t cluster_time,
3537 uint8_t *additional, uint64_t additional_id, int additional_size,
3538 int64_t cluster_pos, int64_t discard_padding)
3546 uint32_t lace_size[256];
3547 int n, flags, laces = 0;
3549 int trust_default_duration = 1;
3559 if (!track || size < 3)
3562 if (!(st = track->stream)) {
3564 "No stream associated to TrackNumber %"PRIu64". "
3565 "Ignoring Block with this TrackNumber.\n", num);
3571 if (block_duration > INT64_MAX)
3572 block_duration = INT64_MAX;
3581 if (cluster_time != (uint64_t) -1 &&
3582 (block_time >= 0 || cluster_time >= -block_time)) {
3585 timecode < track->end_timecode)
3610 &pb, lace_size, &laces);
3620 trust_default_duration = 0;
3624 if (!block_duration && trust_default_duration)
3627 if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time))
3631 for (n = 0; n < laces; n++) {
3632 int64_t lace_duration = block_duration*(n+1) / laces - block_duration*n / laces;
3658 timecode, lace_duration,
3668 additional, additional_id, additional_size,
3675 timecode = lace_duration ? timecode + lace_duration : AV_NOPTS_VALUE;
3707 if (res >= 0 && block->bin.size > 0) {
3716 block->additional.size, cluster->pos,
3717 block->discard_padding);
3727 "end of segment.\n");
3759 int64_t timestamp, int flags)
3763 AVStream *st = s->streams[stream_index];
3839 int64_t start_time_ns;
3840 int64_t end_time_ns;
3841 int64_t start_offset;
3853 int nb_index_entries = s->streams[0]->nb_index_entries;
3856 for (i = 1; i < nb_index_entries; i++) {
3857 if (index_entries[i - 1].timestamp * matroska->time_scale <= ts &&
3858 index_entries[i].timestamp * matroska->time_scale > ts) {
3865 if (i != nb_index_entries - 1) {
3882 int64_t cluster_pos, before_pos;
3884 if (s->streams[0]->nb_index_entries <= 0) return 0;
3887 if (index < 0) return 0;
3888 cluster_pos = s->streams[0]->index_entries[index].pos;
3891 uint64_t cluster_id, cluster_length;
3897 if (read < 0 || cluster_id != 0xF43B675)
3911 cluster_pos += 4 + read + cluster_length;
3925 double min_buffer, double *buffer,
3929 double nano_seconds_per_second = 1000000000.0;
3930 double time_sec = time_ns / nano_seconds_per_second;
3932 int64_t time_to_search_ns = (int64_t)(search_sec * nano_seconds_per_second);
3933 int64_t end_time_ns = time_ns + time_to_search_ns;
3934 double sec_downloaded = 0.0;
3938 *sec_to_download = 0.0;
3942 int64_t cue_nano = desc_curr.end_time_ns - time_ns;
3945 double timeToDownload = (cueBytes * 8.0) / bps;
3947 sec_downloaded += (cue_nano / nano_seconds_per_second) - timeToDownload;
3948 *sec_to_download += timeToDownload;
3952 double desc_end_time_sec = desc_curr.end_time_ns / nano_seconds_per_second;
3953 double percent_to_sub = search_sec / (desc_end_time_sec - time_sec);
3954 sec_downloaded = percent_to_sub * sec_downloaded;
3955 *sec_to_download = percent_to_sub * *sec_to_download;
3958 if ((sec_downloaded + *buffer) <= min_buffer) {
3969 double desc_sec = desc_ns / nano_seconds_per_second;
3970 double bits = (desc_bytes * 8.0);
3971 double time_to_download = bits / bps;
3973 sec_downloaded += desc_sec - time_to_download;
3974 *sec_to_download += time_to_download;
3977 double desc_end_time_sec = desc_curr.end_time_ns / nano_seconds_per_second;
3978 double percent_to_sub = search_sec / (desc_end_time_sec - time_sec);
3979 sec_downloaded = percent_to_sub * sec_downloaded;
3980 *sec_to_download = percent_to_sub * *sec_to_download;
3982 if ((sec_downloaded + *buffer) <= min_buffer)
3987 if ((sec_downloaded + *buffer) <= min_buffer) {
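buffer_size_after_time_downloaded() models download-versus-playback: each cue adds its media duration to the buffer and subtracts the time needed to fetch its bytes at the assumed bitrate, failing when the running buffer dips to the minimum. The core arithmetic, isolated (buffer_gain_sec is an illustrative helper):

/* For a cue of cue_bytes spanning cue_ns nanoseconds, downloading at
 * bps bits per second costs cue_bytes*8/bps seconds while cue_ns/1e9
 * seconds of media are gained; the buffer grows by the difference. */
static double buffer_gain_sec(double cue_bytes, double cue_ns, double bps)
{
    double nano_seconds_per_second = 1000000000.0;
    double download_sec = (cue_bytes * 8.0) / bps;
    return cue_ns / nano_seconds_per_second - download_sec;
}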
4009 double bandwidth = 0.0;
4013 int64_t prebuffer_ns = 1000000000;
4015 double nano_seconds_per_second = 1000000000.0;
4016 int64_t prebuffered_ns = time_ns + prebuffer_ns;
4017 double prebuffer_bytes = 0.0;
4018 int64_t temp_prebuffer_ns = prebuffer_ns;
4019 int64_t pre_bytes, pre_ns;
4020 double pre_sec, prebuffer, bits_per_second;
4038 bits_per_second = 0.0;
4044 pre_sec = pre_ns / nano_seconds_per_second;
4046 pre_bytes * ((temp_prebuffer_ns / nano_seconds_per_second) / pre_sec);
4048 prebuffer = prebuffer_ns / nano_seconds_per_second;
4051 bits_per_second = 0.0;
4055 double desc_sec = desc_ns / nano_seconds_per_second;
4056 double calc_bits_per_second = (desc_bytes * 8) / desc_sec;
4059 double percent = (desc_bytes - prebuffer_bytes) / desc_bytes;
4060 double mod_bits_per_second = calc_bits_per_second * percent;
4062 if (prebuffer < desc_sec) {
4068 int64_t bps = (int64_t)(mod_bits_per_second) + 1;
4069 const double min_buffer = 0.0;
4070 double buffer = prebuffer;
4071 double sec_to_download = 0.0;
4074 min_buffer, &buffer, &sec_to_download,
4078 } else if (rv == 0) {
4079 bits_per_second = (double)(bps);
4087 if (bandwidth < bits_per_second) bandwidth = bits_per_second;
4089 return (int64_t)bandwidth;
4098 int64_t cues_start = -1, cues_end = -1, before_pos, bandwidth;
4107 if (i >= seekhead_list->nb_elem) return -1;
4111 if (avio_seek(matroska->ctx->pb, cues_start, SEEK_SET) == cues_start) {
4115 uint64_t cues_length, cues_id;
4123 cues_end = cues_start + 4 + bytes_read + cues_length - 1;
4126 if (cues_start == -1 || cues_end == -1) return -1;
4139 if (cues_start <= init_range)
4144 if (bandwidth < 0) return -1;
4153 if (!buf) return -1;
4155 for (i = 0; i < s->streams[0]->nb_index_entries; i++) {
4157 "%" PRId64"%s", s->streams[0]->index_entries[i].timestamp,
4158 i != s->streams[0]->nb_index_entries - 1 ? "," : "");
4159 if (ret <= 0 || (ret == 20 && i == s->streams[0]->nb_index_entries - 1)) {
4205 buf = strrchr(s->url, '/');
4237 #define OFFSET(x) offsetof(MatroskaDemuxContext, x)
4252 .name = "matroska,webm",
4254 .extensions = "mkv,mk3d,mka,mks",
4261 .mime_type = "audio/webm,audio/x-matroska,video/webm,video/x-matroska"
4265 .name = "webm_dash_manifest",
AVIndexEntry * index_entries
Only used if the format does not support seeking natively.
#define MATROSKA_ID_ENCODINGENCRYPTION
#define MATROSKA_ID_TAGTARGETS_ATTACHUID
#define MATROSKA_ID_CHAPCOUNTRY
static int ebml_read_master(MatroskaDemuxContext *matroska, uint64_t length, int64_t pos)
static int ebml_read_binary(AVIOContext *pb, int length, int64_t pos, EbmlBin *bin)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
#define MATROSKA_ID_VIDEODISPLAYUNIT
static const CodecMime mkv_image_mime_tags[]
static int matroska_parse_laces(MatroskaDemuxContext *matroska, uint8_t **buf, int size, int type, AVIOContext *pb, uint32_t lace_size[256], int *laces)
#define FF_ENABLE_DEPRECATION_WARNINGS
#define MATROSKA_ID_CODECPRIVATE
#define MATROSKA_ID_TRACKNUMBER
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
@ MATROSKA_VIDEO_STEREOMODE_TYPE_ROW_INTERLEAVED_RL
#define AV_LOG_WARNING
Something somehow does not look correct.
#define MATROSKA_ID_CHAPSTRING
#define MATROSKA_ID_ENCODINGSIGHASHALGO
#define MATROSKA_ID_TAGTARGETS
AVSphericalProjection
Projection of the video surface(s) on a sphere.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
#define MATROSKA_ID_CLUSTERTIMECODE
static EbmlSyntax matroska_attachment[]
static EbmlSyntax ebml_header[]
#define MATROSKA_ID_TITLE
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
static int matroska_parse_tracks(AVFormatContext *s)
enum AVMediaType codec_type
General type of the encoded data.
enum AVSphericalProjection projection
Projection type.
MatroskaTrackOperation operation
int av_buffer_realloc(AVBufferRef **pbuf, int size)
Reallocate a given buffer.
uint32_t bound_bottom
Distance from the bottom edge.
#define MATROSKA_ID_CODECDECODEALL
void ff_rm_reorder_sipr_data(uint8_t *buf, int sub_packet_h, int framesize)
Perform 4-bit block reordering for SIPR data.
#define MATROSKA_ID_VIDEOCOLORMASTERINGMETA
#define MATROSKA_ID_CHAPTERFLAGENABLED
static int matroska_parse_prores(MatroskaTrack *track, uint8_t **data, int *size)
static int segment_start(AVFormatContext *s, int write_header)
#define EBML_ID_EBMLMAXSIZELENGTH
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
enum AVColorSpace color_space
AVInputFormat ff_matroska_demuxer
#define MATROSKA_ID_TRACKOPERATION
static int ebml_read_num(MatroskaDemuxContext *matroska, AVIOContext *pb, int max_size, uint64_t *number, int eof_forbidden)
@ MATROSKA_VIDEO_FIELDORDER_PROGRESSIVE
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
#define MKTAG(a, b, c, d)
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
#define MATROSKA_ID_VIDEOPIXELWIDTH
@ MATROSKA_VIDEO_STEREOMODE_TYPE_CHECKERBOARD_LR
#define MATROSKA_ID_AUDIOSAMPLINGFREQ
static av_always_inline double av_int2double(uint64_t i)
Reinterpret a 64-bit integer as a double.
static int matroska_resync(MatroskaDemuxContext *matroska, int64_t last_pos)
@ MATROSKA_VIDEO_PROJECTION_TYPE_RECTANGULAR
static EbmlSyntax matroska_track_combine_planes[2]
#define MATROSKA_ID_DISCARDPADDING
@ MATROSKA_VIDEO_FIELDORDER_BB
static EbmlSyntax matroska_cluster_parsing[8]
const struct EbmlSyntax * n
#define MATROSKA_ID_EDITIONFLAGORDERED
#define MATROSKA_ID_DURATION
char * av_asprintf(const char *fmt,...)
#define MATROSKA_ID_VIDEOCOLORCHROMASITINGVERT
static int matroska_read_header(AVFormatContext *s)
static MatroskaTrack * matroska_find_track_by_num(MatroskaDemuxContext *matroska, uint64_t num)
#define MATROSKA_ID_VIDEOCOLORPRIMARIES
static EbmlSyntax matroska_chapters[2]
@ MATROSKA_VIDEO_DISPLAYUNIT_PIXELS
#define MATROSKA_ID_ENCODINGSIGNATURE
#define MATROSKA_ID_SEGMENT
unsigned MaxCLL
Max content light level (cd/m^2).
static av_cold int end(AVCodecContext *avctx)
#define MATROSKA_ID_CHAPTERS
#define MATROSKA_ID_VIDEOCOLOR_BY
#define MATROSKA_ID_BLOCKDURATION
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
#define MATROSKA_ID_VIDEOCOLORRANGE
#define MATROSKA_ID_BLOCK
#define MATROSKA_ID_TRACKDEFAULTDURATION
static EbmlSyntax matroska_track_encodings[2]
static EbmlSyntax matroska_tags[2]
#define MATROSKA_ID_TAGTARGETS_TYPE
int seek_preroll
Audio only.
static int matroska_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
@ AV_PKT_DATA_PALETTE
An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE bytes worth of palette.
AVRational avg_frame_rate
Average framerate.
static int matroska_read_close(AVFormatContext *s)
#define MATROSKA_ID_ENCODINGSCOPE
@ MATROSKA_VIDEO_INTERLACE_FLAG_INTERLACED
#define MATROSKA_ID_DATEUTC
#define MATROSKA_ID_VIDEOCOLORCHROMASUBHORZ
int error
contains the error code or 0 if no error happened
AVInputFormat ff_webm_dash_manifest_demuxer
@ MATROSKA_VIDEO_STEREOMODE_TYPE_BOTH_EYES_BLOCK_LR
#define AV_LOG_VERBOSE
Detailed information.
#define MATROSKA_ID_VIDEOPIXELCROPT
static int ebml_read_ascii(AVIOContext *pb, int size, char **str)
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
static int matroska_parse_block(MatroskaDemuxContext *matroska, AVBufferRef *buf, uint8_t *data, int size, int64_t pos, uint64_t cluster_time, uint64_t block_duration, int is_keyframe, uint8_t *additional, uint64_t additional_id, int additional_size, int64_t cluster_pos, int64_t discard_padding)
#define MATROSKA_ID_VIDEOPIXELCROPB
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define MATROSKA_ID_VIDEOCOLORCBSUBVERT
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
@ AV_SPHERICAL_EQUIRECTANGULAR_TILE
Video represents a portion of a sphere mapped on a flat surface using equirectangular projection.
static int mkv_parse_video_projection(AVStream *st, const MatroskaTrack *track, void *logctx)
@ MATROSKA_COLOUR_CHROMASITINGHORZ_UNDETERMINED
int buf_size
Size of buf except extra allocated bytes.
static EbmlSyntax matroska_blockadditions[2]
#define MATROSKA_ID_CUETIME
int skip_to_keyframe
Indicates that everything up to the next keyframe should be discarded.
@ AV_PKT_DATA_SPHERICAL
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt)
#define MATROSKA_ID_CUERELATIVEPOSITION
@ AV_SPHERICAL_EQUIRECTANGULAR
Video represents a sphere mapped on a flat surface using equirectangular projection.
#define MATROSKA_ID_CHAPLANG
static int ebml_parse(MatroskaDemuxContext *matroska, EbmlSyntax *syntax, void *data)
#define MATROSKA_ID_CUEDURATION
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
@ MATROSKA_VIDEO_STEREOMODE_TYPE_ROW_INTERLEAVED_LR
#define MATROSKA_ID_VIDEOCOLORBITSPERCHANNEL
static EbmlSyntax matroska_segment[9]
static void matroska_metadata_creation_time(AVDictionary **metadata, int64_t date_utc)
@ MATROSKA_VIDEO_DISPLAYUNIT_UNKNOWN
Content light level needed to transmit HDR over HDMI (CTA-861.3).
#define MATROSKA_ID_VIDEOCOLOR_LUMINANCEMAX
#define update_pos(td, mb_y, mb_x)
static EbmlSyntax matroska_track_encoding_compression[]
enum AVColorPrimaries color_primaries
#define MATROSKA_ID_VIDEOCOLOR_WHITEY
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static EbmlSyntax ebml_syntax[3]
#define MATROSKA_ID_AUDIOOUTSAMPLINGFREQ
#define MATROSKA_ID_VIDEOPROJECTIONPRIVATE
MatroskaCluster current_cluster
@ MATROSKA_VIDEO_STEREOMODE_TYPE_MONO
#define MATROSKA_ID_ENCODINGSIGALGO
#define MATROSKA_ID_VIDEOASPECTRATIO
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
MatroskaLevel1Element level1_elems[64]
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp, int flags)
#define MATROSKA_ID_CHAPTERDISPLAY
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, int size, int distance, int flags)
Add an index entry into a sorted list.
#define MATROSKA_ID_TRACKUID
#define MATROSKA_ID_ENCODINGSIGKEYID
static const AVClass webm_dash_class
#define EBML_ID_DOCTYPEVERSION
static av_cold int read_close(AVFormatContext *ctx)
@ MATROSKA_COLOUR_CHROMASITINGVERT_NB
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
const char *const ff_matroska_video_stereo_mode[MATROSKA_VIDEO_STEREOMODE_TYPE_NB]
static int matroska_probe(const AVProbeData *p)
@ MATROSKA_VIDEO_STEREOMODE_TYPE_CHECKERBOARD_RL
static EbmlSyntax matroska_segments[]
static const char *const matroska_doctypes[]
#define MATROSKA_ID_VIDEOALPHAMODE
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
static int mkv_parse_video_color(AVStream *st, const MatroskaTrack *track)
MatroskaMasteringMeta mastering_meta
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define MATROSKA_ID_VIDEOCOLOR_WHITEX
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
@ MATROSKA_TRACK_TYPE_METADATA
static EbmlSyntax matroska_tag[3]
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
@ MATROSKA_VIDEO_FIELDORDER_TT
MatroskaLevel levels[EBML_MAX_DEPTH]
static EbmlSyntax matroska_track[27]
enum AVColorTransferCharacteristic color_trc
#define MATROSKA_ID_BLOCKADDITIONAL
static int ebml_parse_nest(MatroskaDemuxContext *matroska, EbmlSyntax *syntax, void *data)
unsigned int avio_rb32(AVIOContext *s)
@ AV_PKT_DATA_WEBVTT_SETTINGS
The optional settings (rendering instructions) that immediately follow the timestamp specifier of a W...
static int matroska_parse_seekhead_entry(MatroskaDemuxContext *matroska, int64_t pos)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define MATROSKA_ID_TRACKCONTENTENCODING
uint64_t bits_per_channel
#define MATROSKA_ID_TAGDEFAULT_BUG
#define MATROSKA_ID_CODECDOWNLOADURL
#define MATROSKA_ID_VIDEOFIELDORDER
static EbmlSyntax matroska_info[]
#define MATROSKA_ID_VIDEOPIXELCROPR
@ MATROSKA_COLOUR_CHROMASITINGHORZ_NB
static EbmlSyntax matroska_index_entry[3]
#define EBML_ID_EBMLMAXIDLENGTH
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new side data for a stream.
static const uint16_t mask[17]
int frame_size
Audio only.
static int matroska_parse_cluster(MatroskaDemuxContext *matroska)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
static MatroskaLevel1Element * matroska_find_level1_elem(MatroskaDemuxContext *matroska, uint32_t id, int64_t pos)
@ MATROSKA_COLOUR_CHROMASITINGVERT_UNDETERMINED
static const AVOption options[]
#define MATROSKA_ID_FILEUID
@ MATROSKA_VIDEO_INTERLACE_FLAG_PROGRESSIVE
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
#define MATROSKA_ID_VIDEODISPLAYWIDTH
@ AV_PKT_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata (based on SMPTE-2086:2014).
int flags
Flags modifying the (de)muxer behaviour.
@ MATROSKA_VIDEO_FIELDORDER_BT
unsigned char * buf
Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero.
uint32_t bound_top
Distance from the top edge.
#define MATROSKA_ID_TAGNAME
#define MATROSKA_ID_TIMECODESCALE
static EbmlSyntax matroska_cluster_enter[]
#define MATROSKA_ID_CLUSTERPOSITION
static int matroska_parse_frame(MatroskaDemuxContext *matroska, MatroskaTrack *track, AVStream *st, AVBufferRef *buf, uint8_t *data, int pkt_size, uint64_t timecode, uint64_t lace_duration, int64_t pos, int is_keyframe, uint8_t *additional, uint64_t additional_id, int additional_size, int64_t discard_padding)
int64_t timestamp
Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define MATROSKA_ID_CUETRACKPOSITION
static int webm_dash_manifest_cues(AVFormatContext *s, int64_t init_range)
#define MATROSKA_ID_SEEKENTRY
static int matroska_decode_buffer(uint8_t **buf, int *buf_size, MatroskaTrack *track)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define MATROSKA_ID_TRACKTYPE
enum AVStreamParseType need_parsing
@ MATROSKA_VIDEO_STEREOMODE_TYPE_COL_INTERLEAVED_LR
#define MATROSKA_ID_SEGMENTUID
static EbmlSyntax matroska_track_video_projection[]
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static int ebml_read_sint(AVIOContext *pb, int size, int64_t *num)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
#define MATROSKA_ID_AUDIOCHANNELS
uint64_t chroma_siting_vert
static int ebml_read_float(AVIOContext *pb, int size, double *num)
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new side data for a packet.
#define MATROSKA_ID_VIDEOSTEREOMODE
static void matroska_clear_queue(MatroskaDemuxContext *matroska)
#define MATROSKA_ID_VIDEOCOLORSPACE
#define MATROSKA_ID_TRACKNAME
static void matroska_convert_tag(AVFormatContext *s, EbmlList *list, AVDictionary **metadata, char *prefix)
#define EBML_ID_DOCTYPEREADVERSION
@ AVDISCARD_ALL
discard all
#define MATROSKA_ID_CODECSTATE
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
@ MATROSKA_VIDEO_STEREOMODE_TYPE_TOP_BOTTOM
#define LIBAVUTIL_VERSION_INT
#define MATROSKA_ID_ENCODINGENCALGO
static int read_header(FFV1Context *f)
#define MATROSKA_ID_BLOCKGROUP
#define MATROSKA_ID_VIDEOCOLOR_LUMINANCEMIN
Describe the class of an AVClass context structure.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
#define MATROSKA_ID_ATTACHMENTS
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define MATROSKA_ID_CHAPTERATOM
static EbmlSyntax * ebml_parse_id(EbmlSyntax *syntax, uint32_t id)
#define MATROSKA_ID_TRACKCONTENTENCODINGS
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
static void matroska_parse_cues(MatroskaDemuxContext *matroska)
@ MATROSKA_TRACK_TYPE_AUDIO
#define MATROSKA_ID_EDITIONUID
static int matroska_reset_status(MatroskaDemuxContext *matroska, uint32_t id, int64_t position)
static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
static int matroska_ebmlnum_sint(MatroskaDemuxContext *matroska, AVIOContext *pb, int64_t *num)
#define MATROSKA_ID_ENCODINGORDER
static av_always_inline void flac_parse_block_header(const uint8_t *block_header, int *last, int *type, int *size)
Parse the metadata block parameters from the header.
Rational number (pair of numerator and denominator).
static int matroska_parse_webvtt(MatroskaDemuxContext *matroska, MatroskaTrack *track, AVStream *st, uint8_t *data, int data_len, uint64_t timecode, uint64_t duration, int64_t pos)
#define MATROSKA_ID_WRITINGAPP
int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as stream side data.
#define MATROSKA_ID_VIDEOCOLOR_GY
static EbmlSyntax matroska_track_operation[2]
@ MATROSKA_VIDEO_STEREOMODE_TYPE_BOTTOM_TOP
const char * av_default_item_name(void *ptr)
Return the context name.
uint64_t avio_rb64(AVIOContext *s)
AVIOContext * pb
I/O context.
#define MATROSKA_ID_SIMPLETAG
#define MATROSKA_ID_TRACKPLANEUID
#define MATROSKA_ID_VIDEOPROJECTION
This structure contains the data a format has to probe a file.
#define MATROSKA_ID_SEEKID
#define FLAC_STREAMINFO_SIZE
#define MATROSKA_ID_CHAPTERTIMESTART
const char *const ff_matroska_video_stereo_plane[MATROSKA_VIDEO_STEREO_PLANE_COUNT]
#define MATROSKA_ID_CUEBLOCKNUMBER
@ MATROSKA_TRACK_TYPE_VIDEO
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define MATROSKA_ID_FILEMIMETYPE
#define MATROSKA_ID_VIDEODISPLAYHEIGHT
#define MATROSKA_ID_TRACKAUDIO
@ AVCOL_RANGE_UNSPECIFIED
static EbmlSyntax matroska_index_pos[]
MatroskaTrackCompression compression
int sample_rate
Audio only.
const uint8_t ff_log2_tab[256]
AVCodecID
Identify the syntax and semantics of the bitstream.
#define MATROSKA_ID_VIDEOCOLORMAXCLL
#define MATROSKA_ID_TRACKENTRY
int extradata_size
Size of the extradata content in bytes.
static EbmlSyntax matroska_track_encoding_encryption[]
uint64_t default_duration
#define MATROSKA_ID_TAGDEFAULT
@ MATROSKA_TRACK_ENCODING_COMP_BZLIB
const AVCodecTag ff_codec_movvideo_tags[]
#define MATROSKA_ID_AUDIOBITDEPTH
@ MATROSKA_VIDEO_STEREOMODE_TYPE_LEFT_RIGHT
#define MATROSKA_ID_TRACKFLAGDEFAULT
int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen)
Decodes LZO 1x compressed data.
int ffio_init_context(AVIOContext *s, unsigned char *buffer, int buffer_size, int write_flag, void *opaque, int(*read_packet)(void *opaque, uint8_t *buf, int buf_size), int(*write_packet)(void *opaque, uint8_t *buf, int buf_size), int64_t(*seek)(void *opaque, int64_t offset, int whence))
@ AV_SPHERICAL_CUBEMAP
Video frame is split into 6 faces of a cube, and arranged on a 3x2 layout.
#define MATROSKA_ID_ENCODINGCOMPALGO
#define MATROSKA_ID_TRACKFLAGLACING
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
uint64_t matrix_coefficients
#define MATROSKA_ID_VIDEOCOLOR_GX
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
static void matroska_convert_tags(AVFormatContext *s)
uint32_t bound_right
Distance from the right edge.
MatroskaTrackEncryption encryption
#define MATROSKA_ID_TRACKFLAGENABLED
static int webm_dash_manifest_read_packet(AVFormatContext *s, AVPacket *pkt)
#define MATROSKA_ID_FILEDATA
uint64_t max_block_additional_id
#define MATROSKA_ID_VIDEOPROJECTIONPOSEPITCH
@ MATROSKA_VIDEO_FIELDORDER_UNDETERMINED
#define MATROSKA_ID_BLOCKMORE
int ff_get_qtpalette(int codec_id, AVIOContext *pb, uint32_t *palette)
Retrieve the palette (or "color table" in QuickTime terms), either from the video sample description,...
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void mkv_stereo_mode_display_mul(int stereo_mode, int *h_width, int *h_height)
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define MATROSKA_ID_VIDEOCOLORMAXFALL
static EbmlSyntax matroska_attachments[2]
#define MKBETAG(a, b, c, d)
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB]
#define EBML_UNKNOWN_LENGTH
#define MATROSKA_ID_VIDEOCOLORCBSUBHORZ
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
#define INITIALIZATION_RANGE
#define MATROSKA_ID_POINTENTRY
static const uint8_t header[24]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
int avio_r8(AVIOContext *s)
static EbmlSyntax matroska_simpletag[]
uint32_t padding
Number of pixels to pad from the edge of each cube face.
static int mkv_field_order(MatroskaDemuxContext *matroska, int64_t field_order)
#define MATROSKA_ID_ENCODINGTYPE
static int ebml_read_uint(AVIOContext *pb, int size, uint64_t *num)
@ AV_PKT_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
int flags
A combination of AV_PKT_FLAG values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
uint64_t codec_delay_in_track_tb
#define MATROSKA_ID_VIDEOPIXELHEIGHT
uint64_t chroma_siting_horz
#define MATROSKA_ID_VIDEOFRAMERATE
#define AV_LOG_INFO
Standard information.
static int matroska_aac_sri(int samplerate)
#define MATROSKA_ID_MUXINGAPP
#define MATROSKA_ID_EDITIONFLAGDEFAULT
static const struct @315 planes[]
int error_recognition
Error recognition; higher values will detect more errors but may misdetect some more or less valid pa...
#define MATROSKA_ID_TRACKPLANETYPE
int ffio_limit(AVIOContext *s, int size)
#define MATROSKA_ID_CODECINFOURL
#define MATROSKA_ID_FILEDESC
static int webm_clusters_start_with_keyframe(AVFormatContext *s)
#define MATROSKA_ID_VIDEOPROJECTIONTYPE
#define MATROSKA_ID_FILENAME
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
int32_t roll
Rotation around the forward vector [-180, 180].
#define MATROSKA_ID_CODECID
static EbmlSyntax matroska_track_video[19]
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
#define AV_LZO_OUTPUT_PADDING
static EbmlSyntax matroska_track_audio[]
static EbmlSyntax matroska_tagtargets[]
@ MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP
#define AV_TIME_BASE
Internal time base represented as integer.
#define MATROSKA_ID_CUECLUSTERPOSITION
@ MATROSKA_TRACK_ENCODING_COMP_ZLIB
int block_align
Audio only.
#define MATROSKA_ID_CHAPTERPHYSEQUIV
MatroskaTrackVideoProjection projection
#define av_malloc_array(a, b)
#define MATROSKA_ID_CUETRACK
#define MATROSKA_ID_SEEKPOSITION
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define MATROSKA_ID_CLUSTER
#define MATROSKA_ID_TRACKLANGUAGE
@ AVMEDIA_TYPE_ATTACHMENT
Opaque data information usually sparse.
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
#define MATROSKA_ID_VIDEOFLAGINTERLACED
static EbmlSyntax matroska_chapter[6]
const AVMetadataConv ff_mkv_metadata_conv[]
@ AV_PKT_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
enum AVColorRange color_range
Video only.
static int get_qt_codec(MatroskaTrack *track, uint32_t *fourcc, enum AVCodecID *codec_id)
int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, AVCodecParameters *par, int size, int big_endian)
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
#define MATROSKA_ID_EDITIONFLAGHIDDEN
#define MATROSKA_ID_CHAPTERUID
unsigned int alloc_elem_size
static EbmlSyntax matroska_index[2]
enum AVFieldOrder field_order
Video only.
@ MATROSKA_VIDEO_STEREOMODE_TYPE_RIGHT_LEFT
static EbmlSyntax matroska_mastering_meta[]
#define MATROSKA_ID_VIDEOPROJECTIONPOSEYAW
static EbmlSyntax matroska_track_video_color[15]
int disposition
AV_DISPOSITION_* bit field.
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
static const CodecMime mkv_mime_tags[]
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
fseek() equivalent for AVIOContext.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
int32_t pitch
Rotation around the right vector [-90, 90].
#define MATROSKA_ID_TRACKCOMBINEPLANES
unsigned int avio_rb16(AVIOContext *s)
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
static EbmlSyntax matroska_blockmore[]
#define MATROSKA_ID_VIDEOCOLOR_BX
#define MATROSKA_ID_TAGSTRING
#define MATROSKA_ID_VIDEOCOLORMATRIXCOEFF
union EbmlSyntax::@259 def
@ AV_CODEC_ID_TEXT
raw UTF-8 text
#define MATROSKA_ID_SIMPLEBLOCK
#define MATROSKA_ID_TRACKPLANE
static int buffer_size_after_time_downloaded(int64_t time_ns, double search_sec, int64_t bps, double min_buffer, double *buffer, double *sec_to_download, AVFormatContext *s, int64_t cues_start)
#define AV_INPUT_BUFFER_PADDING_SIZE
#define MATROSKA_ID_VIDEOPIXELCROPL
#define FF_ARRAY_ELEMS(a)
enum AVChromaLocation chroma_location
@ MATROSKA_VIDEO_STEREOMODE_TYPE_COL_INTERLEAVED_RL
#define MATROSKA_ID_CODECNAME
#define MATROSKA_ID_EDITIONENTRY
static EbmlSyntax matroska_seekhead_entry[]
int index
stream index in AVFormatContext
#define MATROSKA_ID_TRACKMAXCACHE
static EbmlSyntax matroska_seekhead[2]
const AVCodecTag ff_codec_bmp_tags[]
static void matroska_add_index_entries(MatroskaDemuxContext *matroska)
#define MATROSKA_ID_TRACKMAXBLKADDID
static EbmlSyntax matroska_blockgroup[8]
#define MATROSKA_ID_TAGTARGETS_TYPEVALUE
#define EBML_ID_EBMLVERSION
#define MATROSKA_ID_CLUSTERPREVSIZE
#define AVIO_SEEKABLE_NORMAL
Seeking works like for a local file.
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
const unsigned char ff_sipr_subpk_size[4]
#define MATROSKA_ID_TAGLANG
static int is_ebml_id_valid(uint32_t id)
#define MATROSKA_VIDEO_STEREO_PLANE_COUNT
#define MATROSKA_ID_VIDEOCOLOR
static int webm_dash_manifest_read_header(AVFormatContext *s)
static EbmlSyntax matroska_tracks[2]
#define MATROSKA_ID_VIDEOCOLORTRANSFERCHARACTERISTICS
A Quick Description Of Rate Distortion Theory. We want to encode a video
#define MATROSKA_ID_VIDEOCOLOR_RX
static int matroska_parse_wavpack(MatroskaTrack *track, uint8_t **data, int *size)
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
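A minimal usage sketch (hypothetical helper, error handling reduced to the essentials): the return value is the number of bytes actually read or a negative AVERROR, and avio_feof() distinguishes EOF from a genuine read error.

#include "libavformat/avio.h"
#include "libavutil/error.h"

static int read_header_bytes(AVIOContext *pb, uint8_t hdr[4])
{
    int n = avio_read(pb, hdr, 4);
    if (n < 0)
        return n;                                          /* propagate the AVERROR */
    if (n < 4)
        return avio_feof(pb) ? AVERROR_EOF : AVERROR(EIO); /* short read */
    return 0;
}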
@ MATROSKA_TRACK_TYPE_SUBTITLE
static av_const int sign_extend(int val, unsigned bits)
AVRational r_frame_rate
Real base framerate of the stream.
#define AV_LZO_OUTPUT_FULL
decoded data did not fit into output buffer
int eof_reached
true if reading failed due to an error or EOF
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
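A hedged sketch of the calling convention; encode_block_key() is a hypothetical helper, and AV_BASE64_SIZE() from libavutil/base64.h gives the worst-case output size including the terminating NUL.

#include "libavutil/base64.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int encode_block_key(const uint8_t *key, int key_len, char **out)
{
    char *b64 = av_malloc(AV_BASE64_SIZE(key_len));
    if (!b64)
        return AVERROR(ENOMEM);
    av_base64_encode(b64, AV_BASE64_SIZE(key_len), key, key_len);
    *out = b64;                                 /* caller frees with av_free() */
    return 0;
}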
static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t cues_start)
#define MATROSKA_ID_VIDEOCOLORCHROMASUBVERT
@ MATROSKA_VIDEO_PROJECTION_TYPE_EQUIRECTANGULAR
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
Converts swscale x/y chroma position to AVChromaLocation.
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
static int matroska_parse_rm_audio(MatroskaDemuxContext *matroska, MatroskaTrack *track, AVStream *st, uint8_t *data, int size, uint64_t timecode, int64_t pos)
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
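Illustrative only: every av_buffer_ref() must be paired with av_buffer_unref(), and the underlying data is freed only when the last reference is dropped.

#include "libavutil/buffer.h"

static void buffer_ref_demo(void)
{
    AVBufferRef *a = av_buffer_alloc(4096);
    AVBufferRef *b;
    if (!a)
        return;
    b = av_buffer_ref(a);     /* second reference to the same data            */
    av_buffer_unref(&a);      /* data stays alive as long as b references it  */
    av_buffer_unref(&b);      /* last reference dropped: the buffer is freed  */
}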
int64_t avio_skip(AVIOContext *s, int64_t offset)
Skip given number of bytes forward.
#define MATROSKA_ID_VIDEOCOLORCHROMASITINGHORZ
@ MATROSKA_TRACK_ENCODING_COMP_LZO
#define FF_DISABLE_DEPRECATION_WARNINGS
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
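A minimal sketch, with tag_track_number() as a hypothetical helper: the integer is stored in its decimal string form and can be read back with av_dict_get().

#include "libavutil/dict.h"

static int tag_track_number(AVDictionary **meta, int64_t nb)
{
    int ret = av_dict_set_int(meta, "track_number", nb, 0);  /* stored e.g. as "3" */
    if (ret < 0)
        return ret;
    /* av_dict_get(*meta, "track_number", NULL, 0)->value returns the string */
    return 0;
}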
AVSphericalMapping * av_spherical_alloc(size_t *size)
Allocate an AVSphericalMapping structure and initialize its fields to default values.
char * av_strdup(const char *s)
Duplicate a string.
#define MATROSKA_ID_BLOCKREFERENCE
int bits_per_coded_sample
The number of bits per sample in the coded words.
A reference to a data buffer.
#define MATROSKA_ID_VIDEOCOLOR_RY
@ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL
Data found in BlockAdditional element of matroska container.
@ MATROSKA_VIDEO_FIELDORDER_TB
const int avpriv_mpeg4audio_sample_rates[16]
uint32_t bound_left
Distance from the left edge.
int ff_mkv_stereo3d_conv(AVStream *st, MatroskaVideoStereoModeType stereo_mode)
#define MATROSKA_ID_TRACKVIDEO
@ MATROSKA_VIDEO_STEREOMODE_TYPE_BOTH_EYES_BLOCK_RL
unsigned char * buffer
Start of the buffer.
const CodecTags ff_mkv_codec_tags[]
#define MATROSKA_ID_TRACKFLAGFORCED
@ MATROSKA_VIDEO_STEREOMODE_TYPE_NB
#define EBML_ID_EBMLREADVERSION
static int matroska_deliver_packet(MatroskaDemuxContext *matroska, AVPacket *pkt)
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
uint32_t palette[AVPALETTE_COUNT]
This structure stores compressed data.
unsigned MaxFALL
Max average light level per frame (cd/m^2).
#define MATROSKA_ID_ENCODINGENCAESSETTINGS
#define MATROSKA_ID_CHAPTERFLAGHIDDEN
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
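Illustrative only: with flags 0, both key and value are copied, and setting an existing key replaces its previous value.

#include "libavutil/dict.h"

static void dict_overwrite_demo(void)
{
    AVDictionary *tags = NULL;
    av_dict_set(&tags, "title", "first",  0);   /* creates the entry            */
    av_dict_set(&tags, "title", "second", 0);   /* replaces the previous value  */
    av_dict_free(&tags);                        /* frees all keys and values    */
}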
#define MATROSKA_ID_ATTACHEDFILE
int64_t pos
byte position in stream, -1 if unknown
#define MATROSKA_ID_TRACKMINCACHE
#define MATROSKA_ID_CODECDELAY
uint64_t channel_layout
Audio only.
uint64_t transfer_characteristics
static EbmlSyntax matroska_track_plane[]
#define MATROSKA_ID_ENCODINGCOMPRESSION
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
#define MATROSKA_ID_TAGTARGETS_TRACKUID
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
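A small sketch, assuming the return value is the length of src (so a result greater than or equal to the buffer size indicates truncation); copy_title() is a hypothetical helper.

#include "libavutil/avstring.h"

static int copy_title(char *dst, size_t dst_size, const char *src)
{
    size_t len = av_strlcpy(dst, src, dst_size);  /* dst is always NUL-terminated */
    return len >= dst_size;                       /* nonzero: src was truncated   */
}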
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
The exact code depends on how similar the blocks are and how related they are to the block
#define MATROSKA_ID_CHAPTERTIMEEND
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static EbmlSyntax matroska_track_encoding[6]
static EbmlSyntax matroska_chapter_display[]
const AVCodecTag ff_codec_movaudio_tags[]
#define MATROSKA_ID_TRACKS
#define MATROSKA_ID_ENCODINGENCKEYID
@ AV_PKT_DATA_WEBVTT_IDENTIFIER
The optional first identifier line of a WebVTT cue.
static void ebml_free(EbmlSyntax *syntax, void *data)
#define MATROSKA_ID_ENCODINGCOMPSETTINGS
int cues_parsing_deferred
int ff_vorbis_comment(AVFormatContext *ms, AVDictionary **m, const uint8_t *buf, int size, int parse_picture)
#define MATROSKA_ID_SEEKPREROLL
unsigned char * buf_ptr
Current position in the buffer.
#define MATROSKA_ID_SEEKHEAD
uint64_t skip_to_timecode
int initial_padding
Audio only.
#define MATROSKA_ID_VIDEOPROJECTIONPOSEROLL
#define MATROSKA_ID_BLOCKADDID
static CueDesc get_cue_desc(AVFormatContext *s, int64_t ts, int64_t cues_start)
This structure describes how to handle spherical videos, outlining information about projection,...
int avpriv_dict_set_timestamp(AVDictionary **dict, const char *key, int64_t timestamp)
Set a dictionary value to an ISO-8601 compliant timestamp string.
int32_t yaw
Rotation around the up vector [-180, 180].
#define MATROSKA_ID_TAGTARGETS_CHAPTERUID
static int ebml_read_length(MatroskaDemuxContext *matroska, AVIOContext *pb, uint64_t *number)
Read an EBML length value.
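The EBML length encoding itself is simple enough to sketch stand-alone (hypothetical helper, not the FFmpeg implementation): the number of leading zero bits in the first byte gives the count of following bytes, and the length-marker bit is stripped from the value.

#include <stddef.h>
#include <stdint.h>

/* Returns the number of bytes consumed, or -1 on invalid/truncated input. */
static int read_ebml_length_sketch(const uint8_t *buf, size_t avail, uint64_t *value)
{
    int extra = 0;
    uint8_t mask = 0x80;
    uint64_t v;

    if (!avail || !buf[0])
        return -1;                        /* 0x00 cannot start an EBML number */
    while (!(buf[0] & mask)) {            /* count leading zero bits          */
        extra++;
        mask >>= 1;
    }
    if ((size_t)extra + 1 > avail)
        return -1;                        /* not enough bytes available       */
    v = buf[0] & (mask - 1);              /* drop the length-marker bit       */
    for (int i = 1; i <= extra; i++)
        v = (v << 8) | buf[i];
    *value = v;
    return extra + 1;
}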
@ FLAC_METADATA_TYPE_VORBIS_COMMENT
#define MATROSKA_ID_TRACKTIMECODESCALE
static EbmlSyntax matroska_chapter_entry[9]
@ MATROSKA_VIDEO_PROJECTION_TYPE_CUBEMAP
static int matroska_aac_profile(char *codec_id)
static int is_keyframe(NalUnitType naltype)
#define av_fourcc2str(fourcc)
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags)
Get the index for a specific timestamp.
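A hedged usage sketch, assuming an FFmpeg version in which AVStream.index_entries is still a public field: with AVSEEK_FLAG_BACKWARD the call returns the last entry at or before the timestamp, or a negative value if none exists.

#include "libavformat/avformat.h"

static int64_t index_pos_for(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;                       /* no entry at or before ts          */
    return st->index_entries[idx].pos;   /* file position of that index entry */
}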
static int matroska_parse_flac(AVFormatContext *s, MatroskaTrack *track, int *offset)
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
#define MATROSKA_ID_BLOCKADDITIONS
@ MATROSKA_VIDEO_INTERLACE_FLAG_UNDETERMINED