Go to the documentation of this file.
166 #define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP 0x38000000
170 #define HALF_FLOAT_MAX_BIASED_EXP_AS_SINGLE_FP_EXP 0x47800000
173 #define FLOAT_MAX_BIASED_EXP (0xFF << 23)
175 #define HALF_FLOAT_MAX_BIASED_EXP (0x1F << 10)
186 unsigned int sign = (
unsigned int) (hf >> 15);
187 unsigned int mantissa = (
unsigned int) (hf & ((1 << 10) - 1));
197 mantissa = (1 << 23) - 1;
198 }
else if (
exp == 0x0) {
204 while ((mantissa & (1 << 10))) {
211 mantissa &= ((1 << 10) - 1);
222 f.i = (sign << 31) |
exp | mantissa;
230 unsigned long dest_len = uncompressed_size;
232 if (uncompress(
td->tmp, &dest_len,
src, compressed_size) != Z_OK ||
233 dest_len != uncompressed_size)
238 s->dsp.predictor(
td->tmp, uncompressed_size);
239 s->dsp.reorder_pixels(
td->uncompressed_data,
td->tmp, uncompressed_size);
248 const int8_t *
s =
src;
249 int ssize = compressed_size;
250 int dsize = uncompressed_size;
260 if ((dsize -= count) < 0 ||
261 (ssize -= count + 1) < 0)
269 if ((dsize -= count) < 0 ||
285 ctx->dsp.predictor(
td->tmp, uncompressed_size);
286 ctx->dsp.reorder_pixels(
td->uncompressed_data,
td->tmp, uncompressed_size);
291 #define USHORT_RANGE (1 << 16)
292 #define BITMAP_SIZE (1 << 13)
299 if ((
i == 0) || (bitmap[
i >> 3] & (1 << (
i & 7))))
309 static void apply_lut(
const uint16_t *lut, uint16_t *dst,
int dsize)
313 for (
i = 0;
i < dsize; ++
i)
314 dst[
i] = lut[dst[
i]];
317 #define HUF_ENCBITS 16 // literal (value) bit length
318 #define HUF_DECBITS 14 // decoding bit size (>= 8)
320 #define HUF_ENCSIZE ((1 << HUF_ENCBITS) + 1) // encoding table size
321 #define HUF_DECSIZE (1 << HUF_DECBITS) // decoding table size
322 #define HUF_DECMASK (HUF_DECSIZE - 1)
332 uint64_t
c, n[59] = { 0 };
339 for (
i = 58;
i > 0; --
i) {
340 uint64_t nc = ((
c + n[
i]) >> 1);
349 hcode[
i] = l | (n[l]++ << 6);
353 #define SHORT_ZEROCODE_RUN 59
354 #define LONG_ZEROCODE_RUN 63
355 #define SHORTEST_LONG_RUN (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN)
356 #define LONGEST_LONG_RUN (255 + SHORTEST_LONG_RUN)
366 for (;
im <= iM;
im++) {
372 if (
im + zerun > iM + 1)
382 if (
im + zerun > iM + 1)
401 for (;
im <= iM;
im++) {
402 uint64_t
c = hcode[
im] >> 6;
403 int i, l = hcode[
im] & 63;
424 if (pl->
len || pl->
p)
435 #define get_char(c, lc, gb) \
437 c = (c << 8) | bytestream2_get_byte(gb); \
441 #define get_code(po, rlc, c, lc, gb, out, oe, outb) \
445 get_char(c, lc, gb); \
450 if (out + cs > oe || out == outb) \
451 return AVERROR_INVALIDDATA; \
457 } else if (out < oe) { \
460 return AVERROR_INVALIDDATA; \
466 int rlc,
int no, uint16_t *
out)
469 uint16_t *outb =
out;
470 uint16_t *oe =
out + no;
491 for (j = 0; j < pl.
lit; j++) {
492 int l = hcode[pl.
p[j]] & 63;
498 if ((hcode[pl.
p[j]] >> 6) ==
499 ((
c >> (lc - l)) & ((1LL << l) - 1))) {
520 if (pl.
len && lc >= pl.
len) {
528 if (
out - outb != no)
534 uint16_t *dst,
int dst_size)
542 src_size = bytestream2_get_le32(gb);
543 im = bytestream2_get_le32(gb);
544 iM = bytestream2_get_le32(gb);
546 nBits = bytestream2_get_le32(gb);
556 if (!freq || !hdec) {
584 static inline void wdec14(uint16_t l, uint16_t
h, uint16_t *
a, uint16_t *
b)
589 int ai = ls + (hi & 1) + (hi >> 1);
591 int16_t bs = ai - hi;
598 #define A_OFFSET (1 << (NBITS - 1))
599 #define MOD_MASK ((1 << NBITS) - 1)
601 static inline void wdec16(uint16_t l, uint16_t
h, uint16_t *
a, uint16_t *
b)
612 int ny,
int oy, uint16_t mx)
614 int w14 = (mx < (1 << 14));
615 int n = (nx > ny) ? ny : nx;
628 uint16_t *ey =
in + oy * (ny - p2);
629 uint16_t i00, i01, i10, i11;
635 for (; py <= ey; py += oy2) {
637 uint16_t *ex = py + ox * (nx - p2);
639 for (; px <= ex; px += ox2) {
640 uint16_t *p01 = px + ox1;
641 uint16_t *p10 = px + oy1;
642 uint16_t *p11 = p10 + ox1;
645 wdec14(*px, *p10, &i00, &i10);
646 wdec14(*p01, *p11, &i01, &i11);
647 wdec14(i00, i01, px, p01);
648 wdec14(i10, i11, p10, p11);
650 wdec16(*px, *p10, &i00, &i10);
651 wdec16(*p01, *p11, &i01, &i11);
652 wdec16(i00, i01, px, p01);
653 wdec16(i10, i11, p10, p11);
658 uint16_t *p10 = px + oy1;
661 wdec14(*px, *p10, &i00, p10);
663 wdec16(*px, *p10, &i00, p10);
671 uint16_t *ex = py + ox * (nx - p2);
673 for (; px <= ex; px += ox2) {
674 uint16_t *p01 = px + ox1;
677 wdec14(*px, *p01, &i00, p01);
679 wdec16(*px, *p01, &i00, p01);
694 uint16_t maxval, min_non_zero, max_non_zero;
696 uint16_t *
tmp = (uint16_t *)
td->tmp;
708 if (!
td->bitmap || !
td->lut) {
715 min_non_zero = bytestream2_get_le16(&gb);
716 max_non_zero = bytestream2_get_le16(&gb);
722 if (min_non_zero <= max_non_zero)
724 max_non_zero - min_non_zero + 1);
725 memset(
td->bitmap + max_non_zero + 1, 0,
BITMAP_SIZE - max_non_zero - 1);
734 for (
i = 0;
i <
s->nb_channels;
i++) {
742 for (j = 0; j < pixel_half_size; j++)
744 td->xsize * pixel_half_size, maxval);
745 ptr +=
td->xsize *
td->ysize * pixel_half_size;
750 out = (uint16_t *)
td->uncompressed_data;
753 for (j = 0; j <
s->nb_channels; j++) {
760 in =
tmp + tmp_offset *
td->xsize *
td->ysize +
i *
td->xsize * pixel_half_size;
761 tmp_offset += pixel_half_size;
764 s->bbdsp.bswap16_buf(
out,
in,
td->xsize * pixel_half_size);
766 memcpy(
out,
in,
td->xsize * 2 * pixel_half_size);
768 out +=
td->xsize * pixel_half_size;
776 int compressed_size,
int uncompressed_size,
779 unsigned long dest_len, expected_len = 0;
784 for (
i = 0;
i <
s->nb_channels;
i++) {
786 expected_len += (
td->xsize *
td->ysize * 3);
787 }
else if (
s->channels[
i].pixel_type ==
EXR_HALF) {
788 expected_len += (
td->xsize *
td->ysize * 2);
790 expected_len += (
td->xsize *
td->ysize * 4);
794 dest_len = expected_len;
796 if (uncompress(
td->tmp, &dest_len,
src, compressed_size) != Z_OK) {
798 }
else if (dest_len != expected_len) {
802 out =
td->uncompressed_data;
803 for (
i = 0;
i <
td->ysize;
i++)
804 for (
c = 0;
c <
s->nb_channels;
c++) {
812 ptr[1] = ptr[0] +
td->xsize;
813 ptr[2] = ptr[1] +
td->xsize;
814 in = ptr[2] +
td->xsize;
816 for (j = 0; j <
td->xsize; ++j) {
817 uint32_t
diff = ((unsigned)*(ptr[0]++) << 24) |
818 (*(ptr[1]++) << 16) |
826 ptr[1] = ptr[0] +
td->xsize;
827 in = ptr[1] +
td->xsize;
828 for (j = 0; j <
td->xsize; j++) {
829 uint32_t
diff = (*(ptr[0]++) << 8) | *(ptr[1]++);
837 ptr[1] = ptr[0] +
s->xdelta;
838 ptr[2] = ptr[1] +
s->xdelta;
839 ptr[3] = ptr[2] +
s->xdelta;
840 in = ptr[3] +
s->xdelta;
842 for (j = 0; j <
s->xdelta; ++j) {
843 uint32_t
diff = ((uint32_t)*(ptr[0]++) << 24) |
844 (*(ptr[1]++) << 16) |
845 (*(ptr[2]++) << 8 ) |
861 unsigned short shift = (
b[ 2] >> 2) & 15;
862 unsigned short bias = (0x20 <<
shift);
865 s[ 0] = (
b[0] << 8) |
b[1];
867 s[ 4] =
s[ 0] + ((((
b[ 2] << 4) | (
b[ 3] >> 4)) & 0x3f) <<
shift) - bias;
868 s[ 8] =
s[ 4] + ((((
b[ 3] << 2) | (
b[ 4] >> 6)) & 0x3f) <<
shift) - bias;
869 s[12] =
s[ 8] + ((
b[ 4] & 0x3f) <<
shift) - bias;
871 s[ 1] =
s[ 0] + ((
b[ 5] >> 2) <<
shift) - bias;
872 s[ 5] =
s[ 4] + ((((
b[ 5] << 4) | (
b[ 6] >> 4)) & 0x3f) <<
shift) - bias;
873 s[ 9] =
s[ 8] + ((((
b[ 6] << 2) | (
b[ 7] >> 6)) & 0x3f) <<
shift) - bias;
874 s[13] =
s[12] + ((
b[ 7] & 0x3f) <<
shift) - bias;
876 s[ 2] =
s[ 1] + ((
b[ 8] >> 2) <<
shift) - bias;
877 s[ 6] =
s[ 5] + ((((
b[ 8] << 4) | (
b[ 9] >> 4)) & 0x3f) <<
shift) - bias;
878 s[10] =
s[ 9] + ((((
b[ 9] << 2) | (
b[10] >> 6)) & 0x3f) <<
shift) - bias;
879 s[14] =
s[13] + ((
b[10] & 0x3f) <<
shift) - bias;
881 s[ 3] =
s[ 2] + ((
b[11] >> 2) <<
shift) - bias;
882 s[ 7] =
s[ 6] + ((((
b[11] << 4) | (
b[12] >> 4)) & 0x3f) <<
shift) - bias;
883 s[11] =
s[10] + ((((
b[12] << 2) | (
b[13] >> 6)) & 0x3f) <<
shift) - bias;
884 s[15] =
s[14] + ((
b[13] & 0x3f) <<
shift) - bias;
886 for (
i = 0;
i < 16; ++
i) {
898 s[0] = (
b[0] << 8) |
b[1];
905 for (
i = 1;
i < 16;
i++)
912 const int8_t *sr =
src;
913 int stay_to_uncompress = compressed_size;
914 int nb_b44_block_w, nb_b44_block_h;
915 int index_tl_x, index_tl_y, index_out, index_tmp;
916 uint16_t tmp_buffer[16];
918 int target_channel_offset = 0;
921 nb_b44_block_w =
td->xsize / 4;
922 if ((
td->xsize % 4) != 0)
925 nb_b44_block_h =
td->ysize / 4;
926 if ((
td->ysize % 4) != 0)
929 for (
c = 0;
c <
s->nb_channels;
c++) {
931 for (iY = 0; iY < nb_b44_block_h; iY++) {
932 for (iX = 0; iX < nb_b44_block_w; iX++) {
933 if (stay_to_uncompress < 3) {
938 if (
src[compressed_size - stay_to_uncompress + 2] == 0xfc) {
941 stay_to_uncompress -= 3;
943 if (stay_to_uncompress < 14) {
949 stay_to_uncompress -= 14;
956 for (y = index_tl_y; y <
FFMIN(index_tl_y + 4,
td->ysize); y++) {
957 for (
x = index_tl_x;
x <
FFMIN(index_tl_x + 4,
td->xsize);
x++) {
958 index_out = target_channel_offset *
td->xsize + y *
td->channel_line_size + 2 *
x;
959 index_tmp = (y-index_tl_y) * 4 + (
x-index_tl_x);
960 td->uncompressed_data[index_out] = tmp_buffer[index_tmp] & 0xff;
961 td->uncompressed_data[index_out + 1] = tmp_buffer[index_tmp] >> 8;
966 target_channel_offset += 2;
968 if (stay_to_uncompress < td->ysize *
td->xsize * 4) {
973 for (y = 0; y <
td->ysize; y++) {
974 index_out = target_channel_offset *
td->xsize + y *
td->channel_line_size;
975 memcpy(&
td->uncompressed_data[index_out], sr,
td->xsize * 4);
978 target_channel_offset += 4;
980 stay_to_uncompress -=
td->ysize *
td->xsize * 4;
988 int jobnr,
int threadnr)
993 const uint8_t *channel_buffer[4] = { 0 };
995 uint64_t line_offset, uncompressed_size;
998 uint64_t
line, col = 0;
999 uint64_t tile_x, tile_y, tile_level_x, tile_level_y;
1002 int axmax = (avctx->
width - (
s->xmax + 1)) *
step;
1003 int bxmin =
s->xmin *
step;
1004 int i,
x, buf_size =
s->buf_size;
1005 int c, rgb_channel_count;
1006 float one_gamma = 1.0f /
s->gamma;
1010 line_offset =
AV_RL64(
s->gb.buffer + jobnr * 8);
1013 if (buf_size < 20 || line_offset > buf_size - 20)
1016 src = buf + line_offset + 20;
1024 if (data_size <= 0 || data_size > buf_size - line_offset - 20)
1027 if (tile_level_x || tile_level_y) {
1032 if (
s->xmin ||
s->ymin) {
1037 line =
s->tile_attr.ySize * tile_y;
1038 col =
s->tile_attr.xSize * tile_x;
1041 col < s->xmin || col >
s->xmax)
1044 td->ysize =
FFMIN(
s->tile_attr.ySize,
s->ydelta - tile_y *
s->tile_attr.ySize);
1045 td->xsize =
FFMIN(
s->tile_attr.xSize,
s->xdelta - tile_x *
s->tile_attr.xSize);
1051 if ((col +
td->xsize) !=
s->xdelta)
1054 td->channel_line_size =
td->xsize *
s->current_channel_offset;
1055 uncompressed_size =
td->channel_line_size * (uint64_t)
td->ysize;
1057 if (buf_size < 8 || line_offset > buf_size - 8)
1060 src = buf + line_offset + 8;
1067 if (data_size <= 0 || data_size > buf_size - line_offset - 8)
1070 td->ysize =
FFMIN(
s->scan_lines_per_block,
s->ymax -
line + 1);
1071 td->xsize =
s->xdelta;
1073 td->channel_line_size =
td->xsize *
s->current_channel_offset;
1074 uncompressed_size =
td->channel_line_size * (uint64_t)
td->ysize;
1076 if ((
s->compression ==
EXR_RAW && (data_size != uncompressed_size ||
1077 line_offset > buf_size - uncompressed_size)) ||
1078 (
s->compression !=
EXR_RAW && (data_size > uncompressed_size ||
1079 line_offset > buf_size - data_size))) {
1084 if (data_size < uncompressed_size || s->is_tile) {
1090 if (data_size < uncompressed_size) {
1092 &
td->uncompressed_size, uncompressed_size + 64);
1094 if (!
td->uncompressed_data)
1098 switch (
s->compression) {
1121 src =
td->uncompressed_data;
1125 channel_buffer[0] =
src +
td->xsize *
s->channel_offsets[0];
1126 channel_buffer[1] =
src +
td->xsize *
s->channel_offsets[1];
1127 channel_buffer[2] =
src +
td->xsize *
s->channel_offsets[2];
1128 rgb_channel_count = 3;
1130 channel_buffer[0] =
src +
td->xsize *
s->channel_offsets[1];
1131 rgb_channel_count = 1;
1133 if (
s->channel_offsets[3] >= 0)
1134 channel_buffer[3] =
src +
td->xsize *
s->channel_offsets[3];
1139 int channel_count =
s->channel_offsets[3] >= 0 ? 4 : rgb_channel_count;
1141 channel_buffer[1] = channel_buffer[0];
1142 channel_buffer[2] = channel_buffer[0];
1145 for (
c = 0;
c < channel_count;
c++) {
1146 int plane =
s->desc->comp[
c].plane;
1149 for (
i = 0;
i <
td->ysize;
i++, ptr += p->
linesize[plane]) {
1153 src = channel_buffer[
c];
1157 memset(ptr_x, 0, bxmin);
1163 if (trc_func &&
c < 3) {
1164 for (
x = 0;
x <
td->xsize;
x++) {
1165 t.
i = bytestream_get_le32(&
src);
1166 t.
f = trc_func(t.
f);
1170 for (
x = 0;
x <
td->xsize;
x++) {
1171 t.
i = bytestream_get_le32(&
src);
1172 if (t.
f > 0.0f &&
c < 3)
1173 t.
f =
powf(t.
f, one_gamma);
1180 for (
x = 0;
x <
td->xsize;
x++) {
1181 *ptr_x++ =
s->gamma_table[bytestream_get_le16(&
src)];
1184 for (
x = 0;
x <
td->xsize;
x++) {
1191 memset(ptr_x, 0, axmax);
1192 channel_buffer[
c] +=
td->channel_line_size;
1206 for (
c = 0;
c < rgb_channel_count;
c++) {
1207 rgb[
c] = channel_buffer[
c];
1210 if (channel_buffer[3])
1211 a = channel_buffer[3];
1213 ptr_x = (uint16_t *) ptr;
1216 memset(ptr_x, 0, bxmin);
1217 ptr_x +=
s->xmin *
s->desc->nb_components;
1219 for (
x = 0;
x <
td->xsize;
x++) {
1220 for (
c = 0;
c < rgb_channel_count;
c++) {
1221 *ptr_x++ = bytestream_get_le32(&rgb[
c]) >> 16;
1224 if (channel_buffer[3])
1225 *ptr_x++ = bytestream_get_le32(&
a) >> 16;
1229 memset(ptr_x, 0, axmax);
1231 channel_buffer[0] +=
td->channel_line_size;
1232 channel_buffer[1] +=
td->channel_line_size;
1233 channel_buffer[2] +=
td->channel_line_size;
1234 if (channel_buffer[3])
1235 channel_buffer[3] +=
td->channel_line_size;
1255 const char *value_name,
1256 const char *value_type,
1257 unsigned int minimum_length)
1262 !strcmp(
s->gb.buffer, value_name)) {
1264 s->gb.buffer += strlen(value_name) + 1;
1265 if (!strcmp(
s->gb.buffer, value_type)) {
1266 s->gb.buffer += strlen(value_type) + 1;
1267 var_size = bytestream2_get_le32(&
s->gb);
1273 s->gb.buffer -= strlen(value_name) + 1;
1275 "Unknown data type %s for header variable %s.\n",
1276 value_type, value_name);
1287 int layer_match = 0;
1289 int dup_channels = 0;
1291 s->current_channel_offset = 0;
1298 s->channel_offsets[0] = -1;
1299 s->channel_offsets[1] = -1;
1300 s->channel_offsets[2] = -1;
1301 s->channel_offsets[3] = -1;
1307 s->tile_attr.xSize = -1;
1308 s->tile_attr.ySize = -1;
1317 magic_number = bytestream2_get_le32(&
s->gb);
1318 if (magic_number != 20000630) {
1325 version = bytestream2_get_byte(&
s->gb);
1331 flags = bytestream2_get_le24(&
s->gb);
1348 "chlist", 38)) >= 0) {
1360 int channel_index = -1;
1363 if (strcmp(
s->layer,
"") != 0) {
1364 if (strncmp(ch_gb.
buffer,
s->layer, strlen(
s->layer)) == 0) {
1367 "Channel match layer : %s.\n", ch_gb.
buffer);
1368 ch_gb.
buffer += strlen(
s->layer);
1369 if (*ch_gb.
buffer ==
'.')
1374 "Channel doesn't match layer : %s.\n", ch_gb.
buffer);
1402 "Unsupported channel %.256s.\n", ch_gb.
buffer);
1408 bytestream2_get_byte(&ch_gb))
1417 current_pixel_type = bytestream2_get_le32(&ch_gb);
1420 current_pixel_type);
1426 xsub = bytestream2_get_le32(&ch_gb);
1427 ysub = bytestream2_get_le32(&ch_gb);
1429 if (xsub != 1 || ysub != 1) {
1431 "Subsampling %dx%d",
1437 if (channel_index >= 0 &&
s->channel_offsets[channel_index] == -1) {
1439 s->pixel_type != current_pixel_type) {
1441 "RGB channels not of the same depth.\n");
1445 s->pixel_type = current_pixel_type;
1446 s->channel_offsets[channel_index] =
s->current_channel_offset;
1447 }
else if (channel_index >= 0) {
1449 "Multiple channels with index %d.\n", channel_index);
1450 if (++dup_channels > 10) {
1462 channel = &
s->channels[
s->nb_channels - 1];
1463 channel->pixel_type = current_pixel_type;
1467 if (current_pixel_type ==
EXR_HALF) {
1468 s->current_channel_offset += 2;
1470 s->current_channel_offset += 4;
1477 if (
FFMIN3(
s->channel_offsets[0],
1478 s->channel_offsets[1],
1479 s->channel_offsets[2]) < 0) {
1480 if (
s->channel_offsets[0] < 0)
1482 if (
s->channel_offsets[1] < 0)
1484 if (
s->channel_offsets[2] < 0)
1492 s->gb.buffer = ch_gb.
buffer + 1;
1501 s->xmin = bytestream2_get_le32(&
s->gb);
1502 s->ymin = bytestream2_get_le32(&
s->gb);
1503 s->xmax = bytestream2_get_le32(&
s->gb);
1504 s->ymax = bytestream2_get_le32(&
s->gb);
1505 s->xdelta = (
s->xmax -
s->xmin) + 1;
1506 s->ydelta = (
s->ymax -
s->ymin) + 1;
1510 "box2i", 34)) >= 0) {
1517 s->w = bytestream2_get_le32(&
s->gb) + 1;
1518 s->h = bytestream2_get_le32(&
s->gb) + 1;
1522 "lineOrder", 25)) >= 0) {
1529 line_order = bytestream2_get_byte(&
s->gb);
1531 if (line_order > 2) {
1539 "float", 31)) >= 0) {
1545 sar = bytestream2_get_le32(&
s->gb);
1549 "compression", 29)) >= 0) {
1556 s->compression = bytestream2_get_byte(&
s->gb);
1559 "Found more than one compression attribute.\n");
1563 "tiledesc", 22)) >= 0) {
1568 "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n");
1570 s->tile_attr.xSize = bytestream2_get_le32(&
s->gb);
1571 s->tile_attr.ySize = bytestream2_get_le32(&
s->gb);
1573 tileLevel = bytestream2_get_byte(&
s->gb);
1574 s->tile_attr.level_mode = tileLevel & 0x0f;
1575 s->tile_attr.level_round = (tileLevel >> 4) & 0x0f;
1579 s->tile_attr.level_mode);
1586 s->tile_attr.level_round);
1593 "string", 1)) >= 0) {
1610 for (
i = 0;
i < 2;
i++)
1611 while (bytestream2_get_byte(&
s->gb) != 0);
1626 if (
s->tile_attr.xSize < 1 ||
s->tile_attr.ySize < 1) {
1639 frame->metadata = metadata;
1661 uint64_t start_offset_table;
1662 uint64_t start_next_scanline;
1670 switch (
s->pixel_type) {
1673 if (
s->channel_offsets[3] >= 0) {
1689 if (
s->channel_offsets[3] >= 0) {
1711 switch (
s->compression) {
1715 s->scan_lines_per_block = 1;
1719 s->scan_lines_per_block = 16;
1724 s->scan_lines_per_block = 32;
1733 if (
s->xmin >
s->xmax ||
1734 s->ymin >
s->ymax ||
1735 s->xdelta !=
s->xmax -
s->xmin + 1 ||
1750 planes =
s->desc->nb_components;
1751 out_line_size = avctx->
width * 4;
1754 out_line_size = avctx->
width * 2 *
s->desc->nb_components;
1758 nb_blocks = ((
s->xdelta +
s->tile_attr.xSize - 1) /
s->tile_attr.xSize) *
1759 ((
s->ydelta +
s->tile_attr.ySize - 1) /
s->tile_attr.ySize);
1761 nb_blocks = (
s->ydelta +
s->scan_lines_per_block - 1) /
1762 s->scan_lines_per_block;
1772 if (!
s->is_tile && bytestream2_peek_le64(&
s->gb) == 0) {
1776 start_next_scanline = start_offset_table + nb_blocks * 8;
1779 for (y = 0; y < nb_blocks; y++) {
1781 bytestream2_put_le64(&offset_table_writer, start_next_scanline);
1785 start_next_scanline += (bytestream2_get_le32(&
s->gb) + 8);
1791 s->buf = avpkt->
data;
1792 s->buf_size = avpkt->
size;
1796 ptr = picture->
data[
i];
1797 for (y = 0; y <
s->ymin; y++) {
1798 memset(ptr, 0, out_line_size);
1803 s->picture = picture;
1810 for (y =
s->ymax + 1; y < avctx->
height; y++) {
1811 memset(ptr, 0, out_line_size);
1827 float one_gamma = 1.0
f /
s->gamma;
1840 for (
i = 0;
i < 65536; ++
i) {
1842 t.
f = trc_func(t.
f);
1843 s->gamma_table[
i] = t;
1846 if (one_gamma > 0.9999
f && one_gamma < 1.0001
f) {
1847 for (
i = 0;
i < 65536; ++
i) {
1851 for (
i = 0;
i < 65536; ++
i) {
1855 s->gamma_table[
i] = t;
1857 t.
f =
powf(t.
f, one_gamma);
1858 s->gamma_table[
i] = t;
1866 if (!
s->thread_data)
1890 #define OFFSET(x) offsetof(EXRContext, x)
1891 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1893 {
"layer",
"Set the decoding layer",
OFFSET(layer),
1895 {
"gamma",
"Set the float gamma value when decoding",
OFFSET(gamma),
1899 {
"apply_trc",
"color transfer characteristics to apply to EXR linear input",
OFFSET(apply_trc_type),
1901 {
"bt709",
"BT.709", 0,
1903 {
"gamma",
"gamma", 0,
1905 {
"gamma22",
"BT.470 M", 0,
1907 {
"gamma28",
"BT.470 BG", 0,
1909 {
"smpte170m",
"SMPTE 170 M", 0,
1911 {
"smpte240m",
"SMPTE 240 M", 0,
1913 {
"linear",
"Linear", 0,
1917 {
"log_sqrt",
"Log square root", 0,
1919 {
"iec61966_2_4",
"IEC 61966-2-4", 0,
1921 {
"bt1361",
"BT.1361", 0,
1923 {
"iec61966_2_1",
"IEC 61966-2-1", 0,
1925 {
"bt2020_10bit",
"BT.2020 - 10 bit", 0,
1927 {
"bt2020_12bit",
"BT.2020 - 12 bit", 0,
1929 {
"smpte2084",
"SMPTE ST 2084", 0,
1931 {
"smpte428_1",
"SMPTE ST 428-1", 0,
enum ExrTileLevelRound level_round
#define AV_LOG_WARNING
Something somehow does not look correct.
static av_cold int init(AVCodecContext *avctx)
static int rle_uncompress(EXRContext *ctx, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVColorTransferCharacteristic
Color Transfer Characteristic.
uint8_t * uncompressed_data
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
#define FLOAT_MAX_BIASED_EXP
static int decode_header(EXRContext *s, AVFrame *frame)
static int pxr24_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
int av_strcasecmp(const char *a, const char *b)
Locale-independent case-insensitive compare.
static int get_bits_count(const GetBitContext *s)
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
This structure describes decoded (raw) audio or video data.
@ AVCOL_TRC_NB
Not part of ABI.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
static av_cold int decode_init(AVCodecContext *avctx)
static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
static const AVOption options[]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int huf_uncompress(GetByteContext *gb, uint16_t *dst, int dst_size)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
EXRTileAttribute tile_attr
static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize)
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
#define AV_PIX_FMT_GRAY16
@ AVCOL_TRC_LOG_SQRT
"Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
#define HALF_FLOAT_MAX_BIASED_EXP
static av_cold int decode_end(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int zip_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
enum ExrCompr compression
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static int check_header_variable(EXRContext *s, const char *value_name, const char *value_type, unsigned int minimum_length)
Check if the variable name corresponds to its data type.
@ AVCOL_TRC_BT1361_ECG
ITU-R BT1361 Extended Colour Gamut.
int current_channel_offset
static int decode_block(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
enum ExrPixelType pixel_type
#define SHORTEST_LONG_RUN
#define AV_PIX_FMT_GRAYF32
const AVPixFmtDescriptor * desc
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define AV_PIX_FMT_RGBA64
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define LONG_ZEROCODE_RUN
#define SHORT_ZEROCODE_RUN
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
avpriv_trc_function avpriv_get_trc_function_from_trc(enum AVColorTransferCharacteristic trc)
Determine the function needed to apply the given AVColorTransferCharacteristic to linear input.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
static int huf_decode(const uint64_t *hcode, const HufDec *hdecod, GetByteContext *gb, int nbits, int rlc, int no, uint16_t *out)
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
av_cold void ff_exrdsp_init(ExrDSPContext *c)
#define get_char(c, lc, gb)
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
static void unpack_14(const uint8_t b[14], uint16_t s[16])
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
static union av_intfloat32 exr_half2float(uint16_t hf)
Convert a half float as a uint16_t into a full float.
#define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP
enum ExrPixelType pixel_type
enum ExrTileLevelMode level_mode
EXRThreadData * thread_data
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
static void wav_decode(uint16_t *in, int nx, int ox, int ny, int oy, uint16_t mx)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AVCOL_TRC_LOG
"Logarithmic transfer characteristic (100:1 range)"
#define AV_PIX_FMT_GBRPF32
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static int huf_build_dec_table(const uint64_t *hcode, int im, int iM, HufDec *hdecod)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
enum AVColorTransferCharacteristic apply_trc_type
static void unpack_3(const uint8_t b[3], uint16_t s[16])
#define AV_LOG_INFO
Standard information.
@ AVCOL_TRC_BT709
also ITU-R BT1361
static const struct @315 planes[]
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! *ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define get_code(po, rlc, c, lc, gb, out, oe, outb)
#define i(width, name, range_min, range_max)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static int huf_unpack_enc_table(GetByteContext *gb, int32_t im, int32_t iM, uint64_t *hcode)
union av_intfloat32 gamma_table[65536]
double(* avpriv_trc_function)(double)
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
main external API structure.
static void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
#define AV_PIX_FMT_GBRAPF32
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
static void huf_canonical_code_table(uint64_t *hcode)
static int shift(int a, int b)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const AVClass exr_class
static int b44_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize, int dsize, EXRThreadData *td)
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line