{ -1, -1, -1, -1, 2, 4, 6, 8 },
{ -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }

-1, -1, -1, 1, 4, 7, 10, 12,

 8, 6, 4, 2, -1, -1, -1, -1,
-1, -1, -1, -1, 2, 4, 6, 8,
unsigned int min_channels = 1;
unsigned int max_channels = 2;
c->status[0].step = c->status[1].step = 511;
if ((nibble & 8) == 0)
    pred = av_clip(pred + (add >> 3), -32767, 32767);
else
    pred = av_clip(pred - (add >> 3), -32767, 32767);

c->step = av_clip(c->step * 2, 127, 24576);

c->step = av_clip(c->step, 127, 24576);
step_index = av_clip(step_index, 0, 88);

predictor = c->predictor;
if (sign) predictor -= diff;
else      predictor += diff;

c->predictor = av_clip_int16(predictor);
c->step_index = step_index;

return (int16_t)c->predictor;
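The fragments above are the classic IMA ADPCM nibble expansion: the code's sign bit (nibble & 8) picks the direction, the three magnitude bits scale the current step as (2*magnitude + 1)*step/8, and the step index walks the 89-entry step table under control of a per-nibble index table. Below is a minimal, self-contained sketch of that scheme, assuming the standard IMA tables (exposed in FFmpeg as ff_adpcm_step_table and ff_adpcm_index_table); the struct and function names are illustrative, and FFmpeg's adpcm_ima_expand_nibble additionally parameterizes the magnitude shift.

#include <stdint.h>
#include <stdio.h>

static const int16_t step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
static const int8_t index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

struct chan { int predictor; int step_index; };  /* stand-in for ADPCMChannelStatus */

static int clip(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

static int16_t ima_expand(struct chan *c, int nibble)  /* nibble: 0..15 */
{
    int step = step_table[c->step_index];
    /* diff = (2*magnitude + 1) * step / 8, built without a multiply */
    int diff = step >> 3;
    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;
    if (nibble & 8) c->predictor -= diff;   /* sign bit */
    else            c->predictor += diff;
    c->predictor  = clip(c->predictor, -32768, 32767);
    c->step_index = clip(c->step_index + index_table[nibble], 0, 88);
    return (int16_t)c->predictor;
}

int main(void)
{
    struct chan c = { 0, 0 };
    const uint8_t bytes[] = { 0x17, 0x2a };
    for (size_t i = 0; i < sizeof(bytes); i++) {
        /* nibble order varies per container; low-first is shown here */
        printf("%d ", ima_expand(&c, bytes[i] & 0x0f));
        printf("%d ", ima_expand(&c, bytes[i] >> 4));
    }
    putchar('\n');
    return 0;
}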
step_index = av_clip(step_index, 0, 88);

predictor = c->predictor;
if (sign) predictor -= diff;
else      predictor += diff;

c->predictor = av_clip_int16(predictor);
c->step_index = step_index;

return (int16_t)c->predictor;
predictor = c->predictor + delta;

c->predictor = av_clip_int16(predictor >> 4);
c->step_index = av_clip(step_index, 0, 88);

return (int16_t)c->predictor;
step_index = av_clip(step_index, 0, 60);

predictor = c->predictor + step * nibble;

c->predictor = av_clip_int16(predictor);
c->step_index = step_index;
step_index = av_clip(step_index, 0, 88);

sign = nibble & (1 << shift);

predictor = c->predictor;
if (sign) predictor -= diff;
else      predictor += diff;

c->predictor = av_clip_int16(predictor);
c->step_index = step_index;

return (int16_t)c->predictor;
step_index = av_clip(step_index, 0, 88);

if (sign)
    predictor = c->predictor - diff;
else
    predictor = c->predictor + diff;

c->predictor = av_clip_int16(predictor);
c->step_index = step_index;
predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
predictor += ((nibble & 0x08) ? (nibble - 0x10) : (nibble)) * c->idelta;

c->sample2 = c->sample1;
c->sample1 = av_clip_int16(predictor);

if (c->idelta < 16)
    c->idelta = 16;
if (c->idelta > INT_MAX / 768) {
    c->idelta = INT_MAX / 768;
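This is the MS ADPCM step: a second-order linear predictor in fixed point (the /64), plus the sign-extended nibble scaled by an adaptive delta, with the delta adapted through the standard adaptation table and floored at 16 as the lines above show. A hedged, self-contained sketch (struct and function names illustrative; the table values are the standard MS ADPCM ones, ff_adpcm_AdaptationTable in FFmpeg):

#include <stdint.h>

static const int adapt_table[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

struct ms_chan { int sample1, sample2, coeff1, coeff2, idelta; };

static int16_t ms_expand(struct ms_chan *c, int nibble)  /* nibble: 0..15 */
{
    int pred = (c->sample1 * c->coeff1 + c->sample2 * c->coeff2) / 64;
    pred += ((nibble & 8) ? nibble - 16 : nibble) * c->idelta;  /* sign-extend 4 bits */
    if (pred < -32768) pred = -32768;
    if (pred >  32767) pred =  32767;
    c->sample2 = c->sample1;
    c->sample1 = pred;
    c->idelta = (adapt_table[nibble] * c->idelta) >> 8;  /* adapt the step size */
    if (c->idelta < 16) c->idelta = 16;                  /* same floor as above */
    return (int16_t)pred;
}

int main(void)
{
    struct ms_chan c = { 0, 0, 256, 0, 16 };  /* coefficient pair 0: {256, 0} */
    return ms_expand(&c, 0x3) == 48 ? 0 : 1;  /* 3 * 16 = 48 with zero history */
}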
step_index = av_clip(step_index, 0, 48);

predictor = c->predictor;
if (sign) predictor -= diff;
else      predictor += diff;

c->predictor = av_clip_intp2(predictor, 11);
c->step_index = step_index;

return c->predictor * 16;
c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
c->predictor = av_clip_int16(c->predictor);

c->step = av_clip(new_step, 511, 32767);

return (int16_t)c->predictor;
sign = nibble & (1 << (size - 1));

c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384, 16256);

else if (delta == 0 && c->step > 0)

return (int16_t)c->predictor;
c->predictor = av_clip_int16(c->predictor);
c->step = av_clip(c->step, 127, 24576);

c->predictor = av_clip_int16(c->predictor);
c->step = av_clip_uintp2(c->step, 5);
int16_t index = c->step_index;

sample += lookup_sample >> 1;
sample += lookup_sample >> 2;
sample += lookup_sample >> 3;
sample += lookup_sample >> 4;
sample += lookup_sample >> 5;
sample += lookup_sample >> 6;
out0 += sample_offset;
out1 += sample_offset;
s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
s_1 = av_clip_int16(s);

s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
s_1 = av_clip_int16(s);
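These two lines are the CD-XA prediction filter: s equals t scaled by 2^shift plus a two-tap IIR term (s_1*f0 + s_2*f1 + 32) >> 6, with the pair (f0, f1) chosen per sound unit from xa_adpcm_table. A sketch of one sample step under those assumptions (function name illustrative):

static const int xa_filter[5][2] = {
    {   0,   0 }, {  60,   0 }, { 115, -52 }, {  98, -55 }, { 122, -60 }
};

static int xa_step(int t, int shift, int filt, int *s_1, int *s_2)
{
    int s = t * (1 << shift) +
            ((*s_1 * xa_filter[filt][0] + *s_2 * xa_filter[filt][1] + 32) >> 6);
    if (s < -32768) s = -32768;
    if (s >  32767) s =  32767;
    *s_2 = *s_1;   /* age the predictor history */
    *s_1 = s;
    return s;
}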
int k0, signmask, nb_bits, count;
int size = buf_size * 8;

k0 = 1 << (nb_bits - 2);
signmask = 1 << (nb_bits - 1);
if (delta & signmask)
    c->status[i].predictor -= vpdiff;
else
    c->status[i].predictor += vpdiff;

c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
c->status[i].predictor  = av_clip_int16(c->status[i].predictor);
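The SWF path reads nb_bits-wide codes in which the top bit (signmask) is the sign and the remaining bits are magnitude; the lines above apply the resulting vpdiff to each channel's predictor. One way such a magnitude maps to vpdiff is the usual (2*magnitude + 1)*step / 2^(nb_bits - 1) approximation, built from shifted copies of step; the sketch below assumes that formulation, and swf_vpdiff is a hypothetical helper, not FFmpeg API:

static int swf_vpdiff(int delta, int step, int nb_bits)
{
    int vpdiff = 0;
    int k = 1 << (nb_bits - 2);  /* k0: top magnitude bit */
    do {
        if (delta & k)
            vpdiff += step;
        step >>= 1;
        k >>= 1;
    } while (k);
    return vpdiff + step;        /* rounding term: half the smallest step */
}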
int buf_size, int *coded_samples, int *approx_nb_samples)

int has_coded_samples = 0;
*approx_nb_samples = 0;
if (buf_size < 76 * ch)
if (buf_size < 34 * ch)
if (buf_size < 17 * ch)

nb_samples = buf_size * 2 / ch;

return (buf_size - header_size) * 2 / ch;
has_coded_samples = 1;
*coded_samples  = bytestream2_get_le32(gb);
*coded_samples -= *coded_samples % 28;
nb_samples      = (buf_size - 12) / 30 * 28;

has_coded_samples = 1;
*coded_samples = bytestream2_get_le32(gb);
nb_samples     = (buf_size - (4 + 8 * ch)) * 2 / ch;

nb_samples = (buf_size - ch) / ch * 2;
has_coded_samples = 1;
header_size    = 4 + 9 * ch;
*coded_samples = bytestream2_get_le32(gb);
header_size    = 4 + 5 * ch;
*coded_samples = bytestream2_get_le32(gb);
header_size    = 4 + 5 * ch;
*coded_samples = bytestream2_get_be32(gb);
*coded_samples -= *coded_samples % 28;
nb_samples      = (buf_size - header_size) * 2 / ch;
nb_samples     -= nb_samples % 28;
*approx_nb_samples = 1;
nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;

if (buf_size < 4 * ch)
nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;

nb_samples = (buf_size - 4 * ch) * 2 / ch;

if (buf_size < 4 * ch)
nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;

nb_samples = (buf_size - 6 * ch) * 2 / ch;

nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
int samples_per_byte;

if (!s->status[0].step_index) {

nb_samples += buf_size * samples_per_byte / ch;

int buf_bits       = buf_size * 8 - 2;
int nbits          = (bytestream2_get_byte(gb) >> 6) + 2;
int block_hdr_size = 22 * ch;
int block_size     = block_hdr_size + nbits * ch * 4095;
int nblocks        = buf_bits / block_size;
int bits_left      = buf_bits - nblocks * block_size;
nb_samples         = nblocks * 4096;
if (bits_left >= block_hdr_size)
    nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
nb_samples = buf_size * 14 / (8 * ch);

has_coded_samples = 1;
bytestream2_get_le32(gb) : bytestream2_get_be32(gb);
buf_size -= 8 + 36 * ch;
nb_samples = buf_size / 8 * 14;
if (buf_size % 8 > 1)
    nb_samples += (buf_size % 8 - 1) * 2;
*approx_nb_samples = 1;

nb_samples = buf_size / (9 * ch) * 16;

nb_samples = (buf_size / 128) * 224 / ch;

nb_samples = buf_size / (16 * ch) * 28;

nb_samples = buf_size / ch;

if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
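All of these branches follow the same pattern: subtract the per-channel header bytes from buf_size, then convert the remaining payload bytes to samples at the codec's packing density. For instance, with an IMA-WAV-style layout (4 header bytes per channel carrying the first sample, then two 4-bit samples per payload byte), the per-channel count works out as in this illustrative program (buf_size and ch are example values):

#include <stdio.h>

int main(void)
{
    int buf_size = 2048;  /* packet size in bytes (example) */
    int ch       = 2;     /* channel count (example) */
    /* one header sample, plus two nibbles per remaining byte, shared across channels */
    int nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
    printf("%d samples per channel\n", nb_samples);  /* 1 + 2040*2/2 = 2041 */
    return 0;
}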
int *got_frame_ptr, AVPacket *avpkt)

int buf_size = avpkt->size;
int nb_samples, coded_samples, approx_nb_samples, ret;

nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
if (nb_samples <= 0) {

frame->nb_samples = nb_samples;

samples_p = (int16_t **)frame->extended_data;

if (!approx_nb_samples && coded_samples != nb_samples)
    frame->nb_samples = nb_samples = coded_samples;
predictor  = sign_extend(bytestream2_get_be16u(&gb), 16);
step_index = predictor & 0x7F;

for (m = 0; m < 64; m += 2) {
    int byte = bytestream2_get_byteu(&gb);

cs = &(c->status[i]);
for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
    samples = &samples_p[i][1 + n * samples_per_block];
    for (j = 0; j < block_size; j++) {
        (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
    for (m = 0; m < samples_per_block; m++) {

for (n = 0; n < (nb_samples - 1) / 8; n++) {
    samples = &samples_p[i][1 + n * 8];
    for (m = 0; m < 8; m += 2) {
        int v = bytestream2_get_byteu(&gb);
c->status[i].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
if (c->status[i].step_index > 88u) {
    i, c->status[i].step_index);

for (n = nb_samples >> 1; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[i].step      = sign_extend(bytestream2_get_le16u(&gb), 16);

for (n = 0; n < nb_samples >> (1 - st); n++) {
    int v = bytestream2_get_byteu(&gb);
int block_predictor;

block_predictor = bytestream2_get_byteu(&gb);
if (block_predictor > 6) {

for (n = (nb_samples - 2) >> 1; n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);

block_predictor = bytestream2_get_byteu(&gb);
if (block_predictor > 6) {
block_predictor = bytestream2_get_byteu(&gb);
if (block_predictor > 6) {
c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);

c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);

if (st) *samples++ = c->status[1].sample2;
if (st) *samples++ = c->status[1].sample1;
for (n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
c->status[channel    ].step = bytestream2_get_le16u(&gb) & 0x1f;
c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;

for (n = 0; n < nb_samples; n += 2) {
    int v = bytestream2_get_byteu(&gb);
for (n = 0; n < nb_samples; n += 2) {
    int v = bytestream2_get_byteu(&gb);

for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
int decode_top_nibble_next = 0;

c->status[0].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[1].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[0].step_index = bytestream2_get_byteu(&gb);
c->status[1].step_index = bytestream2_get_byteu(&gb);
if (c->status[0].step_index > 88u || c->status[1].step_index > 88u) {
    c->status[0].step_index, c->status[1].step_index);

diff_channel = c->status[1].predictor;
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } else { \
        last_byte = bytestream2_get_byteu(&gb); \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }
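The macro pulls nibbles out of the byte stream low half first, buffering the high half for the next call, because the DK3 layout interleaves the sum and diff channels at nibble granularity. The same iteration order without the macro, as a sketch (names hypothetical):

#include <stdint.h>
#include <stddef.h>

static size_t emit_nibbles(const uint8_t *buf, size_t n, uint8_t *out)
{
    size_t k = 0;
    for (size_t i = 0; i < n; i++) {
        out[k++] = buf[i] & 0x0F;  /* low nibble is decoded first */
        out[k++] = buf[i] >> 4;    /* then the high nibble */
    }
    return k;
}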
while (samples < samples_end) {

    diff_channel = (diff_channel + c->status[1].predictor) / 2;
    *samples++ = c->status[0].predictor + c->status[1].predictor;
    *samples++ = c->status[0].predictor - c->status[1].predictor;

    diff_channel = (diff_channel + c->status[1].predictor) / 2;
    *samples++ = c->status[0].predictor + c->status[1].predictor;
    *samples++ = c->status[0].predictor - c->status[1].predictor;
for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);

for (n = 0; n < nb_samples; n += 2) {
    int v = bytestream2_get_byteu(&gb);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);

for (n = nb_samples / 2; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
for (n = nb_samples / 2; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);

for (n = 0; n < nb_samples / 2; n++) {
    int v = bytestream2_get_byteu(&gb);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
for (n = 0; n < nb_samples / 2; n++) {
    byte[0] = bytestream2_get_byteu(&gb);
    byte[1] = bytestream2_get_byteu(&gb);

if (c->vqa_version == 3) {
    int16_t *smp = samples_p[channel];

    for (n = nb_samples / 2; n > 0; n--) {
        int v = bytestream2_get_byteu(&gb);

for (n = nb_samples / 2; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
int16_t *out0 = samples_p[0];
int16_t *out1 = samples_p[1];
int samples_per_block = 28 * (3 - avctx->channels) * 4;
int sample_offset = 0;
int bytes_remaining;

&c->status[0], &c->status[1], avctx->channels, sample_offset)) < 0)

sample_offset += samples_per_block;

if (bytes_remaining > 0) {
for (i = 0; i <= st; i++) {
    c->status[i].step_index = bytestream2_get_le32u(&gb);
    if (c->status[i].step_index > 88u) {
        i, c->status[i].step_index);

for (i = 0; i <= st; i++) {
    c->status[i].predictor = bytestream2_get_le32u(&gb);
    if (FFABS((int64_t)c->status[i].predictor) > (1 << 16))

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
for (n = nb_samples >> (1 - st); n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
int previous_left_sample, previous_right_sample;
int current_left_sample, current_right_sample;
int next_left_sample, next_right_sample;
int coeff1l, coeff2l, coeff1r, coeff2r;
int shift_left, shift_right;

current_left_sample   = sign_extend(bytestream2_get_le16u(&gb), 16);
previous_left_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
current_right_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);

for (count1 = 0; count1 < nb_samples / 28; count1++) {
    int byte = bytestream2_get_byteu(&gb);

    byte = bytestream2_get_byteu(&gb);
    shift_left  = 20 - (byte >> 4);
    shift_right = 20 - (byte & 0x0F);

    for (count2 = 0; count2 < 28; count2++) {
        byte = bytestream2_get_byteu(&gb);
        next_left_sample  = sign_extend(byte >> 4, 4) * (1 << shift_left);
        next_right_sample = sign_extend(byte,      4) * (1 << shift_right);

        next_left_sample = (next_left_sample +
                            (current_left_sample * coeff1l) +
                            (previous_left_sample * coeff2l) + 0x80) >> 8;
        next_right_sample = (next_right_sample +
                             (current_right_sample * coeff1r) +
                             (previous_right_sample * coeff2r) + 0x80) >> 8;

        previous_left_sample  = current_left_sample;
        current_left_sample   = av_clip_int16(next_left_sample);
        previous_right_sample = current_right_sample;
        current_right_sample  = av_clip_int16(next_right_sample);
        *samples++ = current_left_sample;
        *samples++ = current_right_sample;
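Each EA block header supplies a coefficient pair per channel plus a 20-minus-nibble shift; every data nibble is then sign-extended, scaled by 2^shift, filtered against the two previous samples, rounded with 0x80, and brought back down with >> 8. A one-channel sketch of that step, assuming the coefficient pairs from ea_adpcm_table (names illustrative):

static const int ea_coeff1[] = { 0, 240,  460,  392 };
static const int ea_coeff2[] = { 0,   0, -208, -220 };

static int ea_predict(int nib /* sign-extended 4-bit value */, int shift,
                      int sel, int *cur, int *prev)
{
    int next = (nib * (1 << shift) + *cur  * ea_coeff1[sel]
                                   + *prev * ea_coeff2[sel] + 0x80) >> 8;
    if (next < -32768) next = -32768;
    if (next >  32767) next =  32767;
    *prev = *cur;   /* shift the two-sample history */
    *cur  = next;
    return next;
}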
int byte = bytestream2_get_byteu(&gb);

for (count1 = 0; count1 < nb_samples / 2; count1++) {
    byte[0] = bytestream2_get_byteu(&gb);
    if (st)
        byte[1] = bytestream2_get_byteu(&gb);
    for (i = 4; i >= 0; i -= 4) {
int previous_sample, current_sample, next_sample;

offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
                                 bytestream2_get_le32(&gb)) +

samplesC = samples_p[channel];

current_sample  = sign_extend(bytestream2_get_le16(&gb), 16);
previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);

current_sample  = c->status[channel].predictor;
previous_sample = c->status[channel].prev_sample;

for (count1 = 0; count1 < nb_samples / 28; count1++) {
    int byte = bytestream2_get_byte(&gb);

    current_sample  = sign_extend(bytestream2_get_be16(&gb), 16);
    previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);

    for (count2 = 0; count2 < 28; count2++)
        *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);

    shift = 20 - (byte & 0x0F);

    for (count2 = 0; count2 < 28; count2++) {
        byte = bytestream2_get_byte(&gb);

        next_sample += (current_sample * coeff1) +
                       (previous_sample * coeff2);
        next_sample = av_clip_int16(next_sample >> 8);

        previous_sample = current_sample;
        current_sample  = next_sample;
        *samplesC++ = current_sample;

} else if (count != count1) {
    count = FFMAX(count, count1);

c->status[channel].predictor   = current_sample;
c->status[channel].prev_sample = previous_sample;

frame->nb_samples = count * 28;
for (n = 0; n < 4; n++, s += 32) {

for (m = 2; m < 32; m += 2) {
    for (n = 0; n < 4; n++, s += 32) {
        int byte = bytestream2_get_byteu(&gb);

        s[0] = av_clip_int16((level + pred + 0x80) >> 8);
        s[1] = av_clip_int16((level + pred + 0x80) >> 8);
c->status[0].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[0].step_index = bytestream2_get_byteu(&gb);

if (c->status[0].step_index > 88u) {
    c->status[0].step_index);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
c->status[i].predictor  = sign_extend(bytestream2_get_be16u(&gb), 16);
c->status[i].step_index = bytestream2_get_byteu(&gb);

if (c->status[i].step_index > 88u) {
    c->status[i].step_index);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
if (!c->status[0].step_index) {
    *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
    *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
    c->status[0].step_index = 1;

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
for (n = (nb_samples << st) / 3; n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
    (byte >> 2) & 0x07, 3, 0);

for (n = nb_samples >> (2 - st); n > 0; n--) {
    int byte = bytestream2_get_byteu(&gb);
    (byte >> 4) & 0x03, 2, 2);
    (byte >> 2) & 0x03, 2, 2);

for (n = nb_samples >> (1 - st); n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);

if (!c->has_status) {

for (n = nb_samples >> 1; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
int samples_per_block;

samples_per_block = avctx->extradata[0] / 16;
blocks = nb_samples / avctx->extradata[0];

samples_per_block = nb_samples / 16;

for (m = 0; m < blocks; m++) {
    int prev1 = c->status[channel].sample1;
    int prev2 = c->status[channel].sample2;

    for (i = 0; i < samples_per_block; i++) {
        int byte  = bytestream2_get_byteu(&gb);
        int scale = 1 << (byte >> 4);
        int index = byte & 0xf;

        for (n = 0; n < 16; n++) {
            byte = bytestream2_get_byteu(&gb);

            sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
            *samples = av_clip_int16(sampledat);

    c->status[channel].sample1 = prev1;
    c->status[channel].sample2 = prev2;
#define THP_GET16(g) \
    sign_extend( \
        avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
        bytestream2_get_le16u(&(g)) : \
        bytestream2_get_be16u(&(g)), 16)
for (n = 0; n < 16; n++)
for (n = 0; n < 16; n++)

if (!c->has_status) {

for (i = 0; i < (nb_samples + 13) / 14; i++) {
    int byte  = bytestream2_get_byteu(&gb);
    int index = (byte >> 4) & 7;
    unsigned int exp = byte & 0x0F;

    for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
        byte = bytestream2_get_byteu(&gb);

        sampledat = ((c->status[ch].sample1 * factor1
                      + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
        *samples = av_clip_int16(sampledat);
        c->status[ch].sample2 = c->status[ch].sample1;
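The THP/AFC-style step above uses 4.12 fixed-point filter coefficients (hence the >> 11 against the two history samples) and scales each sign-extended nibble by 2^exp from the frame header. A hedged sketch of a single sample update (names illustrative, not the FFmpeg API):

static short thp_step(int delta4 /* sign-extended nibble */, unsigned exp,
                      int factor1, int factor2, int *sample1, int *sample2)
{
    int s = ((*sample1 * factor1 + *sample2 * factor2) >> 11)
            + delta4 * (1 << exp);
    if (s < -32768) s = -32768;
    if (s >  32767) s =  32767;
    *sample2 = *sample1;  /* age the history, as in the lines above */
    *sample1 = s;
    return (short)s;
}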
for (i = 0; i < nb_samples / 28; i++) {

    header = bytestream2_get_byteu(&gb);

    for (n = 0; n < 28; n++) {

        prev = (c->status[channel].sample1 * 0x3c);

        prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);

        prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);

        prev = av_clip_intp2((prev + 0x20) >> 6, 21);

        byte = bytestream2_get_byteu(&gb);

        sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
        *samples++ = av_clip_int16(sampledat >> 6);

        c->status[channel].sample1 = sampledat;
for (i = 0; i < nb_samples / 28; i++) {

    filter = bytestream2_get_byteu(&gb);

    flag = bytestream2_get_byteu(&gb);

    for (n = 0; n < 28; n++) {

        byte = bytestream2_get_byteu(&gb);

        scale = scale * (1 << 12);
control = bytestream2_get_byteu(&gb);
shift   = (control >> 4) + 2;

for (n = 0; n < nb_samples / 2; n++) {
    int sample = bytestream2_get_byteu(&gb);

if (!c->has_status) {

for (n = 0; n < nb_samples * avctx->channels; n++) {
    int v = bytestream2_get_byteu(&gb);

for (n = nb_samples / 2; n > 0; n--) {
    int v = bytestream2_get_byteu(&gb);
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .priv_data_size = sizeof(ADPCMDecodeContext), \
    .init           = adpcm_decode_init, \
    .decode         = adpcm_decode_frame, \
    .flush          = adpcm_flush, \
    .capabilities   = AV_CODEC_CAP_DR1, \
    .sample_fmts    = sample_fmts_, \
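The macro is then expanded once per supported codec id; for example, the IMA QuickTime decoder is registered along these lines (sample_fmts_s16p being the planar s16 format list defined earlier in the file):

ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");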