40 #define BITSTREAM_READER_LE
51 #define QDM2_LIST_ADD(list, size, packet) \
54     list[size - 1].next = &list[size]; \
56     list[size].packet = packet; \
57     list[size].next = NULL; \
62 #define QDM2_SB_USED(sub_sampling) (((sub_sampling) >= 2) ? 30 : 8 << (sub_sampling))
64 #define FIX_NOISE_IDX(noise_idx) \
65     if ((noise_idx) >= 3840) \
66         (noise_idx) -= 3840; \
68 #define SB_DITHERING_NOISE(sb,noise_idx) (noise_table[(noise_idx)++] * sb_noise_attenuation[(sb)])
70 #define SAMPLES_NEEDED \
71     av_log (NULL,AV_LOG_INFO,"This file triggers some untested code. Please contact the developers.\n");
73 #define SAMPLES_NEEDED_2(why) \
74     av_log (NULL,AV_LOG_INFO,"This file triggers some missing code. Please contact the developers.\nPosition: %s\n",why);
76 #define QDM2_MAX_FRAME_SIZE 512
159 int fft_coefs_min_index[5];
160 int fft_coefs_max_index[5];
161 int fft_level_exp[6];
198 0, 5, 1, 5, 5, 5, 5, 5, 2, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5, 5, 4
222 if ((value & ~3) > 0)
234 return (value & 1) ? ((value + 1) >> 1) : -(value >> 1);
250 for (i = 0; i < length; i++)
253 return (uint16_t)(value & 0xffff);
267 if (sub_packet->type == 0) {
268 sub_packet->size = 0;
273 if (sub_packet->type & 0x80) {
274 sub_packet->size <<= 8;
276 sub_packet->type &= 0x7f;
279 if (sub_packet->type == 0x7f)
300 while (list && list->packet) {
316 int i, j, n, ch, sum;
321 for (i = 0; i < n; i++) {
324 for (j = 0; j < 8; j++)
331 for (j = 0; j < 8; j++)
353 for (j = 0; j < 64; j++) {
378 for (j = 0; j < 64; ) {
379 if (coding_method[ch][sb][j] < 8)
381 if ((coding_method[ch][sb][j] - 8) > 22) {
385 switch (switchtable[coding_method[ch][sb][j] - 8]) {
409 for (k = 0; k < run; k++) {
411 int sbjk = sb + (j + k) / 64;
416 if (coding_method[ch][sbjk][(j + k) % 64] > coding_method[ch][sb][j]) {
420 memset(&coding_method[ch][sb][j + k], case_val,
422 memset(&coding_method[ch][sb][j + k], case_val,
443 int i, sb, ch, sb_used;
447 for (sb = 0; sb < 30; sb++)
448 for (i = 0; i < 8; i++) {
462 for (sb = 0; sb < sb_used; sb++)
464 for (i = 0; i < 64; i++) {
473 for (sb = 0; sb < sb_used; sb++) {
474 if ((sb >= 4) && (sb <= 23)) {
476 for (i = 0; i < 64; i++) {
490 for (i = 0; i < 64; i++) {
502 for (i = 0; i < 64; i++) {
534 int c, int superblocktype_2_3,
539 int add1, add2, add3, add4;
542 if (!superblocktype_2_3) {
547 for (sb = 0; sb < 30; sb++) {
548 for (j = 1; j < 63; j++) {
549 add1 = tone_level_idx[ch][sb][j] - 10;
552 add2 = add3 = add4 = 0;
568 tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1;
571 tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff;
573 tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1];
578 for (sb = 0; sb < 30; sb++)
579 for (j = 0; j < 64; j++)
580 acc += tone_level_idx_temp[ch][sb][j];
582 multres = 0x66666667LL * (acc * 10);
583 esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31);
585 for (sb = 0; sb < 30; sb++)
586 for (j = 0; j < 64; j++) {
587 comp = tone_level_idx_temp[ch][sb][j] * esp_40 * 10;
618 coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30) & 0xff;
620 for (sb = 0; sb < 30; sb++)
623 for (sb = 0; sb < 30; sb++)
624 for (j = 0; j < 64; j++)
626 if (coding_method[ch][sb][j] < 10)
627 coding_method[ch][sb][j] = 10;
630 if (coding_method[ch][sb][j] < 16)
631 coding_method[ch][sb][j] = 16;
633 if (coding_method[ch][sb][j] < 30)
634 coding_method[ch][sb][j] = 30;
639 for (sb = 0; sb < 30; sb++)
640 for (j = 0; j < 64; j++)
658 int length, int sb_min, int sb_max)
661 int joined_stereo, zero_encoding;
663 float type34_div = 0;
664 float type34_predictor;
666 int sign_bits[16] = {0};
670 for (sb=sb_min; sb < sb_max; sb++)
676 for (sb = sb_min; sb < sb_max; sb++) {
688 for (j = 0; j < 16; j++)
691 for (j = 0; j < 64; j++)
707 type34_predictor = 0.0;
710 for (j = 0; j < 128; ) {
715 for (k = 0; k < 5; k++) {
716 if ((j + 2 * k) >= 128)
727 for (k = 0; k < 5; k++)
730 for (k = 0; k < 5; k++)
733 for (k = 0; k < 10; k++)
745 f -= noise_samples[((sb + 1) * (j + 5 * ch + 1)) & 127] * 9.0 / 40.0;
756 for (k = 0; k < 5; k++) {
768 for (k = 0; k < 5; k++)
772 for (k = 0; k < 5; k++)
786 for (k = 0; k < 3; k++)
789 for (k = 0; k < 3; k++)
812 type34_div = (float)(1 << get_bits(gb, 2));
813 samples[0] = ((float)get_bits(gb, 5) - 16.0) / 15.0;
814 type34_predictor = samples[0];
823 type34_predictor = samples[0];
838 for (k = 0; k < run && j + k < 128; k++) {
840 q->tone_level[0][sb][(j + k) / 2] * samples[k];
842 if (sign_bits[(j + k) / 8])
844 q->tone_level[1][sb][(j + k) / 2] * -samples[k];
847 q->tone_level[1][sb][(j + k) / 2] * samples[k];
851 for (k = 0; k < run; k++)
882 quantized_coeffs[0] = level;
884 for (i = 0; i < 7; ) {
896 for (k = 1; k <= run; k++)
897 quantized_coeffs[i + k] = (level + ((k * diff) / run));
929 for (sb = 0; sb < n; sb++)
931 for (j = 0; j < 8; j++) {
935 for (k=0; k < 8; k++) {
941 for (k=0; k < 8; k++)
948 for (sb = 0; sb < n; sb++)
956 for (j = 0; j < 8; j++)
962 for (sb = 0; sb < n; sb++)
964 for (j = 0; j < 8; j++) {
986 for (i = 1; i < n; i++)
991 for (j = 0; j < (8 - 1); ) {
998 for (k = 1; k <= run; k++)
1007 for (i = 0; i < 8; i++)
1101 if (nodes[0] && nodes[1] && nodes[2])
1107 if (nodes[0] && nodes[1] && nodes[3])
1122 int i, packet_bytes, sub_packet_size, sub_packets_D;
1123 unsigned int next_index = 0;
1137 if (header.type < 2 || header.type >= 8) {
1148 if (header.type == 2 || header.type == 4 || header.type == 5) {
1164 for (i = 0; i < 6; i++)
1168 for (i = 0; packet_bytes > 0; i++) {
1185 if (next_index >= header.size)
1193 sub_packet_size = ((packet->size > 0xff) ? 1 : 0) + packet->size + 2;
1195 if (packet->type == 0)
1198 if (sub_packet_size > packet_bytes) {
1199 if (packet->type != 10 && packet->type != 11 && packet->type != 12)
1201 packet->size += packet_bytes - sub_packet_size;
1204 packet_bytes -= sub_packet_size;
1210 if (packet->type == 8) {
1213 } else if (packet->type >= 9 && packet->type <= 12) {
1216 } else if (packet->type == 13) {
1217 for (j = 0; j < 6; j++)
1219 } else if (packet->type == 14) {
1220 for (j = 0; j < 6; j++)
1222 } else if (packet->type == 15) {
1225 } else if (packet->type >= 16 && packet->type < 48 &&
1250 ((sub_packet >= 16) ? (sub_packet - 16) : sub_packet);
1262 int local_int_4, local_int_8, stereo_phase, local_int_10;
1263 int local_int_14, stereo_exp, local_int_20, local_int_28;
1270 local_int_10 = 1 << (q->group_order - duration - 1);
1277 if(local_int_4 < q->group_size)
1283 local_int_4 += local_int_10;
1284 local_int_28 += (1 << local_int_8);
1286 local_int_4 += 8 * local_int_10;
1287 local_int_28 += (8 << local_int_8);
1292 if (local_int_10 <= 2) {
1297 while (offset >= (local_int_10 - 1)) {
1298 offset += (1 - (local_int_10 - 1));
1299 local_int_4 += local_int_10;
1300 local_int_28 += (1 << local_int_8);
1307 local_int_14 = (offset >> local_int_8);
1321 exp = (exp < 0) ? 0 : exp;
1330 if (stereo_phase < 0)
1335 int sub_packet = (local_int_20 + local_int_28);
1338 channel, exp, phase);
1342 stereo_exp, stereo_phase);
1358 for (i = 0; i < 5; i++)
1368 if (value > min && value < max) {
1381 (packet->type < 16 || packet->type >= 48 ||
1393 type = packet->type;
1395 if ((type >= 17 && type < 24) || (type >= 33 && type < 40)) {
1398 if (duration >= 0 && duration < 4)
1400 } else if (type == 31) {
1401 for (j = 0; j < 4; j++)
1403 } else if (type == 46) {
1404 for (j = 0; j < 6; j++)
1406 for (j = 0; j < 4; j++)
1412 for (i = 0, j = -1; i < 5; i++)
1427 const double iscale = 2.0 * M_PI / 512.0;
1433 c.im = level * sin(tone->phase * iscale);
1434 c.re = level * cos(tone->phase * iscale);
1443 f[1] = -tone->table[4];
1445 f[2] = 1.0 - tone->table[2] - tone->table[3];
1446 f[3] = tone->table[1] + tone->table[4] - 1.0;
1448 f[5] = tone->table[2];
1449 for (i = 0; i < 2; i++) {
1453 c.im * ((tone->cutoff <= i) ? -f[i] : f[i]);
1455 for (i = 0; i < 4; i++) {
1471 const double iscale = 0.25 * M_PI;
1473 for (ch = 0; ch < q->channels; ch++) {
1505 for (i = 0; i < 4; i++)
1518 if (offset < q->frequency_range) {
1522 tone.cutoff = (offset >= 60) ? 3 : 2;
1561 int i, k, ch, sb_used, sub_sampling, dither_state = 0;
1566 for (ch = 0; ch < q->channels; ch++)
1567 for (i = 0; i < 8; i++)
1568 for (k = sb_used; k < SBLIMIT; k++)
1574 for (i = 0; i < 8; i++) {
1587 for (ch = 0; ch < q->channels; ch++)
1665 if (bytestream2_peek_be64(&gb) == (((uint64_t)MKBETAG('f','r','m','a') << 32) |
1666 (uint64_t)MKBETAG('Q','D','M','2')))
1678 size = bytestream2_get_be32(&gb);
1687 if (bytestream2_get_be32(&gb) != MKBETAG('Q','D','C','A')) {
1703 avctx->bit_rate = bytestream2_get_be32(&gb);
1705 s->fft_size = bytestream2_get_be32(&gb);
1731 case 0: tmp = 40; break;
1732 case 1: tmp = 48; break;
1733 case 2: tmp = 56; break;
1734 case 3: tmp = 72; break;
1735 case 4: tmp = 80; break;
1736 case 5: tmp = 100; break;
1740 if ((tmp * 1000) < avctx->bit_rate) tmp_val = 1;
1741 if ((tmp * 1440) < avctx->bit_rate) tmp_val = 2;
1742 if ((tmp * 1760) < avctx->bit_rate) tmp_val = 3;
1743 if ((tmp * 2240) < avctx->bit_rate) tmp_val = 4;
1789 memset(&q->output_buffer[frame_size], 0, frame_size * sizeof(float));
1807 for (ch = 0; ch < q->channels; ch++) {
1838 int *got_frame_ptr, AVPacket *avpkt)
1842 int buf_size = avpkt->size;
1849 if (buf_size < s->checksum_size)
1856 out = (int16_t *)frame->data[0];
1858 for (i = 0; i < 16; i++) {
av_cold void ff_rdft_end(RDFTContext *s)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
FFTTone fft_tones[1000]
FFT and tones.
A node in the subpacket list.
This structure describes decoded (raw) audio or video data.
static int fix_coding_method_array(int sb, int channels, sb_int8_array coding_method)
Called while processing data from subpackets 11 and 12.
static int init_quantized_coeffs_elem0(int8_t *quantized_coeffs, GetBitContext *gb)
Init the first element of a channel in quantized_coeffs with data from packet 10 (quantized_coeffs[ch...
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
int64_t bit_rate
the average bitrate
static const float fft_tone_level_table[2][64]
static av_cold int init(AVCodecContext *avctx)
static void average_quantized_coeffs(QDM2Context *q)
Replace 8 elements with their average value.
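A minimal sketch of that averaging step, assuming the quantized_coeffs[ch][i][0..7] layout listed further down this page; the function name and exact rounding are illustrative, not the decoder's own code.
static void average_eight(int8_t coeffs[8])
{
    int j, sum = 0;
    for (j = 0; j < 8; j++)
        sum += coeffs[j];      /* accumulate the 8 coefficients     */
    sum /= 8;                  /* integer average                   */
    for (j = 0; j < 8; j++)
        coeffs[j] = sum;       /* every element becomes the average */
}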
static VLC vlc_tab_tone_level_idx_hi2
#define QDM2_MAX_FRAME_SIZE
float synth_buf[MPA_MAX_CHANNELS][512 *2]
int8_t tone_level_idx_base[MPA_MAX_CHANNELS][30][8]
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int coeff_per_sb_select
selector for "num. of coeffs. per subband" tables. Can be 0, 1, 2
static av_cold int qdm2_decode_close(AVCodecContext *avctx)
unsigned int size
subpacket size
int8_t tone_level_idx_hi2[MPA_MAX_CHANNELS][26]
float sb_samples[MPA_MAX_CHANNELS][128][SBLIMIT]
#define AV_CH_LAYOUT_STEREO
static VLC fft_stereo_exp_vlc
static void qdm2_decode_sub_packet_header(GetBitContext *gb, QDM2SubPacket *sub_packet)
Fill a QDM2SubPacket structure with packet type, size, and data pointer.
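The excerpts at source lines 267-279 above suggest the header layout; a hedged sketch of that parse follows (the extended-type byte and the data-pointer step are assumptions, not quoted code).
sub_packet->type = get_bits(gb, 8);
if (sub_packet->type == 0) {
    sub_packet->size = 0;                    /* empty sub-packet                */
} else {
    sub_packet->size = get_bits(gb, 8);
    if (sub_packet->type & 0x80) {           /* high bit: size is 16 bits wide  */
        sub_packet->size <<= 8;
        sub_packet->size  |= get_bits(gb, 8);
        sub_packet->type  &= 0x7f;
    }
    if (sub_packet->type == 0x7f)            /* assumed: extended type byte     */
        sub_packet->type |= get_bits(gb, 8) << 8;
    /* assumed: sub_packet->data is then pointed at the current read position */
}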
static uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD+1]
static void qdm2_fft_init_coefficient(QDM2Context *q, int sub_packet, int offset, int duration, int channel, int exp, int phase)
QDM2SubPNode sub_packet_list_C[16]
packets with errors?
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static QDM2SubPNode * qdm2_search_subpacket_type_in_list(QDM2SubPNode *list, int type)
Return node pointer to first packet of requested type in list.
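A minimal sketch of the search, assuming only the QDM2SubPNode fields (packet, next) and QDM2SubPacket.type documented on this page; it mirrors the loop excerpted at source line 300.
static QDM2SubPNode *search_type(QDM2SubPNode *list, int type)
{
    while (list && list->packet) {       /* walk until a leaf/empty node   */
        if (list->packet->type == type)
            return list;                 /* first packet of requested type */
        list = list->next;
    }
    return NULL;                         /* not found                      */
}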
static VLC vlc_tab_type30
static av_cold int qdm2_decode_init(AVCodecContext *avctx)
Init parameters from codec extradata.
static int qdm2_get_vlc(GetBitContext *gb, const VLC *vlc, int flag, int depth)
void avpriv_request_sample(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
enum AVSampleFormat sample_fmt
audio sample format
int fft_order
order of FFT (actually fftorder+1)
static void qdm2_decode_fft_packets(QDM2Context *q)
int sub_sampling
subsampling: 0=25%, 1=50%, 2=100%
void ff_mpa_synth_init_float(float *window)
#define SOFTCLIP_THRESHOLD
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static void qdm2_fft_tone_synthesizer(QDM2Context *q, int sub_packet)
static const int16_t fft_level_index_table[256]
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
static const float fft_tone_envelope_table[4][31]
static int get_bits_count(const GetBitContext *s)
bitstream reader API header.
static const uint8_t coeff_per_sb_for_dequant[3][30]
int checksum_size
size of data block, used also for checksum
static const uint8_t header[24]
static void process_subpacket_12(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 12.
static int qdm2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
static const uint8_t fft_subpackets[32]
static av_cold void qdm2_init_static_data(void)
Init static data (does not depend on specific file)
int channels
number of channels
static void fill_tone_level_array(QDM2Context *q, int flag)
Related to synthesis filter. Called by process_subpacket_10.
static int synthfilt_build_sb_samples(QDM2Context *q, GetBitContext *gb, int length, int sb_min, int sb_max)
Called by process_subpacket_11 to process more data from subpacket 11 with sb 0-8.
static av_cold void qdm2_init_vlc(void)
static int get_bits_left(GetBitContext *gb)
int synth_buf_offset[MPA_MAX_CHANNELS]
static VLC fft_level_exp_vlc
static av_cold void rnd_table_init(void)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static uint8_t random_dequant_type24[128][3]
const uint8_t * data
pointer to subpacket data (points to input data buffer, it's not a private copy)
static VLC vlc_tab_tone_level_idx_mid
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static const int switchtable[23]
int group_size
size of frame group (16 frames per group)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
int sub_packets_B
number of packets on 'B' list
QDM2SubPNode sub_packet_list_A[16]
list of all packets
int noise_idx
index for dithering noise table
const char * name
Name of the codec implementation.
static const uint8_t offset[127][2]
float tone_level[MPA_MAX_CHANNELS][30][64]
Mixed temporary data used in decoding.
uint64_t channel_layout
Audio channel layout.
int8_t quantized_coeffs[MPA_MAX_CHANNELS][10][8]
static void qdm2_synthesis_filter(QDM2Context *q, int index)
static VLC vlc_tab_tone_level_idx_hi1
#define QDM2_SB_USED(sub_sampling)
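Worked values from the macro body shown at source line 62, using the sub_sampling meanings documented below (0=25%, 1=50%, 2=100%):
/* (((sub_sampling) >= 2) ? 30 : 8 << (sub_sampling)) */
QDM2_SB_USED(0)   /*  8 subbands used  */
QDM2_SB_USED(1)   /* 16 subbands used  */
QDM2_SB_USED(2)   /* 30 subbands used  */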
int group_order
Parameters built from header parameters, do not change during playback.
static VLC fft_level_exp_alt_vlc
audio channel layout utility functions
static float noise_samples[128]
QDM2SubPNode sub_packet_list_B[16]
FFT packets B are on list.
struct QDM2SubPNode * next
pointer to next packet in the list, NULL if leaf node
static const int8_t tone_level_idx_offset_table[30][4]
float ff_mpa_synth_window_float[]
static void qdm2_decode_super_block(QDM2Context *q)
Decode superblock, fill packet lists.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
#define SAMPLES_NEEDED_2(why)
static const int8_t coding_method_table[5][30]
static VLC fft_stereo_phase_vlc
void(* rdft_calc)(struct RDFTContext *s, FFTSample *z)
static uint16_t qdm2_packet_checksum(const uint8_t *data, int length, int value)
QDM2 checksum.
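A sketch assembled from the excerpts at source lines 250-253; the per-byte update (subtracting each input byte from the running value) is filled in here as an assumption.
static uint16_t checksum_sketch(const uint8_t *data, int length, int value)
{
    int i;
    for (i = 0; i < length; i++)
        value -= data[i];               /* assumed: fold each byte in */
    return (uint16_t)(value & 0xffff);  /* keep the low 16 bits       */
}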
#define QDM2_LIST_ADD(list, size, packet)
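A hedged usage sketch of QDM2_LIST_ADD, based only on the lines excerpted at source lines 51-57: the previous tail is linked to the new node, whose packet pointer is stored and whose next is cleared. The list and counter names here are illustrative.
/* append 'packet' to an illustrative QDM2SubPNode list */
QDM2_LIST_ADD(q->sub_packet_list_A, list_size, packet);
/* per the excerpt: list[list_size - 1].next now points at the new node,
 * the new node's packet is set, and its next is NULL */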
static uint8_t random_dequant_index[256][5]
static const float type30_dequant[8]
#define FF_ARRAY_ELEMS(a)
QDM2Complex complex[MPA_MAX_CHANNELS][256]
static const float type34_delta[10]
static VLC vlc_tab_fft_tone_offset[5]
static const float dequant_1bit[2][3]
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static void build_sb_samples_from_noise(QDM2Context *q, int sb)
Build subband samples with noise weighted by q->tone_level.
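An illustrative sketch of how such a noise-built subband sample combines the FIX_NOISE_IDX and SB_DITHERING_NOISE macros defined at source lines 64-68, assuming the sb_samples and tone_level layouts listed on this page; the real routine's exact indexing may differ.
FIX_NOISE_IDX(q->noise_idx);                        /* wrap the dither index   */
for (ch = 0; ch < q->nb_channels; ch++)
    for (j = 0; j < 64; j++)
        q->sb_samples[ch][j][sb] =
            SB_DITHERING_NOISE(sb, q->noise_idx) *  /* attenuated noise sample */
            q->tone_level[ch][sb][j];               /* weighted by tone level  */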
float samples[MPA_MAX_CHANNELS *MPA_FRAME_SIZE]
static const uint8_t last_coeff[3]
Libavcodec external API header.
static const int fft_cutoff_index_table[4][2]
int sample_rate
samples per second
static void qdm2_fft_decode_tones(QDM2Context *q, int duration, GetBitContext *gb, int b)
static const uint8_t coeff_per_sb_for_avg[3][30]
int8_t tone_level_idx_mid[MPA_MAX_CHANNELS][26][8]
main external API structure.
static int qdm2_get_se_vlc(const VLC *vlc, GetBitContext *gb, int depth)
float output_buffer[QDM2_MAX_FRAME_SIZE *MPA_MAX_CHANNELS *2]
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int fft_coefs_min_index[5]
FFTCoefficient fft_coefs[1000]
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
int has_errors
packet has errors
static const uint8_t dequant_table[64]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static void fill_coding_method_array(sb_int8_array tone_level_idx, sb_int8_array tone_level_idx_temp, sb_int8_array coding_method, int nb_channels, int c, int superblocktype_2_3, int cm_table_select)
Related to synthesis filter Called by process_subpacket_11 c is built with data from subpacket 11 Mos...
#define HARDCLIP_THRESHOLD
void ff_mpa_synth_filter_float(MPADSPContext *s, float *synth_buf_ptr, int *synth_buf_offset, float *window, int *dither_state, float *samples, ptrdiff_t incr, float *sb_samples)
int8_t coding_method[MPA_MAX_CHANNELS][30][64]
static void process_subpacket_10(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 10 if not null, else.
static av_cold void softclip_table_init(void)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int fft_size
size of FFT, in complex numbers
int fft_coefs_max_index[5]
int frame_size
size of data frame
static int qdm2_decode(QDM2Context *q, const uint8_t *in, int16_t *out)
#define FIX_NOISE_IDX(noise_idx)
static const float fft_tone_sample_table[4][16][5]
int8_t tone_level_idx_hi1[MPA_MAX_CHANNELS][3][8][8]
int nb_channels
Parameters from codec header, do not change during playback.
int superblocktype_2_3
select fft tables and some algorithm based on superblock type
common internal api header.
int cm_table_select
selector for "coding method" tables. Can be 0, 1 (from init: 0-4)
channel
Use these values when setting the channel map with ebur128_set_channel().
QDM2SubPacket * packet
packet
QDM2SubPacket sub_packets[16]
Packets and packet lists.
static const int vlc_stage3_values[60]
mpeg audio declarations for both encoder and decoder.
int do_synth_filter
used to perform or skip synthesis filter
const uint8_t * compressed_data
I/O data.
int8_t tone_level_idx_temp[MPA_MAX_CHANNELS][30][64]
static int process_subpacket_9(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 9, init quantized_coeffs with data from it.
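A sketch of the run/diff interpolation visible in the excerpts at source lines 882-897 and 986-998: each (run, diff) pair extends the coefficient curve linearly from the previous level. The read_run/read_diff helpers stand in for the VLC reads the real decoder performs and are assumptions.
quantized_coeffs[0] = level;
for (i = 0; i < 7; ) {
    run  = read_run(gb);                 /* assumed helper: VLC run length   */
    diff = read_diff(gb);                /* assumed helper: signed VLC delta */
    for (k = 1; k <= run; k++)           /* linear ramp across the run       */
        quantized_coeffs[i + k] = level + (k * diff) / run;
    level += diff;
    i     += run;
}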
#define MKBETAG(a, b, c, d)
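How the tag checks excerpted at source lines 1665 and 1687 use MKBETAG: the four characters are packed into one big-endian 32-bit value (standard FFmpeg definition assumed).
MKBETAG('Q','D','M','2')   /* 0x51444D32 - the QDM2 atom tag                        */
MKBETAG('Q','D','C','A')   /* 0x51444341 - the block whose fields (bit_rate,
                              fft_size, ...) are read next in the excerpts above    */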
static void process_subpacket_11(QDM2Context *q, QDM2SubPNode *node)
Process subpacket 11.
MPADSPContext mpadsp
Synthesis filter.
static void init_tone_level_dequantization(QDM2Context *q, GetBitContext *gb)
Related to synthesis filter, process data from packet 10 Init part of quantized_coeffs via function i...
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int channels
number of audio channels
static void qdm2_fft_generate_tone(QDM2Context *q, FFTTone *tone)
QDM2SubPNode sub_packet_list_D[16]
DCT packets.
int8_t tone_level_idx[MPA_MAX_CHANNELS][30][64]
VLC_TYPE(* table)[2]
code, bits
static const struct twinvq_data tab
int8_t sb_int8_array[2][30][64]
#define SB_DITHERING_NOISE(sb, noise_idx)
static void qdm2_calculate_fft(QDM2Context *q, int channel, int sub_packet)
static void process_synthesis_subpackets(QDM2Context *q, QDM2SubPNode *list)
Process new subpackets for synthesis filter.
#define AV_CH_LAYOUT_MONO
av_cold int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans)
Set up a real FFT.
This structure stores compressed data.
av_cold void ff_mpadsp_init(MPADSPContext *s)
static av_cold void init_noise_samples(void)
int nb_samples
number of audio samples (per channel) described by this frame
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static VLC vlc_tab_type34