FFmpeg  4.2.2
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "adpcm.h"
36 #include "adpcm_data.h"
37 #include "internal.h"
38 
39 /**
40  * @file
41  * ADPCM decoders
42  * Features and limitations:
43  *
44  * Reference documents:
45  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
46  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
47  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
48  * http://openquicktime.sourceforge.net/
49  * XAnim sources (xa_codec.c) http://xanim.polter.net/
50  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
51  * SoX source code http://sox.sourceforge.net/
52  *
53  * CD-ROM XA:
54  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
55  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
56  * readstr http://www.geocities.co.jp/Playtown/2004/
57  */
58 
59 /* These are for CD-ROM XA ADPCM */
60 static const int8_t xa_adpcm_table[5][2] = {
61  { 0, 0 },
62  { 60, 0 },
63  { 115, -52 },
64  { 98, -55 },
65  { 122, -60 }
66 };
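/* [Editorial note, not part of the original source] The rows above are the
 * CD-XA predictor filter coefficients (K0, K1) in .6 fixed point. xa_decode()
 * below combines them with the two previous output samples as
 *
 *     pred = (s_1 * K0 + s_2 * K1 + 32) >> 6;
 *
 * For example, filter 2 with s_1 = 1000 and s_2 = 400 predicts
 * (1000*115 + 400*(-52) + 32) >> 6 = 94232 >> 6 = 1472, to which the shifted
 * 4-bit nibble is then added. */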
67 
68 static const int16_t ea_adpcm_table[] = {
69  0, 240, 460, 392,
70  0, 0, -208, -220,
71  0, 1, 3, 4,
72  7, 8, 10, 11,
73  0, -1, -3, -4
74 };
75 
76 // padded to zero where table size is less than 16
77 static const int8_t swf_index_tables[4][16] = {
78  /*2*/ { -1, 2 },
79  /*3*/ { -1, -1, 2, 4 },
80  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
81  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
82 };
83 
84 /* end of tables */
85 
86 typedef struct ADPCMDecodeContext {
87  ADPCMChannelStatus status[14];
88  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
89  int has_status;
90 } ADPCMDecodeContext;
91 
92 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
101  min_channels = 2;
102  break;
108  max_channels = 6;
109  break;
111  min_channels = 2;
112  max_channels = 8;
113  if (avctx->channels & 1) {
114  avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
115  return AVERROR_PATCHWELCOME;
116  }
117  break;
119  max_channels = 8;
120  break;
124  max_channels = 14;
125  break;
126  }
127  if (avctx->channels < min_channels || avctx->channels > max_channels) {
128  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
129  return AVERROR(EINVAL);
130  }
131 
132  switch(avctx->codec->id) {
134  c->status[0].step = c->status[1].step = 511;
135  break;
137  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
138  return AVERROR_INVALIDDATA;
139  break;
141  if (avctx->extradata && avctx->extradata_size >= 8) {
142  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
143  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
144  }
145  break;
147  if (avctx->extradata && avctx->extradata_size >= 2)
148  c->vqa_version = AV_RL16(avctx->extradata);
149  break;
150  default:
151  break;
152  }
153 
154  switch(avctx->codec->id) {
172  break;
174  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
175  AV_SAMPLE_FMT_S16;
176  break;
177  default:
178  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
179  }
180 
181  return 0;
182 }
183 
184 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
185 {
186  int delta, pred, step, add;
187 
188  pred = c->predictor;
189  delta = nibble & 7;
190  step = c->step;
191  add = (delta * 2 + 1) * step;
192  if (add < 0)
193  add = add + 7;
194 
195  if ((nibble & 8) == 0)
196  pred = av_clip(pred + (add >> 3), -32767, 32767);
197  else
198  pred = av_clip(pred - (add >> 3), -32767, 32767);
199 
200  switch (delta) {
201  case 7:
202  step *= 0x99;
203  break;
204  case 6:
205  c->step = av_clip(c->step * 2, 127, 24576);
206  c->predictor = pred;
207  return pred;
208  case 5:
209  step *= 0x66;
210  break;
211  case 4:
212  step *= 0x4d;
213  break;
214  default:
215  step *= 0x39;
216  break;
217  }
218 
219  if (step < 0)
220  step += 0x3f;
221 
222  c->step = step >> 6;
223  c->step = av_clip(c->step, 127, 24576);
224  c->predictor = pred;
225  return pred;
226 }
227 
228 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
229 {
230  int step_index;
231  int predictor;
232  int sign, delta, diff, step;
233 
234  step = ff_adpcm_step_table[c->step_index];
235  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
236  step_index = av_clip(step_index, 0, 88);
237 
238  sign = nibble & 8;
239  delta = nibble & 7;
240  /* perform direct multiplication instead of series of jumps proposed by
241  * the reference ADPCM implementation since modern CPUs can do the mults
242  * quickly enough */
243  diff = ((2 * delta + 1) * step) >> shift;
244  predictor = c->predictor;
245  if (sign) predictor -= diff;
246  else predictor += diff;
247 
248  c->predictor = av_clip_int16(predictor);
249  c->step_index = step_index;
250 
251  return (int16_t)c->predictor;
252 }
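/* [Editorial note] Worked example, assuming the standard IMA tables from
 * adpcm_data.c (ff_adpcm_step_table[0] == 7, ff_adpcm_index_table[5] == 4):
 * with c->step_index == 0, c->predictor == 0, nibble == 5 and shift == 3,
 *     step       = 7
 *     diff       = ((2*5 + 1) * 7) >> 3 = 77 >> 3 = 9
 *     sign       = 0, so the predictor becomes 0 + 9 = 9
 *     step_index = av_clip(0 + 4, 0, 88) = 4
 * i.e. larger nibbles both move the output further and grow the step size. */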
253 
254 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
255 {
256  int nibble, step_index, predictor, sign, delta, diff, step, shift;
257 
258  shift = bps - 1;
259  nibble = get_bits_le(gb, bps),
260  step = ff_adpcm_step_table[c->step_index];
261  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
262  step_index = av_clip(step_index, 0, 88);
263 
264  sign = nibble & (1 << shift);
265  delta = av_mod_uintp2(nibble, shift);
266  diff = ((2 * delta + 1) * step) >> shift;
267  predictor = c->predictor;
268  if (sign) predictor -= diff;
269  else predictor += diff;
270 
271  c->predictor = av_clip_int16(predictor);
272  c->step_index = step_index;
273 
274  return (int16_t)c->predictor;
275 }
276 
277 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
278 {
279  int step_index;
280  int predictor;
281  int diff, step;
282 
283  step = ff_adpcm_step_table[c->step_index];
284  step_index = c->step_index + ff_adpcm_index_table[nibble];
285  step_index = av_clip(step_index, 0, 88);
286 
287  diff = step >> 3;
288  if (nibble & 4) diff += step;
289  if (nibble & 2) diff += step >> 1;
290  if (nibble & 1) diff += step >> 2;
291 
292  if (nibble & 8)
293  predictor = c->predictor - diff;
294  else
295  predictor = c->predictor + diff;
296 
297  c->predictor = av_clip_int16(predictor);
298  c->step_index = step_index;
299 
300  return c->predictor;
301 }
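/* [Editorial note] Unlike adpcm_ima_expand_nibble() above, this variant keeps
 * the exact reference IMA difference:
 *     diff = step/8 + (bit2 ? step : 0) + (bit1 ? step/2 : 0) + (bit0 ? step/4 : 0)
 * With step == 7 and nibble == 5 (binary 0101) that is 0 + 7 + 0 + 1 = 8,
 * one less than the 9 produced by the ((2*delta + 1) * step) >> 3
 * approximation used in the other expanders. */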
302 
303 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
304 {
305  int predictor;
306 
307  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
308  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
309 
310  c->sample2 = c->sample1;
311  c->sample1 = av_clip_int16(predictor);
312  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
313  if (c->idelta < 16) c->idelta = 16;
314  if (c->idelta > INT_MAX/768) {
315  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
316  c->idelta = INT_MAX/768;
317  }
318 
319  return c->sample1;
320 }
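/* [Editorial note] MS ADPCM predicts each output from the two previous
 * samples using one of the seven coefficient pairs in ff_adpcm_AdaptCoeff1/2
 * (selected per block by the "block predictor" byte read in
 * adpcm_decode_frame()), then adds the signed 4-bit nibble scaled by idelta:
 *
 *     pred = (sample1*coeff1 + sample2*coeff2) / 64 + signed_nibble * idelta;
 *
 * idelta itself adapts as (ff_adpcm_AdaptationTable[nibble] * idelta) >> 8
 * and is clamped to [16, INT_MAX/768] so that the next adaptation step
 * (whose largest table factor is 768) cannot overflow an int. */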
321 
322 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
323 {
324  int step_index, predictor, sign, delta, diff, step;
325 
326  step = ff_adpcm_oki_step_table[c->step_index];
327  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
328  step_index = av_clip(step_index, 0, 48);
329 
330  sign = nibble & 8;
331  delta = nibble & 7;
332  diff = ((2 * delta + 1) * step) >> 3;
333  predictor = c->predictor;
334  if (sign) predictor -= diff;
335  else predictor += diff;
336 
337  c->predictor = av_clip_intp2(predictor, 11);
338  c->step_index = step_index;
339 
340  return c->predictor * 16;
341 }
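/* [Editorial note] OKI/Dialogic ADPCM is natively a 12-bit codec, hence the
 * av_clip_intp2(predictor, 11) above (range -2048..2047) and the final
 * "* 16", which rescales the result to the 16-bit range used by the rest of
 * this file (-2048 * 16 = -32768). */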
342 
343 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
344 {
345  int sign, delta, diff;
346  int new_step;
347 
348  sign = nibble & 8;
349  delta = nibble & 7;
350  /* perform direct multiplication instead of series of jumps proposed by
351  * the reference ADPCM implementation since modern CPUs can do the mults
352  * quickly enough */
353  diff = ((2 * delta + 1) * c->step) >> 3;
354  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before the delta is applied */
355  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
356  c->predictor = av_clip_int16(c->predictor);
357  /* calculate new step and clamp it to range 511..32767 */
358  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
359  c->step = av_clip(new_step, 511, 32767);
360 
361  return (int16_t)c->predictor;
362 }
363 
364 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
365 {
366  int sign, delta, diff;
367 
368  sign = nibble & (1<<(size-1));
369  delta = nibble & ((1<<(size-1))-1);
370  diff = delta << (7 + c->step + shift);
371 
372  /* clamp result */
373  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
374 
375  /* calculate new step */
376  if (delta >= (2*size - 3) && c->step < 3)
377  c->step++;
378  else if (delta == 0 && c->step > 0)
379  c->step--;
380 
381  return (int16_t) c->predictor;
382 }
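/* [Editorial note] Worked example for the 4-bit SB Pro variant
 * (size == 4, shift == 0) with c->step == 0 and nibble == 0x9:
 *     sign  = 0x9 & 8 = 8 (negative)
 *     delta = 0x9 & 7 = 1
 *     diff  = 1 << (7 + 0 + 0) = 128
 * so the predictor moves down by 128 (clamped to -16384..16256). A delta of
 * 5 or more (>= 2*size - 3) additionally raises c->step while it is below 3,
 * doubling the scale of later differences. */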
383 
384 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
385 {
386  if(!c->step) {
387  c->predictor = 0;
388  c->step = 127;
389  }
390 
391  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
392  c->predictor = av_clip_int16(c->predictor);
393  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
394  c->step = av_clip(c->step, 127, 24576);
395  return c->predictor;
396 }
397 
398 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
399 {
400  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
401  c->predictor = av_clip_int16(c->predictor);
402  c->step += ff_adpcm_index_table[nibble];
403  c->step = av_clip_uintp2(c->step, 5);
404  return c->predictor;
405 }
406 
407 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
408  const uint8_t *in, ADPCMChannelStatus *left,
409  ADPCMChannelStatus *right, int channels, int sample_offset)
410 {
411  int i, j;
412  int shift,filter,f0,f1;
413  int s_1,s_2;
414  int d,s,t;
415 
416  out0 += sample_offset;
417  if (channels == 1)
418  out1 = out0 + 28;
419  else
420  out1 += sample_offset;
421 
422  for(i=0;i<4;i++) {
423  shift = 12 - (in[4+i*2] & 15);
424  filter = in[4+i*2] >> 4;
425  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
426  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
427  filter=0;
428  }
429  f0 = xa_adpcm_table[filter][0];
430  f1 = xa_adpcm_table[filter][1];
431 
432  s_1 = left->sample1;
433  s_2 = left->sample2;
434 
435  for(j=0;j<28;j++) {
436  d = in[16+i+j*4];
437 
438  t = sign_extend(d, 4);
439  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
440  s_2 = s_1;
441  s_1 = av_clip_int16(s);
442  out0[j] = s_1;
443  }
444 
445  if (channels == 2) {
446  left->sample1 = s_1;
447  left->sample2 = s_2;
448  s_1 = right->sample1;
449  s_2 = right->sample2;
450  }
451 
452  shift = 12 - (in[5+i*2] & 15);
453  filter = in[5+i*2] >> 4;
454  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
455  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
456  filter=0;
457  }
458 
459  f0 = xa_adpcm_table[filter][0];
460  f1 = xa_adpcm_table[filter][1];
461 
462  for(j=0;j<28;j++) {
463  d = in[16+i+j*4];
464 
465  t = sign_extend(d >> 4, 4);
466  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
467  s_2 = s_1;
468  s_1 = av_clip_int16(s);
469  out1[j] = s_1;
470  }
471 
472  if (channels == 2) {
473  right->sample1 = s_1;
474  right->sample2 = s_2;
475  } else {
476  left->sample1 = s_1;
477  left->sample2 = s_2;
478  }
479 
480  out0 += 28 * (3 - channels);
481  out1 += 28 * (3 - channels);
482  }
483 
484  return 0;
485 }
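/* [Editorial note] Layout of one 128-byte CD-XA sound group as consumed by
 * xa_decode() above (derived from the indexing in the code):
 *     bytes  4..11   eight shift/filter control bytes, read as
 *                    in[4 + i*2] and in[5 + i*2]; bytes 0..3 and 12..15 are
 *                    not referenced by this decoder
 *     bytes 16..127  112 data bytes, addressed as in[16 + i + j*4], each
 *                    holding two 4-bit samples (low then high nibble)
 * One group therefore yields 8 * 28 = 224 samples (112 per channel in
 * stereo), matching the (buf_size / 128) * 224 / ch estimate in
 * get_nb_samples(). */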
486 
487 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
488 {
489  ADPCMDecodeContext *c = avctx->priv_data;
490  GetBitContext gb;
491  const int8_t *table;
492  int k0, signmask, nb_bits, count;
493  int size = buf_size*8;
494  int i;
495 
496  init_get_bits(&gb, buf, size);
497 
498  //read bits & initial values
499  nb_bits = get_bits(&gb, 2)+2;
500  table = swf_index_tables[nb_bits-2];
501  k0 = 1 << (nb_bits-2);
502  signmask = 1 << (nb_bits-1);
503 
504  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
505  for (i = 0; i < avctx->channels; i++) {
506  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
507  c->status[i].step_index = get_bits(&gb, 6);
508  }
509 
510  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
511  int i;
512 
513  for (i = 0; i < avctx->channels; i++) {
514  // similar to IMA adpcm
515  int delta = get_bits(&gb, nb_bits);
516  int step = ff_adpcm_step_table[c->status[i].step_index];
517  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
518  int k = k0;
519 
520  do {
521  if (delta & k)
522  vpdiff += step;
523  step >>= 1;
524  k >>= 1;
525  } while(k);
526  vpdiff += step;
527 
528  if (delta & signmask)
529  c->status[i].predictor -= vpdiff;
530  else
531  c->status[i].predictor += vpdiff;
532 
533  c->status[i].step_index += table[delta & (~signmask)];
534 
535  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
536  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
537 
538  *samples++ = c->status[i].predictor;
539  }
540  }
541  }
542 }
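/* [Editorial note] Bitstream structure implied by the parser above: the
 * stream starts with 2 bits giving the code size (2..5 bits per sample);
 * each block then carries, per channel, a 16-bit signed initial sample and a
 * 6-bit step index, followed by up to 4095 coded values per channel. In each
 * coded value the top bit is the sign, the remaining bits select how much of
 * the current step is accumulated into vpdiff, and the step index moves by
 * swf_index_tables[nb_bits - 2][delta & ~signmask]. */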
543 
544 /**
545  * Get the number of samples that will be decoded from the packet.
546  * In one case, this is actually the maximum number of samples possible to
547  * decode with the given buf_size.
548  *
549  * @param[out] coded_samples set to the number of samples as coded in the
550  * packet, or 0 if the codec does not encode the
551  * number of samples in each frame.
552  * @param[out] approx_nb_samples set to non-zero if the number of samples
553  * returned is an approximation.
554  */
555 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
556  int buf_size, int *coded_samples, int *approx_nb_samples)
557 {
558  ADPCMDecodeContext *s = avctx->priv_data;
559  int nb_samples = 0;
560  int ch = avctx->channels;
561  int has_coded_samples = 0;
562  int header_size;
563 
564  *coded_samples = 0;
565  *approx_nb_samples = 0;
566 
567  if(ch <= 0)
568  return 0;
569 
570  switch (avctx->codec->id) {
571  /* constant, only check buf_size */
573  if (buf_size < 76 * ch)
574  return 0;
575  nb_samples = 128;
576  break;
578  if (buf_size < 34 * ch)
579  return 0;
580  nb_samples = 64;
581  break;
582  /* simple 4-bit adpcm */
590  nb_samples = buf_size * 2 / ch;
591  break;
592  }
593  if (nb_samples)
594  return nb_samples;
595 
596  /* simple 4-bit adpcm, with header */
597  header_size = 0;
598  switch (avctx->codec->id) {
602  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
603  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
604  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
605  }
606  if (header_size > 0)
607  return (buf_size - header_size) * 2 / ch;
608 
609  /* more complex formats */
610  switch (avctx->codec->id) {
612  has_coded_samples = 1;
613  *coded_samples = bytestream2_get_le32(gb);
614  *coded_samples -= *coded_samples % 28;
615  nb_samples = (buf_size - 12) / 30 * 28;
616  break;
618  has_coded_samples = 1;
619  *coded_samples = bytestream2_get_le32(gb);
620  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
621  break;
623  nb_samples = (buf_size - ch) / ch * 2;
624  break;
628  /* maximum number of samples */
629  /* has internal offsets and a per-frame switch to signal raw 16-bit */
630  has_coded_samples = 1;
631  switch (avctx->codec->id) {
633  header_size = 4 + 9 * ch;
634  *coded_samples = bytestream2_get_le32(gb);
635  break;
637  header_size = 4 + 5 * ch;
638  *coded_samples = bytestream2_get_le32(gb);
639  break;
641  header_size = 4 + 5 * ch;
642  *coded_samples = bytestream2_get_be32(gb);
643  break;
644  }
645  *coded_samples -= *coded_samples % 28;
646  nb_samples = (buf_size - header_size) * 2 / ch;
647  nb_samples -= nb_samples % 28;
648  *approx_nb_samples = 1;
649  break;
651  if (avctx->block_align > 0)
652  buf_size = FFMIN(buf_size, avctx->block_align);
653  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
654  break;
656  if (avctx->block_align > 0)
657  buf_size = FFMIN(buf_size, avctx->block_align);
658  if (buf_size < 4 * ch)
659  return AVERROR_INVALIDDATA;
660  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
661  break;
663  if (avctx->block_align > 0)
664  buf_size = FFMIN(buf_size, avctx->block_align);
665  nb_samples = (buf_size - 4 * ch) * 2 / ch;
666  break;
668  {
669  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
670  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
671  if (avctx->block_align > 0)
672  buf_size = FFMIN(buf_size, avctx->block_align);
673  if (buf_size < 4 * ch)
674  return AVERROR_INVALIDDATA;
675  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
676  break;
677  }
679  if (avctx->block_align > 0)
680  buf_size = FFMIN(buf_size, avctx->block_align);
681  nb_samples = (buf_size - 6 * ch) * 2 / ch;
682  break;
684  if (avctx->block_align > 0)
685  buf_size = FFMIN(buf_size, avctx->block_align);
686  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
687  break;
691  {
692  int samples_per_byte;
693  switch (avctx->codec->id) {
694  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
695  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
696  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
697  }
698  if (!s->status[0].step_index) {
699  if (buf_size < ch)
700  return AVERROR_INVALIDDATA;
701  nb_samples++;
702  buf_size -= ch;
703  }
704  nb_samples += buf_size * samples_per_byte / ch;
705  break;
706  }
708  {
709  int buf_bits = buf_size * 8 - 2;
710  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
711  int block_hdr_size = 22 * ch;
712  int block_size = block_hdr_size + nbits * ch * 4095;
713  int nblocks = buf_bits / block_size;
714  int bits_left = buf_bits - nblocks * block_size;
715  nb_samples = nblocks * 4096;
716  if (bits_left >= block_hdr_size)
717  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
718  break;
719  }
722  if (avctx->extradata) {
723  nb_samples = buf_size * 14 / (8 * ch);
724  break;
725  }
726  has_coded_samples = 1;
727  bytestream2_skip(gb, 4); // channel size
728  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
729  bytestream2_get_le32(gb) :
730  bytestream2_get_be32(gb);
731  buf_size -= 8 + 36 * ch;
732  buf_size /= ch;
733  nb_samples = buf_size / 8 * 14;
734  if (buf_size % 8 > 1)
735  nb_samples += (buf_size % 8 - 1) * 2;
736  *approx_nb_samples = 1;
737  break;
739  nb_samples = buf_size / (9 * ch) * 16;
740  break;
742  nb_samples = (buf_size / 128) * 224 / ch;
743  break;
746  nb_samples = buf_size / (16 * ch) * 28;
747  break;
748  }
749 
750  /* validate coded sample count */
751  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
752  return AVERROR_INVALIDDATA;
753 
754  return nb_samples;
755 }
756 
757 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
758  int *got_frame_ptr, AVPacket *avpkt)
759 {
760  AVFrame *frame = data;
761  const uint8_t *buf = avpkt->data;
762  int buf_size = avpkt->size;
763  ADPCMDecodeContext *c = avctx->priv_data;
764  ADPCMChannelStatus *cs;
765  int n, m, channel, i;
766  int16_t *samples;
767  int16_t **samples_p;
768  int st; /* stereo */
769  int count1, count2;
770  int nb_samples, coded_samples, approx_nb_samples, ret;
771  GetByteContext gb;
772 
773  bytestream2_init(&gb, buf, buf_size);
774  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
775  if (nb_samples <= 0) {
776  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
777  return AVERROR_INVALIDDATA;
778  }
779 
780  /* get output buffer */
781  frame->nb_samples = nb_samples;
782  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
783  return ret;
784  samples = (int16_t *)frame->data[0];
785  samples_p = (int16_t **)frame->extended_data;
786 
787  /* use coded_samples when applicable */
788  /* it is always <= nb_samples, so the output buffer will be large enough */
789  if (coded_samples) {
790  if (!approx_nb_samples && coded_samples != nb_samples)
791  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
792  frame->nb_samples = nb_samples = coded_samples;
793  }
794 
795  st = avctx->channels == 2 ? 1 : 0;
796 
797  switch(avctx->codec->id) {
799  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
800  Channel data is interleaved per-chunk. */
801  for (channel = 0; channel < avctx->channels; channel++) {
802  int predictor;
803  int step_index;
804  cs = &(c->status[channel]);
805  /* (pppppp) (piiiiiii) */
806 
807  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
808  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
809  step_index = predictor & 0x7F;
810  predictor &= ~0x7F;
811 
812  if (cs->step_index == step_index) {
813  int diff = predictor - cs->predictor;
814  if (diff < 0)
815  diff = - diff;
816  if (diff > 0x7f)
817  goto update;
818  } else {
819  update:
820  cs->step_index = step_index;
821  cs->predictor = predictor;
822  }
823 
824  if (cs->step_index > 88u){
825  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
826  channel, cs->step_index);
827  return AVERROR_INVALIDDATA;
828  }
829 
830  samples = samples_p[channel];
831 
832  for (m = 0; m < 64; m += 2) {
833  int byte = bytestream2_get_byteu(&gb);
834  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
835  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
836  }
837  }
838  break;
840  for(i=0; i<avctx->channels; i++){
841  cs = &(c->status[i]);
842  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
843 
844  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
845  if (cs->step_index > 88u){
846  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
847  i, cs->step_index);
848  return AVERROR_INVALIDDATA;
849  }
850  }
851 
852  if (avctx->bits_per_coded_sample != 4) {
853  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
854  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
855  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE];
856  GetBitContext g;
857 
858  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
859  for (i = 0; i < avctx->channels; i++) {
860  int j;
861 
862  cs = &c->status[i];
863  samples = &samples_p[i][1 + n * samples_per_block];
864  for (j = 0; j < block_size; j++) {
865  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
866  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
867  }
868  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
869  if (ret < 0)
870  return ret;
871  for (m = 0; m < samples_per_block; m++) {
872  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
873  avctx->bits_per_coded_sample);
874  }
875  }
876  }
877  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
878  } else {
879  for (n = 0; n < (nb_samples - 1) / 8; n++) {
880  for (i = 0; i < avctx->channels; i++) {
881  cs = &c->status[i];
882  samples = &samples_p[i][1 + n * 8];
883  for (m = 0; m < 8; m += 2) {
884  int v = bytestream2_get_byteu(&gb);
885  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
886  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
887  }
888  }
889  }
890  }
891  break;
893  for (i = 0; i < avctx->channels; i++)
894  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
895 
896  for (i = 0; i < avctx->channels; i++) {
897  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
898  if (c->status[i].step_index > 88u) {
899  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
900  i, c->status[i].step_index);
901  return AVERROR_INVALIDDATA;
902  }
903  }
904 
905  for (i = 0; i < avctx->channels; i++) {
906  samples = (int16_t *)frame->data[i];
907  cs = &c->status[i];
908  for (n = nb_samples >> 1; n > 0; n--) {
909  int v = bytestream2_get_byteu(&gb);
910  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
911  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
912  }
913  }
914  break;
916  for (i = 0; i < avctx->channels; i++)
917  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
918  for (i = 0; i < avctx->channels; i++)
919  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
920 
921  for (n = 0; n < nb_samples >> (1 - st); n++) {
922  int v = bytestream2_get_byteu(&gb);
923  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
924  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
925  }
926  break;
928  {
929  int block_predictor;
930 
931  block_predictor = bytestream2_get_byteu(&gb);
932  if (block_predictor > 6) {
933  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
934  block_predictor);
935  return AVERROR_INVALIDDATA;
936  }
937  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
938  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
939  if (st) {
940  block_predictor = bytestream2_get_byteu(&gb);
941  if (block_predictor > 6) {
942  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
943  block_predictor);
944  return AVERROR_INVALIDDATA;
945  }
946  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
947  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
948  }
949  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
950  if (st){
951  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
952  }
953 
954  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
955  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
956  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
957  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
958 
959  *samples++ = c->status[0].sample2;
960  if (st) *samples++ = c->status[1].sample2;
961  *samples++ = c->status[0].sample1;
962  if (st) *samples++ = c->status[1].sample1;
963  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
964  int byte = bytestream2_get_byteu(&gb);
965  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
966  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
967  }
968  break;
969  }
971  for (channel = 0; channel < avctx->channels; channel+=2) {
972  bytestream2_skipu(&gb, 4);
973  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
974  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
975  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
976  bytestream2_skipu(&gb, 2);
977  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
978  bytestream2_skipu(&gb, 2);
979  for (n = 0; n < nb_samples; n+=2) {
980  int v = bytestream2_get_byteu(&gb);
981  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
982  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
983  }
984  for (n = 0; n < nb_samples; n+=2) {
985  int v = bytestream2_get_byteu(&gb);
986  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
987  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
988  }
989  }
990  break;
992  for (channel = 0; channel < avctx->channels; channel++) {
993  cs = &c->status[channel];
994  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
995  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
996  if (cs->step_index > 88u){
997  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
998  channel, cs->step_index);
999  return AVERROR_INVALIDDATA;
1000  }
1001  }
1002  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1003  int v = bytestream2_get_byteu(&gb);
1004  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1005  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1006  }
1007  break;
1009  {
1010  int last_byte = 0;
1011  int nibble;
1012  int decode_top_nibble_next = 0;
1013  int diff_channel;
1014  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1015 
1016  bytestream2_skipu(&gb, 10);
1017  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1018  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1019  c->status[0].step_index = bytestream2_get_byteu(&gb);
1020  c->status[1].step_index = bytestream2_get_byteu(&gb);
1021  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1022  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1023  c->status[0].step_index, c->status[1].step_index);
1024  return AVERROR_INVALIDDATA;
1025  }
1026  /* sign extend the predictors */
1027  diff_channel = c->status[1].predictor;
1028 
1029  /* DK3 ADPCM support macro */
1030 #define DK3_GET_NEXT_NIBBLE() \
1031  if (decode_top_nibble_next) { \
1032  nibble = last_byte >> 4; \
1033  decode_top_nibble_next = 0; \
1034  } else { \
1035  last_byte = bytestream2_get_byteu(&gb); \
1036  nibble = last_byte & 0x0F; \
1037  decode_top_nibble_next = 1; \
1038  }
1039 
1040  while (samples < samples_end) {
1041 
1042  /* for this algorithm, c->status[0] is the sum channel and
1043  * c->status[1] is the diff channel */
1044 
1045  /* process the first predictor of the sum channel */
1046  DK3_GET_NEXT_NIBBLE();
1047  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1048 
1049  /* process the diff channel predictor */
1050  DK3_GET_NEXT_NIBBLE();
1051  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1052 
1053  /* process the first pair of stereo PCM samples */
1054  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1055  *samples++ = c->status[0].predictor + c->status[1].predictor;
1056  *samples++ = c->status[0].predictor - c->status[1].predictor;
1057 
1058  /* process the second predictor of the sum channel */
1059  DK3_GET_NEXT_NIBBLE();
1060  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1061 
1062  /* process the second pair of stereo PCM samples */
1063  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1064  *samples++ = c->status[0].predictor + c->status[1].predictor;
1065  *samples++ = c->status[0].predictor - c->status[1].predictor;
1066  }
1067 
1068  if ((bytestream2_tell(&gb) & 1))
1069  bytestream2_skip(&gb, 1);
1070  break;
1071  }
1073  for (channel = 0; channel < avctx->channels; channel++) {
1074  cs = &c->status[channel];
1075  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1076  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1077  if (cs->step_index > 88u){
1078  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1079  channel, cs->step_index);
1080  return AVERROR_INVALIDDATA;
1081  }
1082  }
1083 
1084  for (n = nb_samples >> (1 - st); n > 0; n--) {
1085  int v1, v2;
1086  int v = bytestream2_get_byteu(&gb);
1087  /* nibbles are swapped for mono */
1088  if (st) {
1089  v1 = v >> 4;
1090  v2 = v & 0x0F;
1091  } else {
1092  v2 = v >> 4;
1093  v1 = v & 0x0F;
1094  }
1095  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1096  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1097  }
1098  break;
1100  for (channel = 0; channel < avctx->channels; channel++) {
1101  cs = &c->status[channel];
1102  samples = samples_p[channel];
1103  bytestream2_skip(&gb, 4);
1104  for (n = 0; n < nb_samples; n += 2) {
1105  int v = bytestream2_get_byteu(&gb);
1106  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1107  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1108  }
1109  }
1110  break;
1112  while (bytestream2_get_bytes_left(&gb) > 0) {
1113  int v = bytestream2_get_byteu(&gb);
1114  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1115  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1116  }
1117  break;
1119  while (bytestream2_get_bytes_left(&gb) > 0) {
1120  int v = bytestream2_get_byteu(&gb);
1121  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1122  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1123  }
1124  break;
1126  for (channel = 0; channel < avctx->channels; channel++) {
1127  cs = &c->status[channel];
1128  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1129  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1130  if (cs->step_index > 88u){
1131  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1132  channel, cs->step_index);
1133  return AVERROR_INVALIDDATA;
1134  }
1135  }
1136  for (n = 0; n < nb_samples / 2; n++) {
1137  int byte[2];
1138 
1139  byte[0] = bytestream2_get_byteu(&gb);
1140  if (st)
1141  byte[1] = bytestream2_get_byteu(&gb);
1142  for(channel = 0; channel < avctx->channels; channel++) {
1143  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1144  }
1145  for(channel = 0; channel < avctx->channels; channel++) {
1146  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1147  }
1148  }
1149  break;
1151  if (c->vqa_version == 3) {
1152  for (channel = 0; channel < avctx->channels; channel++) {
1153  int16_t *smp = samples_p[channel];
1154 
1155  for (n = nb_samples / 2; n > 0; n--) {
1156  int v = bytestream2_get_byteu(&gb);
1157  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1158  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1159  }
1160  }
1161  } else {
1162  for (n = nb_samples / 2; n > 0; n--) {
1163  for (channel = 0; channel < avctx->channels; channel++) {
1164  int v = bytestream2_get_byteu(&gb);
1165  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1166  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1167  }
1168  samples += avctx->channels;
1169  }
1170  }
1171  bytestream2_seek(&gb, 0, SEEK_END);
1172  break;
1173  case AV_CODEC_ID_ADPCM_XA:
1174  {
1175  int16_t *out0 = samples_p[0];
1176  int16_t *out1 = samples_p[1];
1177  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1178  int sample_offset = 0;
1179  int bytes_remaining;
1180  while (bytestream2_get_bytes_left(&gb) >= 128) {
1181  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1182  &c->status[0], &c->status[1],
1183  avctx->channels, sample_offset)) < 0)
1184  return ret;
1185  bytestream2_skipu(&gb, 128);
1186  sample_offset += samples_per_block;
1187  }
1188  /* Less than a full block of data left, e.g. when reading from
1189  * 2324 byte per sector XA; the remainder is padding */
1190  bytes_remaining = bytestream2_get_bytes_left(&gb);
1191  if (bytes_remaining > 0) {
1192  bytestream2_skip(&gb, bytes_remaining);
1193  }
1194  break;
1195  }
1197  for (i=0; i<=st; i++) {
1198  c->status[i].step_index = bytestream2_get_le32u(&gb);
1199  if (c->status[i].step_index > 88u) {
1200  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1201  i, c->status[i].step_index);
1202  return AVERROR_INVALIDDATA;
1203  }
1204  }
1205  for (i=0; i<=st; i++) {
1206  c->status[i].predictor = bytestream2_get_le32u(&gb);
1207  if (FFABS(c->status[i].predictor) > (1<<16))
1208  return AVERROR_INVALIDDATA;
1209  }
1210 
1211  for (n = nb_samples >> (1 - st); n > 0; n--) {
1212  int byte = bytestream2_get_byteu(&gb);
1213  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1214  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1215  }
1216  break;
1218  for (n = nb_samples >> (1 - st); n > 0; n--) {
1219  int byte = bytestream2_get_byteu(&gb);
1220  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1221  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1222  }
1223  break;
1224  case AV_CODEC_ID_ADPCM_EA:
1225  {
1226  int previous_left_sample, previous_right_sample;
1227  int current_left_sample, current_right_sample;
1228  int next_left_sample, next_right_sample;
1229  int coeff1l, coeff2l, coeff1r, coeff2r;
1230  int shift_left, shift_right;
1231 
1232  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1233  each coding 28 stereo samples. */
1234 
1235  if(avctx->channels != 2)
1236  return AVERROR_INVALIDDATA;
1237 
1238  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1239  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1240  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1241  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1242 
1243  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1244  int byte = bytestream2_get_byteu(&gb);
1245  coeff1l = ea_adpcm_table[ byte >> 4 ];
1246  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1247  coeff1r = ea_adpcm_table[ byte & 0x0F];
1248  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1249 
1250  byte = bytestream2_get_byteu(&gb);
1251  shift_left = 20 - (byte >> 4);
1252  shift_right = 20 - (byte & 0x0F);
1253 
1254  for (count2 = 0; count2 < 28; count2++) {
1255  byte = bytestream2_get_byteu(&gb);
1256  next_left_sample = sign_extend(byte >> 4, 4) << shift_left;
1257  next_right_sample = sign_extend(byte, 4) << shift_right;
1258 
1259  next_left_sample = (next_left_sample +
1260  (current_left_sample * coeff1l) +
1261  (previous_left_sample * coeff2l) + 0x80) >> 8;
1262  next_right_sample = (next_right_sample +
1263  (current_right_sample * coeff1r) +
1264  (previous_right_sample * coeff2r) + 0x80) >> 8;
1265 
1266  previous_left_sample = current_left_sample;
1267  current_left_sample = av_clip_int16(next_left_sample);
1268  previous_right_sample = current_right_sample;
1269  current_right_sample = av_clip_int16(next_right_sample);
1270  *samples++ = current_left_sample;
1271  *samples++ = current_right_sample;
1272  }
1273  }
1274 
1275  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1276 
1277  break;
1278  }
1280  {
1281  int coeff[2][2], shift[2];
1282 
1283  for(channel = 0; channel < avctx->channels; channel++) {
1284  int byte = bytestream2_get_byteu(&gb);
1285  for (i=0; i<2; i++)
1286  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1287  shift[channel] = 20 - (byte & 0x0F);
1288  }
1289  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1290  int byte[2];
1291 
1292  byte[0] = bytestream2_get_byteu(&gb);
1293  if (st) byte[1] = bytestream2_get_byteu(&gb);
1294  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1295  for(channel = 0; channel < avctx->channels; channel++) {
1296  int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
1297  sample = (sample +
1298  c->status[channel].sample1 * coeff[channel][0] +
1299  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1300  c->status[channel].sample2 = c->status[channel].sample1;
1301  c->status[channel].sample1 = av_clip_int16(sample);
1302  *samples++ = c->status[channel].sample1;
1303  }
1304  }
1305  }
1306  bytestream2_seek(&gb, 0, SEEK_END);
1307  break;
1308  }
1311  case AV_CODEC_ID_ADPCM_EA_R3: {
1312  /* channel numbering
1313  2chan: 0=fl, 1=fr
1314  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1315  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1316  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1317  int previous_sample, current_sample, next_sample;
1318  int coeff1, coeff2;
1319  int shift;
1320  unsigned int channel;
1321  uint16_t *samplesC;
1322  int count = 0;
1323  int offsets[6];
1324 
1325  for (channel=0; channel<avctx->channels; channel++)
1326  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1327  bytestream2_get_le32(&gb)) +
1328  (avctx->channels + 1) * 4;
1329 
1330  for (channel=0; channel<avctx->channels; channel++) {
1331  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1332  samplesC = samples_p[channel];
1333 
1334  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1335  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1336  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1337  } else {
1338  current_sample = c->status[channel].predictor;
1339  previous_sample = c->status[channel].prev_sample;
1340  }
1341 
1342  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1343  int byte = bytestream2_get_byte(&gb);
1344  if (byte == 0xEE) { /* only seen in R2 and R3 */
1345  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1346  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1347 
1348  for (count2=0; count2<28; count2++)
1349  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1350  } else {
1351  coeff1 = ea_adpcm_table[ byte >> 4 ];
1352  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1353  shift = 20 - (byte & 0x0F);
1354 
1355  for (count2=0; count2<28; count2++) {
1356  if (count2 & 1)
1357  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1358  else {
1359  byte = bytestream2_get_byte(&gb);
1360  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1361  }
1362 
1363  next_sample += (current_sample * coeff1) +
1364  (previous_sample * coeff2);
1365  next_sample = av_clip_int16(next_sample >> 8);
1366 
1367  previous_sample = current_sample;
1368  current_sample = next_sample;
1369  *samplesC++ = current_sample;
1370  }
1371  }
1372  }
1373  if (!count) {
1374  count = count1;
1375  } else if (count != count1) {
1376  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1377  count = FFMAX(count, count1);
1378  }
1379 
1380  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1381  c->status[channel].predictor = current_sample;
1382  c->status[channel].prev_sample = previous_sample;
1383  }
1384  }
1385 
1386  frame->nb_samples = count * 28;
1387  bytestream2_seek(&gb, 0, SEEK_END);
1388  break;
1389  }
1391  for (channel=0; channel<avctx->channels; channel++) {
1392  int coeff[2][4], shift[4];
1393  int16_t *s = samples_p[channel];
1394  for (n = 0; n < 4; n++, s += 32) {
1395  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1396  for (i=0; i<2; i++)
1397  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1398  s[0] = val & ~0x0F;
1399 
1400  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1401  shift[n] = 20 - (val & 0x0F);
1402  s[1] = val & ~0x0F;
1403  }
1404 
1405  for (m=2; m<32; m+=2) {
1406  s = &samples_p[channel][m];
1407  for (n = 0; n < 4; n++, s += 32) {
1408  int level, pred;
1409  int byte = bytestream2_get_byteu(&gb);
1410 
1411  level = sign_extend(byte >> 4, 4) << shift[n];
1412  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1413  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1414 
1415  level = sign_extend(byte, 4) << shift[n];
1416  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1417  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1418  }
1419  }
1420  }
1421  break;
1423  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1424  c->status[0].step_index = bytestream2_get_byteu(&gb);
1425  bytestream2_skipu(&gb, 5);
1426  if (c->status[0].step_index > 88u) {
1427  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1428  c->status[0].step_index);
1429  return AVERROR_INVALIDDATA;
1430  }
1431 
1432  for (n = nb_samples >> (1 - st); n > 0; n--) {
1433  int v = bytestream2_get_byteu(&gb);
1434 
1435  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1436  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1437  }
1438  break;
1440  for (i = 0; i < avctx->channels; i++) {
1441  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1442  c->status[i].step_index = bytestream2_get_byteu(&gb);
1443  bytestream2_skipu(&gb, 1);
1444  if (c->status[i].step_index > 88u) {
1445  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1446  c->status[i].step_index);
1447  return AVERROR_INVALIDDATA;
1448  }
1449  }
1450 
1451  for (n = nb_samples >> (1 - st); n > 0; n--) {
1452  int v = bytestream2_get_byteu(&gb);
1453 
1454  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1455  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1456  }
1457  break;
1458  case AV_CODEC_ID_ADPCM_CT:
1459  for (n = nb_samples >> (1 - st); n > 0; n--) {
1460  int v = bytestream2_get_byteu(&gb);
1461  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1462  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1463  }
1464  break;
1468  if (!c->status[0].step_index) {
1469  /* the first byte is a raw sample */
1470  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1471  if (st)
1472  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1473  c->status[0].step_index = 1;
1474  nb_samples--;
1475  }
1476  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1477  for (n = nb_samples >> (1 - st); n > 0; n--) {
1478  int byte = bytestream2_get_byteu(&gb);
1479  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1480  byte >> 4, 4, 0);
1481  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1482  byte & 0x0F, 4, 0);
1483  }
1484  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1485  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1486  int byte = bytestream2_get_byteu(&gb);
1487  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1488  byte >> 5 , 3, 0);
1489  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1490  (byte >> 2) & 0x07, 3, 0);
1491  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1492  byte & 0x03, 2, 0);
1493  }
1494  } else {
1495  for (n = nb_samples >> (2 - st); n > 0; n--) {
1496  int byte = bytestream2_get_byteu(&gb);
1497  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1498  byte >> 6 , 2, 2);
1499  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1500  (byte >> 4) & 0x03, 2, 2);
1501  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1502  (byte >> 2) & 0x03, 2, 2);
1503  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1504  byte & 0x03, 2, 2);
1505  }
1506  }
1507  break;
1508  case AV_CODEC_ID_ADPCM_SWF:
1509  adpcm_swf_decode(avctx, buf, buf_size, samples);
1510  bytestream2_seek(&gb, 0, SEEK_END);
1511  break;
1513  for (n = nb_samples >> (1 - st); n > 0; n--) {
1514  int v = bytestream2_get_byteu(&gb);
1515  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1516  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1517  }
1518  break;
1520  if (!c->has_status) {
1521  for (channel = 0; channel < avctx->channels; channel++)
1522  c->status[channel].step = 0;
1523  c->has_status = 1;
1524  }
1525  for (channel = 0; channel < avctx->channels; channel++) {
1526  samples = samples_p[channel];
1527  for (n = nb_samples >> 1; n > 0; n--) {
1528  int v = bytestream2_get_byteu(&gb);
1529  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1530  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1531  }
1532  }
1533  break;
1534  case AV_CODEC_ID_ADPCM_AFC:
1535  {
1536  int samples_per_block;
1537  int blocks;
1538 
1539  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1540  samples_per_block = avctx->extradata[0] / 16;
1541  blocks = nb_samples / avctx->extradata[0];
1542  } else {
1543  samples_per_block = nb_samples / 16;
1544  blocks = 1;
1545  }
1546 
1547  for (m = 0; m < blocks; m++) {
1548  for (channel = 0; channel < avctx->channels; channel++) {
1549  int prev1 = c->status[channel].sample1;
1550  int prev2 = c->status[channel].sample2;
1551 
1552  samples = samples_p[channel] + m * 16;
1553  /* Read in every sample for this channel. */
1554  for (i = 0; i < samples_per_block; i++) {
1555  int byte = bytestream2_get_byteu(&gb);
1556  int scale = 1 << (byte >> 4);
1557  int index = byte & 0xf;
1558  int factor1 = ff_adpcm_afc_coeffs[0][index];
1559  int factor2 = ff_adpcm_afc_coeffs[1][index];
1560 
1561  /* Decode 16 samples. */
1562  for (n = 0; n < 16; n++) {
1563  int32_t sampledat;
1564 
1565  if (n & 1) {
1566  sampledat = sign_extend(byte, 4);
1567  } else {
1568  byte = bytestream2_get_byteu(&gb);
1569  sampledat = sign_extend(byte >> 4, 4);
1570  }
1571 
1572  sampledat = ((prev1 * factor1 + prev2 * factor2) +
1573  ((sampledat * scale) << 11)) >> 11;
1574  *samples = av_clip_int16(sampledat);
1575  prev2 = prev1;
1576  prev1 = *samples++;
1577  }
1578  }
1579 
1580  c->status[channel].sample1 = prev1;
1581  c->status[channel].sample2 = prev2;
1582  }
1583  }
1584  bytestream2_seek(&gb, 0, SEEK_END);
1585  break;
1586  }
1587  case AV_CODEC_ID_ADPCM_THP:
1589  {
1590  int table[14][16];
1591  int ch;
1592 
1593 #define THP_GET16(g) \
1594  sign_extend( \
1595  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1596  bytestream2_get_le16u(&(g)) : \
1597  bytestream2_get_be16u(&(g)), 16)
1598 
1599  if (avctx->extradata) {
1600  GetByteContext tb;
1601  if (avctx->extradata_size < 32 * avctx->channels) {
1602  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1603  return AVERROR_INVALIDDATA;
1604  }
1605 
1606  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1607  for (i = 0; i < avctx->channels; i++)
1608  for (n = 0; n < 16; n++)
1609  table[i][n] = THP_GET16(tb);
1610  } else {
1611  for (i = 0; i < avctx->channels; i++)
1612  for (n = 0; n < 16; n++)
1613  table[i][n] = THP_GET16(gb);
1614 
1615  if (!c->has_status) {
1616  /* Initialize the previous sample. */
1617  for (i = 0; i < avctx->channels; i++) {
1618  c->status[i].sample1 = THP_GET16(gb);
1619  c->status[i].sample2 = THP_GET16(gb);
1620  }
1621  c->has_status = 1;
1622  } else {
1623  bytestream2_skip(&gb, avctx->channels * 4);
1624  }
1625  }
1626 
1627  for (ch = 0; ch < avctx->channels; ch++) {
1628  samples = samples_p[ch];
1629 
1630  /* Read in every sample for this channel. */
1631  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1632  int byte = bytestream2_get_byteu(&gb);
1633  int index = (byte >> 4) & 7;
1634  unsigned int exp = byte & 0x0F;
1635  int factor1 = table[ch][index * 2];
1636  int factor2 = table[ch][index * 2 + 1];
1637 
1638  /* Decode 14 samples. */
1639  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1640  int32_t sampledat;
1641 
1642  if (n & 1) {
1643  sampledat = sign_extend(byte, 4);
1644  } else {
1645  byte = bytestream2_get_byteu(&gb);
1646  sampledat = sign_extend(byte >> 4, 4);
1647  }
1648 
1649  sampledat = ((c->status[ch].sample1 * factor1
1650  + c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
1651  *samples = av_clip_int16(sampledat);
1652  c->status[ch].sample2 = c->status[ch].sample1;
1653  c->status[ch].sample1 = *samples++;
1654  }
1655  }
1656  }
1657  break;
1658  }
1659  case AV_CODEC_ID_ADPCM_DTK:
1660  for (channel = 0; channel < avctx->channels; channel++) {
1661  samples = samples_p[channel];
1662 
1663  /* Read in every sample for this channel. */
1664  for (i = 0; i < nb_samples / 28; i++) {
1665  int byte, header;
1666  if (channel)
1667  bytestream2_skipu(&gb, 1);
1668  header = bytestream2_get_byteu(&gb);
1669  bytestream2_skipu(&gb, 3 - channel);
1670 
1671  /* Decode 28 samples. */
1672  for (n = 0; n < 28; n++) {
1673  int32_t sampledat, prev;
1674 
1675  switch (header >> 4) {
1676  case 1:
1677  prev = (c->status[channel].sample1 * 0x3c);
1678  break;
1679  case 2:
1680  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1681  break;
1682  case 3:
1683  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1684  break;
1685  default:
1686  prev = 0;
1687  }
1688 
1689  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1690 
1691  byte = bytestream2_get_byteu(&gb);
1692  if (!channel)
1693  sampledat = sign_extend(byte, 4);
1694  else
1695  sampledat = sign_extend(byte >> 4, 4);
1696 
1697  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1698  *samples++ = av_clip_int16(sampledat >> 6);
1699  c->status[channel].sample2 = c->status[channel].sample1;
1700  c->status[channel].sample1 = sampledat;
1701  }
1702  }
1703  if (!channel)
1704  bytestream2_seek(&gb, 0, SEEK_SET);
1705  }
1706  break;
1707  case AV_CODEC_ID_ADPCM_PSX:
1708  for (channel = 0; channel < avctx->channels; channel++) {
1709  samples = samples_p[channel];
1710 
1711  /* Read in every sample for this channel. */
1712  for (i = 0; i < nb_samples / 28; i++) {
1713  int filter, shift, flag, byte;
1714 
1715  filter = bytestream2_get_byteu(&gb);
1716  shift = filter & 0xf;
1717  filter = filter >> 4;
1718  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1719  return AVERROR_INVALIDDATA;
1720  flag = bytestream2_get_byteu(&gb);
1721 
1722  /* Decode 28 samples. */
1723  for (n = 0; n < 28; n++) {
1724  int sample = 0, scale;
1725 
1726  if (flag < 0x07) {
1727  if (n & 1) {
1728  scale = sign_extend(byte >> 4, 4);
1729  } else {
1730  byte = bytestream2_get_byteu(&gb);
1731  scale = sign_extend(byte, 4);
1732  }
1733 
1734  scale = scale << 12;
1735  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
1736  }
1737  *samples++ = av_clip_int16(sample);
1738  c->status[channel].sample2 = c->status[channel].sample1;
1739  c->status[channel].sample1 = sample;
1740  }
1741  }
1742  }
1743  break;
1744 
1745  default:
1746  av_assert0(0); // unsupported codec_id should not happen
1747  }
1748 
1749  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1750  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1751  return AVERROR_INVALIDDATA;
1752  }
1753 
1754  *got_frame_ptr = 1;
1755 
1756  if (avpkt->size < bytestream2_tell(&gb)) {
1757  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
1758  return avpkt->size;
1759  }
1760 
1761  return bytestream2_tell(&gb);
1762 }
1763 
1764 static void adpcm_flush(AVCodecContext *avctx)
1765 {
1766  ADPCMDecodeContext *c = avctx->priv_data;
1767  c->has_status = 0;
1768 }
1769 
1770 
1771 static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
1772                                                          AV_SAMPLE_FMT_NONE };
1773 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
1774                                                          AV_SAMPLE_FMT_NONE };
1775 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
1776                                                          AV_SAMPLE_FMT_S16P,
1777                                                          AV_SAMPLE_FMT_NONE };
1778 
1779 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1780 AVCodec ff_ ## name_ ## _decoder = { \
1781  .name = #name_, \
1782  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1783  .type = AVMEDIA_TYPE_AUDIO, \
1784  .id = id_, \
1785  .priv_data_size = sizeof(ADPCMDecodeContext), \
1786  .init = adpcm_decode_init, \
1787  .decode = adpcm_decode_frame, \
1788  .flush = adpcm_flush, \
1789  .capabilities = AV_CODEC_CAP_DR1, \
1790  .sample_fmts = sample_fmts_, \
1791 }
1792 
1793 /* Note: Do not forget to add new entries to the Makefile as well. */
1794 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
1795 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
1796 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
1797 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
1798 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
1799 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
1800 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
1801 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
1802 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
1803 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
1804 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
1805 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
1806 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
1807 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
1808 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
1809 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
1810 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
1811 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
1812 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
1813 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
1814 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
1815 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
1816 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
1817 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
1818 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
1819 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
1820 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
1821 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
1822 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
1823 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
1824 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
1825 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
1826 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
1827 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
1828 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
1829 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
1830 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
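
Each ADPCM_DECODER() line above is a thin registration: the macro expands to an ordinary AVCodec definition wired to the shared adpcm_decode_init/adpcm_decode_frame/adpcm_flush functions in this file. As an illustration (hand-expanded from the macro, not part of the original listing), the adpcm_psx entry becomes roughly:

AVCodec ff_adpcm_psx_decoder = {
    .name           = "adpcm_psx",
    .long_name      = NULL_IF_CONFIG_SMALL("ADPCM Playstation"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ADPCM_PSX,
    .priv_data_size = sizeof(ADPCMDecodeContext),
    .init           = adpcm_decode_init,
    .decode         = adpcm_decode_frame,
    .flush          = adpcm_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .sample_fmts    = sample_fmts_s16p,
};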