78     for (i = 0; i < 4; i++) {
79         for (j = 0; j < pce->num_ele[i]; j++) {
99     int channels = (!s->needs_pce)*(s->channels - (s->channels == 8 ? 1 : 0));
100    const int max_size = 32;
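The expression above maps the channel count to the MPEG-4 channelConfiguration field: 0 means "layout described by an in-stream PCE", and an 8-channel (7.1) layout is signalled with index 7. A minimal standalone sketch of that mapping (hypothetical helper name, not part of the encoder):

    #include <stdio.h>

    /* Hypothetical sketch of the channelConfiguration mapping used above:
     * 0 signals "layout comes from the PCE", and 8 input channels (7.1)
     * are signalled with index 7 per the MPEG-4 tables. */
    static int aac_channel_config(int channels, int needs_pce)
    {
        if (needs_pce)
            return 0;                        /* layout comes from the PCE */
        return channels - (channels == 8);   /* 8 channels -> config 7 */
    }

    int main(void)
    {
        printf("%d %d %d\n", aac_channel_config(2, 0),
                             aac_channel_config(8, 0),
                             aac_channel_config(6, 1)); /* prints: 2 7 0 */
        return 0;
    }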
129 ++
s->quantize_band_cost_cache_generation;
130 if (
s->quantize_band_cost_cache_generation == 0) {
131 memset(
s->quantize_band_cost_cache, 0,
sizeof(
s->quantize_band_cost_cache));
132 s->quantize_band_cost_cache_generation = 1;
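ff_quantize_band_cost_cache_init() invalidates the whole cache in O(1) by bumping a generation counter; the full memset is only needed when the counter wraps to 0, since a stale entry could otherwise collide with a reused generation value. A self-contained sketch of the idiom, using hypothetical types rather than the encoder's own:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Generation-counter cache invalidation: bumping "generation" makes
     * every entry stale in O(1); entries are only valid when their stored
     * generation matches the current one. */
    typedef struct CacheEntry { float cost; uint16_t generation; } CacheEntry;

    typedef struct Cache {
        CacheEntry entries[256];
        uint16_t   generation;
    } Cache;

    static void cache_reset(Cache *c)
    {
        ++c->generation;
        if (c->generation == 0) {           /* wrap: wipe to avoid false hits */
            memset(c->entries, 0, sizeof(c->entries));
            c->generation = 1;
        }
    }

    static int cache_lookup(const Cache *c, int idx, float *cost)
    {
        if (c->entries[idx].generation != c->generation)
            return 0;                       /* stale entry from an old generation */
        *cost = c->entries[idx].cost;
        return 1;
    }

    int main(void)
    {
        Cache c = {0};
        float v;
        cache_reset(&c);
        c.entries[3] = (CacheEntry){ 42.0f, c.generation };
        printf("%d\n", cache_lookup(&c, 3, &v));  /* 1: hit */
        cache_reset(&c);
        printf("%d\n", cache_lookup(&c, 3, &v));  /* 0: invalidated */
        return 0;
    }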
136 #define WINDOW_FUNC(type) \
137 static void apply_ ##type ##_window(AVFloatDSPContext *fdsp, \
138                                     SingleChannelElement *sce, \
145    float *out = sce->ret_buf;
147    fdsp->vector_fmul        (out,        audio,        lwindow, 1024);
148    fdsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024);
155    float *out = sce->ret_buf;
157    fdsp->vector_fmul(out, audio, lwindow, 1024);
158    memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448);
159    fdsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128);
160    memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
167    float *out = sce->ret_buf;
169    memset(out, 0, sizeof(out[0]) * 448);
170    fdsp->vector_fmul(out + 448, audio + 448, swindow, 128);
171    memcpy(out + 576, audio + 576, sizeof(out[0]) * 448);
172    fdsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024);
179    const float *in = audio + 448;
180    float *out = sce->ret_buf;
183    for (w = 0; w < 8; w++) {
184        fdsp->vector_fmul        (out, in, w ? pwindow : swindow, 128);
187        fdsp->vector_fmul_reverse(out, in, swindow, 128);
194                               const float *audio) = {
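The WINDOW_FUNC expansions above all share one signature, so they can be collected in the apply_window[] function-pointer table declared here and selected by the window_sequence value. A compressed sketch of the dispatch pattern, with abbreviated names rather than the real table:

    /* Sketch of the dispatch pattern (names abbreviated, not the real
     * table): each WINDOW_FUNC expansion yields one apply_<type>_window()
     * with an identical signature, and the window-sequence enum value
     * picks the matching entry, so the caller is one indirect call. */
    enum WinSeq { ONLY_LONG, LONG_START, EIGHT_SHORT, LONG_STOP, NB_WINSEQ };

    typedef void (*win_fn)(float *out, const float *audio);

    static void win_only_long  (float *out, const float *audio) { (void)out; (void)audio; }
    static void win_long_start (float *out, const float *audio) { (void)out; (void)audio; }
    static void win_eight_short(float *out, const float *audio) { (void)out; (void)audio; }
    static void win_long_stop  (float *out, const float *audio) { (void)out; (void)audio; }

    static win_fn const window_table[NB_WINSEQ] = {
        [ONLY_LONG]   = win_only_long,
        [LONG_START]  = win_long_start,
        [EIGHT_SHORT] = win_eight_short,
        [LONG_STOP]   = win_long_stop,
    };

    static void apply(enum WinSeq seq, float *out, const float *audio)
    {
        window_table[seq](out, audio);   /* one indirect call, no branches */
    }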
212    for (i = 0; i < 1024; i += 128)
214    memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024);
234    for (w = 1; w < 8; w++)
262    for (ch = 0; ch < chans; ch++) {
268        for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w*16+cmaxsfb-1]; cmaxsfb--)
270        maxsfb = FFMAX(maxsfb, cmaxsfb);
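The loop above derives max_sfb by scanning down from num_swb until it finds a band that is actually coded, keeping the maximum across channels so both channels of a pair share one value. A standalone sketch (flat, hypothetical zeroes[] layout with 16 bands per window group):

    /* Standalone sketch of the max_sfb scan: walk down from num_swb until
     * a coded (non-zeroed) band is found in each window group, then keep
     * the maximum over all groups. */
    static int find_max_sfb(const unsigned char *zeroes, int num_swb,
                            int num_groups)
    {
        int maxsfb = 0;
        for (int w = 0; w < num_groups; w++) {
            int cmaxsfb;
            for (cmaxsfb = num_swb;
                 cmaxsfb > 0 && zeroes[w * 16 + cmaxsfb - 1];
                 cmaxsfb--)
                ;
            maxsfb = maxsfb > cmaxsfb ? maxsfb : cmaxsfb;
        }
        return maxsfb;
    }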
300    if (msc == 0 || ics0->max_sfb == 0)
315            int start = (w+w2) * 128;
344            int start = (w+w2) * 128;
375    if (s->coder->set_special_band_scalefactors)
376        s->coder->set_special_band_scalefactors(s, sce);
389 int off_is = 0, noise_flag = 1;
398 if (noise_flag-- > 0) {
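The noise_flag counter above implements the PNS "first noise band" convention: the first noise band's energy is written with a fixed-length preamble-coded value, and only later noise bands go through the regular scalefactor codebook. A hypothetical standalone sketch with a toy bit sink; the real constants live in aacenc.h (NOISE_PRE and NOISE_PRE_BITS, assumed here to be 256 and 9):

    /* Sketch of the first-noise-band convention: the first PNS band is
     * sent as a NOISE_PRE_BITS-wide value offset by NOISE_PRE; subsequent
     * noise bands use the ordinary scalefactor Huffman table. */
    #define NOISE_PRE      256   /* assumed value, see aacenc.h */
    #define NOISE_PRE_BITS 9     /* assumed value, see aacenc.h */

    static void put_bits_stub(int nbits, int value)  /* toy bit sink */
    { (void)nbits; (void)value; }

    static void send_noise_energy(int *noise_flag, int diff)
    {
        if ((*noise_flag)-- > 0)
            put_bits_stub(NOISE_PRE_BITS, diff + NOISE_PRE); /* first band */
        /* else: fall through to the scalefactor codebook (not shown) */
    }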
452                s->coder->quantize_and_encode_band(s, &s->pb,
453                                                   &sce->coeffs[start + w2*128],
476            float *swb_coeffs = &sce->coeffs[start + w*128];
493    if (!common_window) {
495        if (s->coder->encode_main_pred)
496            s->coder->encode_main_pred(s, sce);
497        if (s->coder->encode_ltp_info)
498            s->coder->encode_ltp_info(s, sce, 0);
504    if (s->coder->encode_tns_info)
505        s->coder->encode_tns_info(s, sce);
516 int i, namelen, padbits;
518    namelen = strlen(name) + 2;
526    for (i = 0; i < namelen - 2; i++)
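put_bitstream_info() writes the encoder name into a fill element and then pads the bitstream to a byte boundary; the padding amount is simply the negated bit count masked to 7. A one-line sketch of that arithmetic in hypothetical standalone form:

    /* Padding to the next byte boundary: for any bit count b, (-b & 7)
     * is the number of zero bits (0..7) needed to reach alignment,
     * e.g. b = 3 gives 5, b = 8 gives 0. */
    static int fill_element_padbits(int bits_written)
    {
        return -bits_written & 7;
    }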
539    const uint8_t *channel_map = s->reorder_map;
542    for (ch = 0; ch < s->channels; ch++) {
544        memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048],
                  1024 * sizeof(s->planar_samples[0][0]));
548        memcpy(&s->planar_samples[ch][2048],
549               frame->extended_data[channel_map[ch]],
550               frame->nb_samples * sizeof(s->planar_samples[0][0]));
552        memset(&s->planar_samples[ch][end], 0,
553               (3072 - end) * sizeof(s->planar_samples[0][0]));
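copy_input_samples() maintains a 3 x 1024-sample history per channel: the middle third shifts down into the bottom third's slot, the new (possibly short) input lands in the top third, and the remainder is zero-padded. A self-contained sketch of the same sliding-buffer step:

    #include <string.h>

    /* Sliding input buffer: each frame the previous input moves down one
     * 1024-sample slot and the new samples land in the top third, so
     * [0..2048) always holds the history the MDCT and the psy model need.
     * "end" marks how far a (possibly short) last frame filled the top
     * third; the rest is zeroed. */
    static void slide_input(float buf[3072], const float *in, int nb_samples)
    {
        int end = 2048 + nb_samples;
        memcpy(buf + 1024, buf + 2048, 1024 * sizeof(*buf));  /* shift history */
        memcpy(buf + 2048, in, nb_samples * sizeof(*buf));    /* append input  */
        memset(buf + end, 0, (3072 - end) * sizeof(*buf));    /* pad short frame */
    }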
561    float **samples = s->planar_samples, *samples2, *la, *overlap;
565    int i, its, ch, w, chans, tag, start_ch, ret, frame_bits;
566    int target_bits, rate_bits, too_many_bits, too_few_bits;
567    int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0;
568    int chan_el_counter[4];
576    if (!s->afq.remaining_samples || (!s->afq.frame_alloc && !s->afq.frame_count))
588    for (i = 0; i < s->chan_map[0]; i++) {
590        tag = s->chan_map[i+1];
593        for (ch = 0; ch < chans; ch++) {
595            float clip_avoidance_factor;
598            s->cur_channel = start_ch + ch;
599            overlap  = &samples[s->cur_channel][0];
600            samples2 = overlap + 1024;
601            la       = samples2 + (448+64);
615                ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
617            wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel,
639            clip_avoidance_factor = 0.0f;
641                const float *wbuf = overlap + w * 128;
646                for (j = 0; j < wlen; j++)
653                clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]);
666            if (s->options.ltp && s->coder->update_ltp) {
667                s->coder->update_ltp(s, sce);
672                for (k = 0; k < 1024; k++) {
684 frame_bits = its = 0;
692        memset(chan_el_counter, 0, sizeof(chan_el_counter));
693        for (i = 0; i < s->chan_map[0]; i++) {
695            const float *coeffs[2];
696            tag = s->chan_map[i+1];
704            for (ch = 0; ch < chans; ch++) {
712                for (w = 0; w < 128; w++)
716            s->psy.bitres.alloc = -1;
717            s->psy.bitres.bits = s->last_frame_pb_count / s->channels;
718            s->psy.model->analyze(&s->psy, start_ch, coeffs, wi);
719            if (s->psy.bitres.alloc > 0) {
721                target_bits += s->psy.bitres.alloc
723                s->psy.bitres.alloc /= chans;
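Here the psychoacoustic model can grant more or fewer bits than the nominal frame budget, with the surplus banked for later transient-heavy frames. A hypothetical, simplified sketch of such bit-reservoir accounting (not the model's actual policy):

    /* Simplified bit-reservoir bookkeeping: a frame may borrow up to the
     * saved surplus beyond its nominal budget; whatever it leaves unused
     * is deposited back, capped at the reservoir size. */
    typedef struct BitReservoir { int size, fill; } BitReservoir;

    static int reservoir_grant(BitReservoir *r, int nominal, int wanted)
    {
        int grant = wanted;
        if (grant > nominal + r->fill)      /* cannot borrow more than saved */
            grant = nominal + r->fill;
        r->fill += nominal - grant;         /* deposit or withdraw the diff */
        if (r->fill > r->size)              /* reservoir is finite */
            r->fill = r->size;
        return grant;
    }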
726        for (ch = 0; ch < chans; ch++) {
727            s->cur_channel = start_ch + ch;
728            if (s->options.pns && s->coder->mark_pns)
729                s->coder->mark_pns(s, avctx, &cpe->ch[ch]);
730            s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
733 && wi[0].window_type[0] == wi[1].window_type[0]
734 && wi[0].window_shape == wi[1].window_shape) {
738                if (wi[0].grouping[w] != wi[1].grouping[w]) {
744        for (ch = 0; ch < chans; ch++) {
746            s->cur_channel = start_ch + ch;
747            if (s->options.tns && s->coder->search_for_tns)
748                s->coder->search_for_tns(s, sce);
749            if (s->options.tns && s->coder->apply_tns_filt)
750                s->coder->apply_tns_filt(s, sce);
753            if (s->options.pns && s->coder->search_for_pns)
754                s->coder->search_for_pns(s, avctx, sce);
756        s->cur_channel = start_ch;
757        if (s->options.intensity_stereo) {
758            if (s->coder->search_for_is)
759                s->coder->search_for_is(s, avctx, cpe);
763        if (s->options.pred) {
764            for (ch = 0; ch < chans; ch++) {
766                s->cur_channel = start_ch + ch;
767                if (s->options.pred && s->coder->search_for_pred)
768                    s->coder->search_for_pred(s, sce);
771            if (s->coder->adjust_common_pred)
772                s->coder->adjust_common_pred(s, cpe);
773            for (ch = 0; ch < chans; ch++) {
775                s->cur_channel = start_ch + ch;
776                if (s->options.pred && s->coder->apply_main_pred)
777                    s->coder->apply_main_pred(s, sce);
779            s->cur_channel = start_ch;
781        if (s->options.mid_side) {
782            if (s->options.mid_side == -1 && s->coder->search_for_ms)
783                s->coder->search_for_ms(s, cpe);
789        if (s->options.ltp) {
790            for (ch = 0; ch < chans; ch++) {
792                s->cur_channel = start_ch + ch;
793                if (s->coder->search_for_ltp)
797            s->cur_channel = start_ch;
798            if (s->coder->adjust_common_ltp)
799                s->coder->adjust_common_ltp(s, cpe);
805            if (s->coder->encode_main_pred)
806                s->coder->encode_main_pred(s, &cpe->ch[0]);
807            if (s->coder->encode_ltp_info)
808                s->coder->encode_ltp_info(s, &cpe->ch[0], 1);
813        for (ch = 0; ch < chans; ch++) {
814            s->cur_channel = start_ch + ch;
831    rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3);
832    too_many_bits = FFMAX(target_bits, rate_bits);
833    too_many_bits = FFMIN(too_many_bits, 6144 * s->channels - 3);
834    too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits);
837    too_few_bits = too_few_bits - too_few_bits/8;
838    too_many_bits = too_many_bits + too_many_bits/2;
841        || (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits))
842        || frame_bits >= 6144 * s->channels - 3) {
844        float ratio = ((float)rate_bits) / frame_bits;
846        if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) {
853            ratio = sqrtf(sqrtf(ratio));
854            ratio = av_clipf(ratio, 0.9f, 1.1f);
857            ratio = sqrtf(ratio);
859        s->lambda = FFMIN(s->lambda * ratio, 65536.f);
862        if (ratio > 0.9f && ratio < 1.1f) {
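The block above is the encoder's rate-control feedback: lambda, the price attached to distortion during quantizer search, is scaled by a damped function of rate_bits/frame_bits. Inside the accepted corridor a fourth root plus a clip keeps the step tiny; outside it a square root still converges but moves faster. A standalone sketch of the update:

    #include <math.h>

    static float clipf(float v, float lo, float hi)
    { return v < lo ? lo : v > hi ? hi : v; }

    /* Damped multiplicative lambda update: ratio > 1 means the frame came
     * in under budget (lambda may rise, allowing more distortion pricing
     * headroom); ratio < 1 means it overshot. */
    static float update_lambda(float lambda, int rate_bits, int frame_bits,
                               int in_corridor)
    {
        float ratio = (float)rate_bits / frame_bits;
        if (in_corridor) {
            ratio = sqrtf(sqrtf(ratio));   /* fourth root: tiny steps */
            ratio = clipf(ratio, 0.9f, 1.1f);
        } else {
            ratio = sqrtf(ratio);          /* square root: faster moves */
        }
        lambda *= ratio;
        return lambda > 65536.f ? 65536.f : lambda;  /* hard upper bound */
    }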
865 if (is_mode || ms_mode || tns_mode || pred_mode) {
866        for (i = 0; i < s->chan_map[0]; i++) {
870            for (ch = 0; ch < chans; ch++)
881    if (s->options.ltp && s->coder->ltp_insert_new_frame)
882        s->coder->ltp_insert_new_frame(s);
889    s->lambda_sum += s->lambda;
947    for (ch = 0; ch < s->channels; ch++)
948        s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
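The two lines above carve per-channel views out of one contiguous allocation rather than allocating per channel. A self-contained sketch of the same layout (hypothetical standalone variant using the C library allocators instead of av_mallocz()):

    #include <stdlib.h>

    /* One contiguous zeroed block backs every channel; planar[ch] is a
     * stride-3072 view into it, so a single allocation covers all channel
     * histories. Free with free(planar[0]); free(planar); */
    static float **alloc_planar(int channels)
    {
        float **planar = malloc(channels * sizeof(*planar));
        float  *block  = calloc((size_t)channels * 3 * 1024, sizeof(*block));
        if (!planar || !block) { free(planar); free(block); return NULL; }
        for (int ch = 0; ch < channels; ch++)
            planar[ch] = block + 3 * 1024 * ch;
        return planar;
    }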
969 s->last_frame_pb_count = 0;
980    s->needs_pce = s->options.pce;
992        av_log(avctx, AV_LOG_INFO, "Using a PCE to encode channel layout \"%s\"\n", buf);
994        s->reorder_map = s->pce.reorder_map;
995        s->chan_map    = s->pce.config_map;
1002   for (i = 1; i <= s->chan_map[0]; i++) {
1010   for (i = 0; i < 16; i++)
1013   s->samplerate_index = i;
1017          "Unsupported sample rate %d\n", avctx->sample_rate);
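The search above walks the 16-entry MPEG-4 sample-rate table (avpriv_mpeg4audio_sample_rates, see the index below) looking for an exact match; anything not in the table is rejected as unsupported. A standalone sketch using the table values from the MPEG-4 audio specification:

    /* Exact-match scan over the MPEG-4 sampling-frequency index table;
     * unused trailing entries are 0 and never match a real rate. */
    static const int m4a_rates[16] = {
        96000, 88200, 64000, 48000, 44100, 32000,
        24000, 22050, 16000, 12000, 11025,  8000,
         7350,     0,     0,     0
    };

    static int m4a_rate_index(int sample_rate)
    {
        for (int i = 0; i < 16; i++)
            if (m4a_rates[i] == sample_rate)
                return i;
        return -1;  /* unsupported */
    }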
1021 "Too many bits %f > %d per frame requested, clamping to max\n",
1023 6144 *
s->channels);
1036 "Main prediction unavailable in the \"mpeg2_aac_low\" profile\n");
1038 "LTP prediction unavailable in the \"mpeg2_aac_low\" profile\n");
1040 "PNS unavailable in the \"mpeg2_aac_low\" profile, turning off\n");
1045 "Main prediction unavailable in the \"aac_ltp\" profile\n");
1047 s->options.pred = 1;
1049 "LTP prediction unavailable in the \"aac_main\" profile\n");
1050 }
else if (
s->options.ltp) {
1053 "Chainging profile to \"aac_ltp\"\n");
1055 "Main prediction unavailable in the \"aac_ltp\" profile\n");
1056 }
else if (
s->options.pred) {
1059 "Chainging profile to \"aac_main\"\n");
1061 "LTP prediction unavailable in the \"aac_main\" profile\n");
1069 "The ANMR coder is considered experimental, add -strict -2 to enable!\n");
1070 s->options.intensity_stereo = 0;
1074 "The LPT profile requires experimental compliance, add -strict -2 to enable!\n");
1077 if (
s->channels > 3)
1078 s->options.mid_side = 0;
1093   for (i = 0; i < s->chan_map[0]; i++)
1096                         s->chan_map[0], grouping)) < 0)
1100 s->random_state = 0x1f2e3d4c;
1122 #define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
1124 {
"aac_coder",
"Coding algorithm", offsetof(
AACEncContext,
options.coder),
AV_OPT_TYPE_INT, {.i64 =
AAC_CODER_FAST}, 0,
AAC_CODER_NB-1,
AACENC_FLAGS,
"coder"},
int frame_size
Number of samples per channel in an audio frame.
@ AV_SAMPLE_FMT_FLTP
float, planar
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
const uint8_t ff_tns_max_bands_128[]
static const AVClass aacenc_class
static av_cold int init(AVCodecContext *avctx)
void AAC_RENAME() ff_init_ff_sine_windows(int index)
initialize the specified entry of ff_sine_windows
uint64_t channel_layout
Audio channel layout.
static void put_bitstream_info(AACEncContext *s, const char *name)
Write some auxiliary information about the created AAC file.
void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts, int64_t *duration)
Remove frame(s) from the queue.
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
int sample_rate
samples per second
void ff_af_queue_close(AudioFrameQueue *afq)
Close AudioFrameQueue.
static void abs_pow34_v(float *out, const float *in, const int size)
static enum AVSampleFormat sample_fmts[]
static void copy_input_samples(AACEncContext *s, const AVFrame *frame)
static av_cold int aac_encode_init(AVCodecContext *avctx)
static const int aacenc_profiles[]
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
#define FF_PROFILE_AAC_MAIN
uint8_t zeroes[128]
band is not coded (used by encoder)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
Initialize AudioFrameQueue.
static av_cold int end(AVCodecContext *avctx)
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val, n times.
static void ff_aac_tableinit(void)
#define WARN_IF(cond,...)
void avpriv_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
const AACCoefficientsEncoder ff_aac_coders[AAC_CODER_NB]
#define FF_PROFILE_AAC_LTP
static void encode_band_info(AACEncContext *s, SingleChannelElement *sce)
Encode scalefactor band coding type.
int window_shape
window shape (sine/KBD/whatever)
static const uint8_t aac_chan_configs[AAC_MAX_CHANNELS][6]
default channel configurations
int8_t used[MAX_LTP_LONG_SFB]
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
INTFLOAT pcoeffs[1024]
coefficients for IMDCT, pristine
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
const uint16_t *const ff_swb_offset_128[]
static void encode_spectral_coeffs(AACEncContext *s, SingleChannelElement *sce)
Encode spectral coefficients processed by psychoacoustic model.
const uint8_t ff_tns_max_bands_1024[]
int num_swb
number of scalefactor window bands
#define WINDOW_FUNC(type)
static void avoid_clipping(AACEncContext *s, SingleChannelElement *sce)
Downscale spectral coefficients for near-clipping windows to avoid artifacts.
INTFLOAT ret_buf[2048]
PCM output buffer.
#define FF_PROFILE_MPEG2_AAC_LOW
static void apply_mid_side_stereo(ChannelElement *cpe)
int ms_mode
Signals mid/side stereo flags coding mode (used by encoder)
int initial_padding
Audio only.
static const AVCodecDefault defaults[]
int flags
AV_CODEC_FLAG_*.
static void put_pce(PutBitContext *pb, AVCodecContext *avctx)
av_cold void ff_psy_end(FFPsyContext *ctx)
Cleanup model context at the end.
int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f)
Add a frame to the queue.
uint8_t prediction_used[41]
int num_ele[4]
front, side, back, lfe
IndividualChannelStream ics
windowing related information
static void adjust_frame_information(ChannelElement *cpe, int chans)
Produce integer coefficients from scalefactors provided by the model.
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
float clip_avoidance_factor
set if any window is near clipping; holds the attenuation factor needed to avoid it
@ NOISE_BT
Spectral data are scaled white noise not coded in the bitstream.
INTFLOAT coeffs[1024]
coefficients for IMDCT, maybe processed
int global_quality
Global quality for codecs which cannot change it per frame.
const uint8_t * swb_sizes
table of scalefactor band sizes for a particular window
@ INTENSITY_BT2
Scalefactor data are intensity stereo positions (out of phase).
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const int64_t aac_normal_chan_layouts[7]
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
const int ff_aac_swb_size_128_len
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Individual Channel Stream.
#define SCALE_DIFF_ZERO
codebook index corresponding to zero scalefactor indices difference
#define NOISE_PRE
preamble for NOISE_BT, put in bitstream with the first noise band
const uint16_t * swb_offset
table of offsets to the lowest spectral coefficient of a scalefactor band, sfb, for a particular wind...
static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS]
Table to remap channels from libavcodec's default order to AAC order.
@ INTENSITY_BT
Scalefactor data are intensity stereo positions (in phase).
int window_type[3]
window type (short/long/transitional, etc.) - current, previous and next
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static const AACPCEInfo aac_pce_configs[]
List of PCE (Program Configuration Element) for the channel layouts listed in channel_layout....
uint8_t is_mask[128]
Set if intensity stereo is used (used by encoder)
static const int sizes[][2]
const int ff_aac_swb_size_1024_len
static void encode_pulses(AACEncContext *s, Pulse *pulse)
Encode pulse data.
float is_ener[128]
Intensity stereo pos (used by encoder)
const uint8_t ff_aac_num_swb_128[]
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
int grouping[8]
window grouping (for e.g. AAC)
int sf_idx[128]
scalefactor indices (used by encoder)
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
static AVOnce aac_table_init
const uint8_t ff_aac_scalefactor_bits[121]
float clipping[8]
maximum absolute normalized intensity in the given window for clip avoidance
const OptionDef options[]
AAC_FLOAT lcoeffs[1024]
MDCT of LTP coefficients (used by encoder)
SingleChannelElement ch[2]
const uint16_t *const ff_swb_offset_1024[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void quantize_bands(int *out, const float *in, const float *scaled, int size, int is_signed, int maxval, const float Q34, const float rounding)
#define FF_PROFILE_AAC_LOW
static void encode_scale_factors(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce)
Encode scalefactors.
static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce, float *audio)
const uint8_t * ff_aac_swb_size_128[]
void ff_aac_coder_init_mips(AACEncContext *c)
int common_window
Set if channels share a common 'IndividualChannelStream' in bitstream.
static void apply_intensity_stereo(ChannelElement *cpe)
int index[4][8]
front, side, back, lfe
uint8_t ms_mask[128]
Set if mid/side stereo is used for each scalefactor window band.
av_cold void ff_lpc_end(LPCContext *s)
Uninitialize LPCContext.
#define AV_LOG_INFO
Standard information.
int channels
number of audio channels
av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, int num_lens, const uint8_t **bands, const int *num_bands, int num_groups, const uint8_t *group_map)
Initialize psychoacoustic model.
Single Channel Element - used for both SCE and LFE elements.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int put_bits_count(PutBitContext *s)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static const AVOption aacenc_options[]
channel element - generic struct for SCE/CPE/CCE/LFE
static void(*const apply_window[4])(AVFloatDSPContext *fdsp, SingleChannelElement *sce, const float *audio)
int pairing[3][8]
front, side, back
AVSampleFormat
Audio sample formats.
#define NOISE_PRE_BITS
length of preamble
av_cold struct FFPsyPreprocessContext * ff_psy_preprocess_init(AVCodecContext *avctx)
psychoacoustic model audio preprocessing initialization
const char * name
Name of the codec implementation.
void ff_aac_dsp_init_x86(AACEncContext *s)
static av_cold void aac_encode_init_tables(void)
const uint8_t ff_aac_num_swb_1024[]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
float ff_aac_kbd_long_1024[1024]
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
static void encode_ms_info(PutBitContext *pb, ChannelElement *cpe)
Encode MS data.
@ RESERVED_BT
Band types following are encoded differently from others.
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels)
Preprocess several channel in audio frame in order to compress it better.
#define CLIP_AVOIDANCE_FACTOR
#define FF_ARRAY_ELEMS(a)
main external API structure.
typedef void(RENAME(mix_any_func_type))
static int encode_individual_channel(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, int common_window)
Encode one channel of audio data.
#define NOISE_OFFSET
subtracted from global gain, used as offset for the preamble
#define ERROR_IF(cond,...)
enum WindowSequence window_sequence[2]
@ AOT_SBR
Y Spectral Band Replication.
av_cold void ff_kbd_window_init(float *window, float alpha, int n)
Generate a Kaiser-Bessel Derived Window.
const uint8_t * ff_aac_swb_size_1024[]
static int put_audio_specific_config(AVCodecContext *avctx)
Make AAC audio config object.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
uint8_t is_mode
Set if any bands have been encoded using intensity stereo (used by encoder)
static void put_ics_info(AACEncContext *s, IndividualChannelStream *info)
Encode ics_info element.
static const AVCodecDefault aac_encode_defaults[]
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
const int avpriv_mpeg4audio_sample_rates[16]
static av_cold int aac_encode_end(AVCodecContext *avctx)
int frame_number
Frame counter, set by libavcodec.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define FF_AAC_PROFILE_OPTS
This structure stores compressed data.
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
uint8_t window_clipping[8]
set if a certain window is near clipping
float ff_aac_kbd_short_128[128]
static const int mpeg4audio_sample_rates[16]
uint8_t max_sfb
number of scalefactor bands per group
INTFLOAT ltp_state[3072]
time signal for LTP
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
av_cold void ff_psy_preprocess_end(struct FFPsyPreprocessContext *ctx)
Cleanup audio preprocessing module.
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
enum BandType band_type[128]
band types
uint8_t use_kb_window[2]
If set, use Kaiser-Bessel window, otherwise use a sine window.
@ FF_LPC_TYPE_LEVINSON
Levinson-Durbin recursion.
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
int num_windows
number of windows in a frame
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order, enum FFLPCType lpc_type)
Initialize LPCContext.
const uint32_t ff_aac_scalefactor_code[121]
void ff_quantize_band_cost_cache_init(struct AACEncContext *s)