FFmpeg 4.3
aacenc_ltp.c
/*
 * AAC encoder long term prediction extension
 * Copyright (C) 2015 Rostislav Pehlivanov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AAC encoder long term prediction extension
 * @author Rostislav Pehlivanov ( atomnuker gmail com )
 */

#include "aacenc_ltp.h"
#include "aacenc_quantization.h"
#include "aacenc_utils.h"

/**
 * Encode LTP data.
 */
void ff_aac_encode_ltp_info(AACEncContext *s, SingleChannelElement *sce,
                            int common_window)
{
    int i;
    IndividualChannelStream *ics = &sce->ics;
    if (s->profile != FF_PROFILE_AAC_LTP || !ics->predictor_present)
        return;
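    /* LTP side info: a presence flag, an 11-bit lag, a 3-bit coefficient
     * index and one "used" flag per scalefactor band; when the channels
     * share a common window, an extra flag bit is written first. */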
    if (common_window)
        put_bits(&s->pb, 1, 0);
    put_bits(&s->pb, 1, ics->ltp.present);
    if (!ics->ltp.present)
        return;
    put_bits(&s->pb, 11, ics->ltp.lag);
    put_bits(&s->pb, 3, ics->ltp.coef_idx);
    for (i = 0; i < FFMIN(ics->max_sfb, MAX_LTP_LONG_SFB); i++)
        put_bits(&s->pb, 1, ics->ltp.used[i]);
}

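/**
 * Shift each channel's LTP history: the previous input samples move down by
 * one frame, the newly read input samples and the reconstructed output in
 * ret_buf are appended, and the lag is reset for the next search.
 */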
void ff_aac_ltp_insert_new_frame(AACEncContext *s)
{
    int i, ch, tag, chans, cur_channel, start_ch = 0;
    ChannelElement *cpe;
    SingleChannelElement *sce;
    for (i = 0; i < s->chan_map[0]; i++) {
        cpe = &s->cpe[i];
        tag   = s->chan_map[i+1];
        chans = tag == TYPE_CPE ? 2 : 1;
        for (ch = 0; ch < chans; ch++) {
            sce = &cpe->ch[ch];
            cur_channel = start_ch + ch;
            /* New sample + overlap */
            memcpy(&sce->ltp_state[0],    &sce->ltp_state[1024],                 1024*sizeof(sce->ltp_state[0]));
            memcpy(&sce->ltp_state[1024], &s->planar_samples[cur_channel][2048], 1024*sizeof(sce->ltp_state[0]));
            memcpy(&sce->ltp_state[2048], &sce->ret_buf[0],                      1024*sizeof(sce->ltp_state[0]));
            sce->ics.ltp.lag = 0;
        }
        start_ch += chans;
    }
}

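/**
 * Exhaustive lag search: for every candidate lag the new samples are
 * correlated against the buffered prediction signal (normalized by that
 * signal's magnitude) and the best-scoring lag is kept. The ratio of the
 * winning correlation to its window length is then quantized to one of
 * the eight fixed LTP coefficients.
 */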
static void get_lag(float *buf, const float *new, LongTermPrediction *ltp)
{
    int i, j, lag = 0, max_corr = 0;
    float max_ratio = 0.0f;
    for (i = 0; i < 2048; i++) {
        float corr, s0 = 0.0f, s1 = 0.0f;
        const int start = FFMAX(0, i - 1024);
        for (j = start; j < 2048; j++) {
            const int idx = j - i + 1024;
            s0 += new[j]*buf[idx];
            s1 += buf[idx]*buf[idx];
        }
        corr = s1 > 0.0f ? s0/sqrt(s1) : 0.0f;
        if (corr > max_corr) {
            max_corr = corr;
            lag = i;
            max_ratio = corr/(2048-start);
        }
    }
    ltp->lag      = FFMAX(av_clip_uintp2(lag, 11), 0);
    ltp->coef_idx = quant_array_idx(max_ratio, ltp_coef, 8);
    ltp->coef     = ltp_coef[ltp->coef_idx];
}

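/**
 * Build the predicted time signal by scaling the delayed history with the
 * chosen LTP coefficient; anything beyond the available lag is zeroed, and
 * a zero lag disables prediction for the frame.
 */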
static void generate_samples(float *buf, LongTermPrediction *ltp)
{
    int i, samples_num = 2048;
    if (!ltp->lag) {
        ltp->present = 0;
        return;
    } else if (ltp->lag < 1024) {
        samples_num = ltp->lag + 1024;
    }
    for (i = 0; i < samples_num; i++)
        buf[i] = ltp->coef*buf[i + 2048 - ltp->lag];
    memset(&buf[i], 0, (2048 - i)*sizeof(float));
}

/**
 * Process LTP parameters
 * @see Patent WO2006070265A1
 */
void ff_aac_update_ltp(AACEncContext *s, SingleChannelElement *sce)
{
    float *pred_signal = &sce->ltp_state[0];
    const float *samples = &s->planar_samples[s->cur_channel][1024];

    if (s->profile != FF_PROFILE_AAC_LTP)
        return;

    /* Calculate lag */
    get_lag(pred_signal, samples, &sce->ics.ltp);
    generate_samples(pred_signal, &sce->ics.ltp);
}

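/**
 * For a channel pair sharing a common window, keep a scalefactor band
 * marked for LTP only if both channels marked it, and update the first
 * channel's presence flags accordingly.
 */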
void ff_aac_adjust_common_ltp(AACEncContext *s, ChannelElement *cpe)
{
    int sfb, count = 0;
    SingleChannelElement *sce0 = &cpe->ch[0];
    SingleChannelElement *sce1 = &cpe->ch[1];

    if (!cpe->common_window ||
        sce0->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE ||
        sce1->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        sce0->ics.ltp.present = 0;
        return;
    }

    for (sfb = 0; sfb < FFMIN(sce0->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++) {
        int sum = sce0->ics.ltp.used[sfb] + sce1->ics.ltp.used[sfb];
        if (sum != 2) {
            sce0->ics.ltp.used[sfb] = 0;
        } else {
            count++;
        }
    }

    sce0->ics.ltp.present = !!count;
    sce0->ics.predictor_present = !!count;
}

/**
 * Mark LTP sfb's
 */
void ff_aac_search_for_ltp(AACEncContext *s, SingleChannelElement *sce,
                           int common_window)
{
    int w, g, w2, i, start = 0, count = 0;
    int saved_bits = -(15 + FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB));
    float *C34   = &s->scoefs[128*0], *PCD = &s->scoefs[128*1];
    float *PCD34 = &s->scoefs[128*2];
    const int max_ltp = FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB);

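    /* LTP applies to long windows only; when the window sequence switches to
     * eight short windows any stale predictor state is cleared. The search is
     * also skipped if no lag was found or lambda is above 120. */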
    if (sce->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        if (sce->ics.ltp.lag) {
            memset(&sce->ltp_state[0], 0, 3072*sizeof(sce->ltp_state[0]));
            memset(&sce->ics.ltp, 0, sizeof(LongTermPrediction));
        }
        return;
    }

    if (!sce->ics.ltp.lag || s->lambda > 120.0f)
        return;

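    /* Per scalefactor band, compare the cost of coding the original MDCT
     * coefficients against the cost of coding the LTP residual
     * (coefficients minus the predicted spectrum in lcoeffs). */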
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = 0;
        for (g = 0; g < sce->ics.num_swb; g++) {
            int bits1 = 0, bits2 = 0;
            float dist1 = 0.0f, dist2 = 0.0f;
            if (w*16+g > max_ltp) {
                start += sce->ics.swb_sizes[g];
                continue;
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                int bits_tmp1, bits_tmp2;
                FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
                for (i = 0; i < sce->ics.swb_sizes[g]; i++)
                    PCD[i] = sce->coeffs[start+(w+w2)*128+i] - sce->lcoeffs[start+(w+w2)*128+i];
                s->abs_pow34(C34,   &sce->coeffs[start+(w+w2)*128], sce->ics.swb_sizes[g]);
                s->abs_pow34(PCD34, PCD, sce->ics.swb_sizes[g]);
                dist1 += quantize_band_cost(s, &sce->coeffs[start+(w+w2)*128], C34, sce->ics.swb_sizes[g],
                                            sce->sf_idx[(w+w2)*16+g], sce->band_type[(w+w2)*16+g],
                                            s->lambda/band->threshold, INFINITY, &bits_tmp1, NULL, 0);
                dist2 += quantize_band_cost(s, PCD, PCD34, sce->ics.swb_sizes[g],
                                            sce->sf_idx[(w+w2)*16+g],
                                            sce->band_type[(w+w2)*16+g],
                                            s->lambda/band->threshold, INFINITY, &bits_tmp2, NULL, 0);
                bits1 += bits_tmp1;
                bits2 += bits_tmp2;
            }
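            /* Subtract the prediction only when the residual wins on both
             * distortion and bit count, and track the net bit saving. */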
            if (dist2 < dist1 && bits2 < bits1) {
                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
                    for (i = 0; i < sce->ics.swb_sizes[g]; i++)
                        sce->coeffs[start+(w+w2)*128+i] -= sce->lcoeffs[start+(w+w2)*128+i];
                sce->ics.ltp.used[w*16+g] = 1;
                saved_bits += bits1 - bits2;
                count++;
            }
            start += sce->ics.swb_sizes[g];
        }
    }

    sce->ics.ltp.present = !!count && (saved_bits >= 0);
    sce->ics.predictor_present = !!sce->ics.ltp.present;

    /* Reset any marked sfbs */
    if (!sce->ics.ltp.present && !!count) {
        for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
            start = 0;
            for (g = 0; g < sce->ics.num_swb; g++) {
                if (sce->ics.ltp.used[w*16+g]) {
                    for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                        for (i = 0; i < sce->ics.swb_sizes[g]; i++) {
                            sce->coeffs[start+(w+w2)*128+i] += sce->lcoeffs[start+(w+w2)*128+i];
                        }
                    }
                }
                start += sce->ics.swb_sizes[g];
            }
        }
    }
}