DECLARE_ASM_CONST(16, double, pd_1)[2] = { 1.0, 1.0 };
DECLARE_ASM_CONST(16, double, pd_2)[2] = { 2.0, 2.0 };

static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
                                        double *w_data)
{
    double c = 2.0 / (len-1.0);
    int n2 = len>>1;
    x86_reg i = -n2*sizeof(int32_t);
    x86_reg j =  n2*sizeof(int32_t);
    __asm__ volatile(
        /* xmm6 = pd_1 = {1,1}, xmm5 = pd_2 = {2,2}; xmm7 carries the
         * window abscissae for the current sample pair and is stepped
         * by pd_2 once per iteration. */
        "movsd   %4,     %%xmm7                \n\t"
        "movapd  "MANGLE(pd_1)", %%xmm6        \n\t"
        "movapd  "MANGLE(pd_2)", %%xmm5        \n\t"
        "movlhps %%xmm7, %%xmm7                \n\t"
        "subpd   %%xmm5, %%xmm7                \n\t"
        "addsd   %%xmm6, %%xmm7                \n\t"
        /* Odd and even lengths need different alignment handling in the
         * mirrored half, hence the two WELCH expansions below. i ascends
         * through the first half of the buffer while j descends through
         * the second, two samples per half per iteration. */
        "test    $1,     %5                    \n\t"
        "jz      2f                            \n\t"
#define WELCH(MOVPD, offset)\
        "1:                                    \n\t"\
        "movapd   %%xmm7,  %%xmm1              \n\t"\
        "mulpd    %%xmm1,  %%xmm1              \n\t"\
        "movapd   %%xmm6,  %%xmm0              \n\t"\
        "subpd    %%xmm1,  %%xmm0              \n\t"\
        "pshufd   $0x4e,   %%xmm0, %%xmm1      \n\t"\
        "cvtpi2pd (%3,%0), %%xmm2              \n\t"\
        "cvtpi2pd "#offset"*4(%3,%1), %%xmm3   \n\t"\
        "mulpd    %%xmm0,  %%xmm2              \n\t"\
        "mulpd    %%xmm1,  %%xmm3              \n\t"\
        "movapd   %%xmm2, (%2,%0,2)            \n\t"\
        MOVPD"    %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
        "subpd    %%xmm5,  %%xmm7              \n\t"\
        "sub      $8,      %1                  \n\t"\
        "add      $8,      %0                  \n\t"\
        "jl 1b                                 \n\t"

        WELCH("movupd", -1)
        "jmp 3f                                \n\t"
        "2:                                    \n\t"
        WELCH("movapd", -2)
        "3:                                    \n\t"
        :"+&r"(i), "+&r"(j)
        :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
        NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
        XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                          "%xmm5", "%xmm6", "%xmm7")
    );
}
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
                                      double *autoc)
{
    int j;

    /* The movapd loads below need 16-byte alignment; bumping an 8-byte-
     * aligned pointer shifts the window by one sample, which the
     * autocorrelation tolerates. */
    if((x86_reg)data & 15)
        data++;

    for(j=0; j<lag; j+=2){
        x86_reg i = -len*sizeof(double);
        if(j == lag-2) {
            /* Final pass: three remaining lags j, j+1, j+2 in one sweep
             * (autoc[] holds lag+1 values, for delays 0..lag). */
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm2    \n\t"
                "1:                                 \n\t"
                "movapd   (%2,%0), %%xmm3           \n\t"
                "movupd -8(%3,%0), %%xmm4           \n\t"
                "movapd   (%3,%0), %%xmm5           \n\t"
                "mulpd     %%xmm3, %%xmm4           \n\t"
                "mulpd     %%xmm3, %%xmm5           \n\t"
                "mulpd -16(%3,%0), %%xmm3           \n\t"
                "addpd     %%xmm4, %%xmm1           \n\t"
                "addpd     %%xmm5, %%xmm0           \n\t"
                "addpd     %%xmm3, %%xmm2           \n\t"
                "add       $16,    %0               \n\t"
                "jl 1b                              \n\t"
                /* horizontal sums of the three accumulators */
                "movhlps   %%xmm0, %%xmm3           \n\t"
                "movhlps   %%xmm1, %%xmm4           \n\t"
                "movhlps   %%xmm2, %%xmm5           \n\t"
                "addsd     %%xmm3, %%xmm0           \n\t"
                "addsd     %%xmm4, %%xmm1           \n\t"
                "addsd     %%xmm5, %%xmm2           \n\t"
                "movsd     %%xmm0,   (%1)           \n\t"
                "movsd     %%xmm1,  8(%1)           \n\t"
                "movsd     %%xmm2, 16(%1)           \n\t"
                :"+&r"(i)
                :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
                NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
                :"memory"
            );
        } else {
            /* Regular pass: two lags (j and j+1) per sweep over data. */
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1    \n\t"
                "1:                                 \n\t"
                "movapd   (%3,%0), %%xmm3           \n\t"
                "movupd -8(%4,%0), %%xmm4           \n\t"
                "mulpd     %%xmm3, %%xmm4           \n\t"
                "mulpd    (%4,%0), %%xmm3           \n\t"
                "addpd     %%xmm4, %%xmm1           \n\t"
                "addpd     %%xmm3, %%xmm0           \n\t"
                "add       $16,    %0               \n\t"
                "jl 1b                              \n\t"
                "movhlps   %%xmm0, %%xmm3           \n\t"
                "movhlps   %%xmm1, %%xmm4           \n\t"
                "addsd     %%xmm3, %%xmm0           \n\t"
                "addsd     %%xmm4, %%xmm1           \n\t"
                "movsd     %%xmm0, %1               \n\t"
                "movsd     %%xmm1, %2               \n\t"
                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
                :"r"(data+len), "r"(data+len-j)
                NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
            );
        }
    }
}
Referenced symbols:

DECLARE_ASM_CONST(16, double, pd_1)[2]
Declare a 16-byte-aligned constant (from the memory handling functions); pd_1 and pd_2 above are defined with it.
#define INLINE_SSE2(flags)
#define INLINE_SSE2_SLOW(flags)
CPU-flag guards that decide whether the inline-asm code paths may be installed.
av_cold void ff_lpc_init_x86(LPCContext *c)
Installs the SSE2 implementations into the LPCContext; av_cold comes from the macro definitions for various function/variable attributes. A sketch of this dispatch follows the list.
#define NAMED_CONSTRAINTS_ARRAY_ADD(...)
void(* lpc_apply_welch_window)(const int32_t *data, int len, double *w_data)
Apply a Welch window to an array of input samples.
void(* lpc_compute_autocorr)(const double *data, int len, int lag, double *autoc)
Perform autocorrelation on input samples with delay of 0 to lag.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
#define XMM_CLOBBERS_ONLY(...)
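For orientation, a minimal sketch of the dispatch ff_lpc_init_x86() performs, assembled from the symbols listed above; the HAVE_SSE2_INLINE/INLINE_SSE2_SLOW gating shown here is an assumption about the exact guards used, not a verbatim copy:

av_cold void ff_lpc_init_x86(LPCContext *c)
{
#if HAVE_SSE2_INLINE
    int cpu_flags = av_get_cpu_flags();

    /* Assumed guard: also accept CPUs whose SSE2 is flagged "slow";
     * upstream may gate differently. */
    if (INLINE_SSE2_SLOW(cpu_flags)) {
        c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
        c->lpc_compute_autocorr   = lpc_compute_autocorr_sse2;
    }
#endif
}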