/* Four-tap bilinear core: psum = vA*s00 + vB*s01 + vC*s10 + vD*s11 (+ bias), then >> 6. */
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
        vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
        vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = BIAS2(psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;

/* Two-tap variant used when x == 0 or y == 0 (vE = vB + vC). */
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
        vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;

#define noop(a) a
#define add28(a) vec_add(v28ss, a)

#if HAVE_BIGENDIAN
#define GET_VSRC1(vs0, off, b, perm0, s){    \
    vec_u8 vsrcCuc, vsrcDuc;                 \
    vsrcCuc = vec_ld(off, s);                \
    if (loadSecond){                         \
        vsrcDuc = vec_ld(off + b, s);        \
    } else                                   \
        vsrcDuc = vsrcCuc;                   \
                                             \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vec_u8 vsrcCuc, vsrcDuc;                         \
    vsrcCuc = vec_ld(off, s);                        \
    if (loadSecond){                                 \
        vsrcDuc = vec_ld(off + b, s);                \
    } else                                           \
        vsrcDuc = vsrcCuc;                           \
                                                     \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0);         \
    if (reallyBadAlign){                             \
        vs1 = vsrcDuc;                               \
    } else                                           \
        vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1);     \
 }

#else

#define GET_VSRC1(vs0, off, b, perm0, s){ \
    vs0 = vec_vsx_ld(off, s);             \
 }
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vs0 = vec_vsx_ld(off, s);                        \
    vs1 = vec_vsx_ld(off + 1, s);                    \
 }
#endif /* HAVE_BIGENDIAN */

#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t *dst, const uint8_t *src,
                                           ptrdiff_t stride, int h,
                                           int x, int y)
{
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
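
    /* Bilinear chroma weights, laid out for splatting below:
     *   A = (8 - x) * (8 - y), B = x * (8 - y), C = (8 - x) * y, D = x * y.
     * Each output pixel is (A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] + 32) >> 6,
     * so the four weights are loaded once and splatted into 16-bit vectors
     * for the vec_mladd chain in the core macro. */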
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif
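
    /* On big-endian AltiVec, vec_ld only performs 16-byte-aligned loads, so
     * GET_VSRC builds each misaligned row from one or two aligned loads plus
     * vec_perm with the vec_lvsl masks computed above; loadSecond and
     * reallyBadAlign record how far src sits from a 16-byte boundary. On the
     * little-endian/VSX path, vec_vsx_ld handles unaligned addresses directly. */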
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }
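
    /* Eight result bytes are produced per row, but vec_st always stores 16.
     * fperm makes the vec_perm in the core macro merge the freshly packed
     * results (second operand) into the half of the destination vector that
     * dst occupies, leaving the other 8 bytes untouched; dst is assumed to be
     * at least 8-byte aligned. */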

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);

    if (ABCD[3]) { /* x != 0 and y != 0: full four-tap filter */
        for (i = 0 ; i < h ; i++) {
            GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
            CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
        }
    } else {
        const vec_s16 vE = vec_add(vB, vC);
        if (ABCD[2]) { /* x == 0, B == 0: vertical-only interpolation */
            for (i = 0 ; i < h ; i++) {
                GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE
                vsrc0uc = vsrc1uc;
            }
        } else { /* y == 0, C == 0: horizontal-only interpolation */
            for (i = 0 ; i < h ; i++) {
                GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE
            }
        }
    }
}
#endif
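
/* The VC-1 "no rounding" variant below reuses CHROMA_MC8_ALTIVEC_CORE with a
 * zero initial bias and add28() applied after the multiply-adds, i.e. it
 * computes (A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] + 28) >> 6. */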
/* this code assumes that stride % 16 == 0 */
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t *dst, const uint8_t *src,
                                                 ptrdiff_t stride, int h,
                                                 int x, int y)
{
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);

    for (i = 0 ; i < h ; i++) {
        GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
        CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
    }
}
#endif
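
/* Note: this template is meant to be #included with PREFIX_h264_chroma_mc8_altivec /
 * PREFIX_no_rnd_vc1_chroma_mc8_altivec and OP_U8_ALTIVEC defined by the including
 * file (typically once for the put and once for the avg variant); the #undef
 * lines below reset the helper macros so the file can be included again. */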
#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE
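
/* For illustration only (not part of this template): a minimal scalar sketch
 * of the bilinear filter that the AltiVec code above vectorizes. The function
 * name is hypothetical, the block is compiled out, and it assumes <stdint.h>
 * and <stddef.h> are available. */
#if 0
static void chroma_mc8_scalar_ref(uint8_t *dst, const uint8_t *src,
                                  ptrdiff_t stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}
#endif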