#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd)               \
    /* even part: multiplies by 12, 16 and 6 as shift-adds */        \
    t0 = vec_sl(vec_add(s0, s4), vec_2);                             \
    t0 = vec_add(vec_sl(t0, vec_1), t0);                             \
    t0 = vec_add(t0, vec_rnd);                                       \
    t1 = vec_sl(vec_sub(s0, s4), vec_2);                             \
    t1 = vec_add(vec_sl(t1, vec_1), t1);                             \
    t1 = vec_add(t1, vec_rnd);                                       \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1));              \
    t2 = vec_add(t2, vec_sl(s2, vec_4));                             \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1));              \
    t3 = vec_sub(t3, vec_sl(s6, vec_4));                             \
    t4 = vec_add(t0, t2);                                            \
    t5 = vec_add(t1, t3);                                            \
    t6 = vec_sub(t1, t3);                                            \
    t7 = vec_sub(t0, t2);                                            \
                                                                     \
    /* odd part: multiplies by 16, 15, 9 and 4 */                    \
    t0 = vec_sl(vec_add(s1, s3), vec_4);                             \
    t0 = vec_add(t0, vec_sl(s5, vec_3));                             \
    t0 = vec_add(t0, vec_sl(s7, vec_2));                             \
    t0 = vec_add(t0, vec_sub(s5, s3));                               \
                                                                     \
    t1 = vec_sl(vec_sub(s1, s5), vec_4);                             \
    t1 = vec_sub(t1, vec_sl(s7, vec_3));                             \
    t1 = vec_sub(t1, vec_sl(s3, vec_2));                             \
    t1 = vec_sub(t1, vec_add(s1, s7));                               \
                                                                     \
    t2 = vec_sl(vec_sub(s7, s3), vec_4);                             \
    t2 = vec_add(t2, vec_sl(s1, vec_3));                             \
    t2 = vec_add(t2, vec_sl(s5, vec_2));                             \
    t2 = vec_add(t2, vec_sub(s1, s7));                               \
                                                                     \
    t3 = vec_sl(vec_sub(s5, s7), vec_4);                             \
    t3 = vec_sub(t3, vec_sl(s3, vec_3));                             \
    t3 = vec_add(t3, vec_sl(s1, vec_2));                             \
    t3 = vec_sub(t3, vec_add(s3, s5));                               \
                                                                     \
    /* final butterfly */                                            \
    s0 = vec_add(t4, t0);                                            \
    s1 = vec_add(t5, t1);                                            \
    s2 = vec_add(t6, t2);                                            \
    s3 = vec_add(t7, t3);                                            \
    s4 = vec_sub(t7, t3);                                            \
    s5 = vec_sub(t6, t2);                                            \
    s6 = vec_sub(t5, t1);                                            \
    s7 = vec_sub(t4, t0);

/* row-pass scaling: >>3 */
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
    s0 = vec_sra(s0, vec_3);                       \
    s1 = vec_sra(s1, vec_3);                       \
    s2 = vec_sra(s2, vec_3);                       \
    s3 = vec_sra(s3, vec_3);                       \
    s4 = vec_sra(s4, vec_3);                       \
    s5 = vec_sra(s5, vec_3);                       \
    s6 = vec_sra(s6, vec_3);                       \
    s7 = vec_sra(s7, vec_3);

/* column-pass scaling: >>7, with the extra +1 rounding term the
   spec applies to the lower half of the outputs */
#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
    s0 = vec_sra(s0, vec_7);                        \
    s1 = vec_sra(s1, vec_7);                        \
    s2 = vec_sra(s2, vec_7);                        \
    s3 = vec_sra(s3, vec_7);                        \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7);       \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7);       \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7);       \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7);

/* 4-point transform: multiplies by 17, 22 and 10, again as shift-adds */
#define STEP4(s0, s1, s2, s3, vec_rnd)   \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd);           \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2);                \
    t1 = vec_sub(t1, t2);                \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2);                \
    s1 = vec_sub(t1, t3);                \
    s2 = vec_add(t1, t3);                \
    s3 = vec_sub(t0, t2);

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3);       \
    s1 = vec_sra(s1, vec_3);       \
    s2 = vec_sra(s2, vec_3);       \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7);        \
    s1 = vec_sra(s1, vec_7);        \
    s2 = vec_sra(s2, vec_7);        \
    s3 = vec_sra(s3, vec_7);

/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
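    /* shift counts (vec_1 .. vec_7) and rounding biases (vec_4s, vec_1s);
       vec_64 = 4 << 4 builds the +64 column-pass bias, since the splat
       intrinsics only reach immediates in -16..15 */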
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);
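
    /* load the 8x8 coefficient block, one row of eight int16_t per
       16-byte vector */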
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);
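
    /* sign-extend to 32 bits: each row is split into a low half
       (vec_unpackl) and a high half (vec_unpackh) of four ints, so the
       transform has enough headroom */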
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
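
    /* first pass on both halves: 8-point transform with the row
       rounding bias (+4), then >>3 */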
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
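
    /* narrow back to int16 and transpose so the second pass runs
       along the other dimension */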
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
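
    /* widen again for the second pass */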
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
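
    /* second pass: rounding bias +64, then >>7 (SHIFT_VERT8 adds the
       extra +1 to s4..s7) */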
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
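
    /* narrow to int16 and store the transformed block in place */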
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}
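
/** Do inverse transform on 8x4 part of block and add the result to dest */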
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, ptrdiff_t stride,
                                      int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);
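
    /* transpose first: STEP8 works across the s0..s7 vectors, so the
       eight coefficients of each row must be spread over eight inputs */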
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
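
    /* 8-point row pass: +4 bias, then >>3 */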
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
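
    /* src0..src3 now each hold one row of the 8x4 intermediate; widen
       the two four-column halves and run the 4-point column pass
       (+64 bias, >>7) on each */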
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);
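
    /* big-endian path: perm0/perm1 both realign the eight destination
       pixels (dest need not be 16-byte aligned) and interleave them
       with zero bytes, zero-extending them to shorts; two variants
       because consecutive rows can have different alignments */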
#if HAVE_BIGENDIAN
    p0    = vec_lvsl(0, dest);
    p1    = vec_lvsl(stride, dest);
    p     = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);
#define GET_TMP2(dst, p)                                           \
    tmp  = vec_ld(0, dest);                                        \
    tmp2 = (vector signed short)vec_perm(tmp, vec_splat_u8(0), p);
#else
/* little-endian path: vec_vsx_ld() handles unaligned loads, and
   vec_mergeh() with a zero vector does the zero-extension */
#define GET_TMP2(dst, p)                                           \
    tmp  = vec_vsx_ld(0, dst);                                     \
    tmp2 = (vector signed short)vec_mergeh(tmp, vec_splat_u8(0));
#endif

/* add one transformed row to eight destination pixels with unsigned
   saturation and store them back as two 32-bit elements */
#define ADD(dest, src, perm)                                       \
    GET_TMP2(dest, perm);                                          \
    tmp3 = vec_adds(tmp2, src);                                    \
    tmp  = vec_packsu(tmp3, tmp3);                                 \
    vec_ste((vector unsigned int)tmp, 0, (unsigned int *)dest);    \
    vec_ste((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD(dest, src0, perm0)  dest += stride;
    ADD(dest, src1, perm1)  dest += stride;
    ADD(dest, src2, perm0)  dest += stride;
    ADD(dest, src3, perm1)
}
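
/* no-rounding VC-1 chroma MC: reuse the shared H.264 chroma template,
   instantiated once with a plain store (put) and once with averaging (avg) */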
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                        PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                        AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
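
/* wire the AltiVec implementations into the VC-1 DSP context */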
av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
{
#if HAVE_ALTIVEC
    int cpu_flags = av_get_cpu_flags();

    if (!PPC_ALTIVEC(cpu_flags))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
#endif /* HAVE_ALTIVEC */
}