#define vzero vec_splat_s32(0)

#define GET_LS(a,b,c,s) {\
        a = vec_vsx_ld(((b) << 1) + 16, s);\

#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
        vector signed short ls;\
        vector signed int vf1, vf2, i1, i2;\
        GET_LS(l1, x, perm, src);\
        i1 = vec_mule(filter, ls);\
        i2 = vec_mulo(filter, ls);\
        vf1 = vec_mergeh(i1, i2);\
        vf2 = vec_mergel(i1, i2);\
        d1 = vec_add(d1, vf1);\
        d2 = vec_add(d2, vf2);\

#define LOAD_FILTER(vf,f) {\
        vf = vec_vsx_ld(joffset, f);\

#define LOAD_L1(ll1,s,p){\
        ll1 = vec_vsx_ld(xoffset, s);\

#define GET_VF4(a, vf, f) {\
    vf = (vector signed short)vec_vsx_ld(a << 3, f);\
    vf = vec_mergeh(vf, (vector signed short)vzero);\

#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}

#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    vf = vec_vsx_ld(pos + a, s);\

#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)

#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\

#define FUNC(name) name ## _vsx

static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset, int start)
    for (i = start; i < dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(val);
static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
    const int dst_u = -(uintptr_t)dest & 15;
    vec_s16 vi, vileft, ditherleft, ditherright;

    for (j = 0; j < 16; j++) {
        val[j] = dither[(dst_u + offset + j) & 7];

    ditherleft = vec_ld(0, val);
    ditherright = vec_ld(0, &val[8]);

    yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16) {
        vi = vec_vsx_ld(0, &src[i]);
        vi = vec_adds(ditherleft, vi);
        vileft = vec_sra(vi, shifts);

        vi = vec_vsx_ld(0, &src[i + 8]);
        vi = vec_adds(ditherright, vi);
        vi = vec_sra(vi, shifts);

        vd = vec_packsu(vileft, vi);
        vec_st(vd, 0, &dest[i]);

    yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
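/* Editor's note (added comment, not part of the original source): dst_u is
 * the number of output pixels up to the next 16-byte boundary of dest, so
 * the vector loop only ever issues aligned vec_st stores; the unaligned
 * head [0, dst_u) and the leftover tail are delegated to the scalar
 * yuv2plane1_8_u above. The dither table is sampled starting at
 * dst_u + offset so the vector loop keeps the correct dither phase. */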
#define output_pixel(pos, val) \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \

static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
    int shift = 15 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
static void yuv2plane1_nbps_vsx(const int16_t *src,
                                uint16_t *dest, int dstW,
                                const int big_endian,
                                const int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15 - output_bits;
    const int add = (1 << (shift - 1));
    const int clip = (1 << output_bits) - 1;
    const vec_u16 vadd = (vec_u16) {add, add, add, add, add, add, add, add};
    const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);

    yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);

    yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
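/* Editor's note (added comment): vswap is 8 when big-endian output is
 * requested and 0 otherwise; vec_rl by 8 bits rotates each 16-bit lane by
 * one byte, i.e. byte-swaps the sample, while a rotate by 0 is a no-op.
 * The same trick is used by the other planar output functions below. */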
static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
                              const int16_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
    int shift = 11 + 16 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = 1 << (shift - 1);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];
static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
                                const int16_t **src, uint16_t *dest, int dstW,
                                int big_endian, int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 11 + 16 - output_bits;
    const int add = (1 << (shift - 1));
    const int clip = (1 << output_bits) - 1;
    const uint16_t swap = big_endian ? 8 : 0;
    const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vec_s16 vzero = vec_splat_s16(0);
    const vec_u8 vperm = (vec_u8) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};

    for (i = 0; i < filterSize; i++) {
        vfilter[i] = (vec_s16) {filter[i], filter[i], filter[i], filter[i],
                                filter[i], filter[i], filter[i], filter[i]};

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;

        for (j = 0; j < filterSize; j++) {
            vin = vec_vsx_ld(0, &src[j][i]);
            vtmp = (vec_u32) vec_mule(vin, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vec_u32) vec_mulo(vin, vfilter[j]);
            vright = vec_add(vright, vtmp);

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);
        v = vec_packsu(vleft, vright);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        v = vec_perm(v, v, vperm);
        vec_st(v, 0, &dest[i]);

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
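/* Editor's note (added comment): vec_mule/vec_mulo return the products of
 * the even and odd 16-bit lanes as 32-bit values, so vleft/vright hold the
 * even- and odd-indexed sums. After packing them back to 16 bits, vperm
 * ({0,1, 8,9, 2,3, 10,11, ...}) re-interleaves the two halves so the
 * samples land in their natural order before the store. */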
#define output_pixel(pos, val, bias, signedness) \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \

static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
    for (i = start; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
static void yuv2plane1_16_vsx(const int32_t *src,
                              uint16_t *dest, int dstW,
                              const int big_endian,
                              const int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int add = (1 << (shift - 1));
    const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);

    yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);

        v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
        v2 = vec_add(v2, vadd);
        v2 = vec_sr(v2, vshift);

        vd = vec_packsu(v, v2);
        vd = vec_rl(vd, vswap);

        vec_st(vd, 0, &dest[i]);

    yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
                            const int32_t **src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
    for (i = start; i < dstW; i++) {
        int val = 1 << (shift - 1);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];
static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
                              const int32_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15;
    const int bias = 0x8000;
    const int add = (1 << (shift - 1)) - 0x40000000;
    const uint16_t swap = big_endian ? 8 : 0;
    const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vec_u16 vbias = (vec_u16) {bias, bias, bias, bias, bias, bias, bias, bias};

    for (i = 0; i < filterSize; i++) {
        vfilter[i] = (vec_s32) {filter[i], filter[i], filter[i], filter[i]};

    yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;

        for (j = 0; j < filterSize; j++) {
            vin32l = vec_vsx_ld(0, &src[j][i]);
            vin32r = vec_vsx_ld(0, &src[j][i + 4]);

            vtmp = (vec_u32) vec_mul(vin32l, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vec_u32) vec_mul(vin32r, vfilter[j]);
            vright = vec_add(vright, vtmp);

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);

        v = vec_add(v, vbias);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);

    yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
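/* Editor's note (added comment): the accumulators start from
 * (1 << 14) - 0x40000000, i.e. they are pre-biased by -2^30 so the 32-bit
 * sums of src[j][i] * filter[j] stay within signed range. After the >> 15
 * that bias corresponds to -0x8000, which the final wrapping 16-bit add of
 * vbias (0x8000 per lane) undoes; this mirrors the bias/0x8000 trick of
 * the scalar output_pixel macro above. */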
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)

#define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                                                 uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
    yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \

#define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
                                                 const int16_t **src, uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
    yuv2planeX_ ## template_size ## _vsx(filter, \
                                         filterSize, (const typeX_t **) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \

yuv2NBPS1(16, BE, 1, 16, int32_t)
yuv2NBPS1(16, LE, 0, 16, int32_t)
yuv2NBPSX(16, BE, 1, 16, int32_t)
yuv2NBPSX(16, LE, 0, 16, int32_t)
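/* Editor's note (added illustration, not part of the original source): for
 * reference, yuv2NBPS1(16, LE, 0, 16, int32_t) expands to roughly
 *
 *     static void yuv2plane1_16LE_vsx(const int16_t *src, uint8_t *dest,
 *                                     int dstW, const uint8_t *dither,
 *                                     int offset)
 *     {
 *         yuv2plane1_16_vsx((const int32_t *) src, (uint16_t *) dest,
 *                           dstW, 0, 16);
 *     }
 *
 * i.e. the wrappers only reinterpret the pointers and bind the bit depth
 * and endianness, so one template serves every depth/endianness pair. */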
        R_l = vec_max(R_l, zero32); \
        R_r = vec_max(R_r, zero32); \
        G_l = vec_max(G_l, zero32); \
        G_r = vec_max(G_r, zero32); \
        B_l = vec_max(B_l, zero32); \
        B_r = vec_max(B_r, zero32); \
        R_l = vec_min(R_l, rgbclip); \
        R_r = vec_min(R_r, rgbclip); \
        G_l = vec_min(G_l, rgbclip); \
        G_r = vec_min(G_r, rgbclip); \
        B_l = vec_min(B_l, rgbclip); \
        B_r = vec_min(B_r, rgbclip); \
        R_l = vec_sr(R_l, shift22); \
        R_r = vec_sr(R_r, shift22); \
        G_l = vec_sr(G_l, shift22); \
        G_r = vec_sr(G_r, shift22); \
        B_l = vec_sr(B_l, shift22); \
        B_r = vec_sr(B_r, shift22); \
        rd16 = vec_packsu(R_l, R_r); \
        gd16 = vec_packsu(G_l, G_r); \
        bd16 = vec_packsu(B_l, B_r); \
        rd = vec_packsu(rd16, zero16); \
        gd = vec_packsu(gd16, zero16); \
        bd = vec_packsu(bd16, zero16); \
        case AV_PIX_FMT_RGB24: \
            out0 = vec_perm(rd, gd, perm3rg0); \
            out0 = vec_perm(out0, bd, perm3tb0); \
            out1 = vec_perm(rd, gd, perm3rg1); \
            out1 = vec_perm(out1, bd, perm3tb1); \
            vec_vsx_st(out0, 0, dest); \
            vec_vsx_st(out1, 16, dest); \
        case AV_PIX_FMT_BGR24: \
            out0 = vec_perm(bd, gd, perm3rg0); \
            out0 = vec_perm(out0, rd, perm3tb0); \
            out1 = vec_perm(bd, gd, perm3rg1); \
            out1 = vec_perm(out1, rd, perm3tb1); \
            vec_vsx_st(out0, 0, dest); \
            vec_vsx_st(out1, 16, dest); \
        case AV_PIX_FMT_BGRA: \
            out0 = vec_mergeh(bd, gd); \
            out1 = vec_mergeh(rd, ad); \
            tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 0, dest); \
            tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 16, dest); \
        case AV_PIX_FMT_RGBA: \
            out0 = vec_mergeh(rd, gd); \
            out1 = vec_mergeh(bd, ad); \
            tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 0, dest); \
            tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 16, dest); \
        case AV_PIX_FMT_ARGB: \
            out0 = vec_mergeh(ad, rd); \
            out1 = vec_mergeh(gd, bd); \
            tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 0, dest); \
            tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 16, dest); \
        case AV_PIX_FMT_ABGR: \
            out0 = vec_mergeh(ad, bd); \
            out1 = vec_mergeh(gd, rd); \
            tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 0, dest); \
            tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
            vec_vsx_st(tmp8, 16, dest); \

static av_always_inline void
yuv2rgb_full_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                            const int16_t **lumSrc, int lumFilterSize,
                            const int16_t *chrFilter, const int16_t **chrUSrc,
                            const int16_t **chrVSrc, int chrFilterSize,
                            const int16_t **alpSrc, uint8_t *dest,
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s32 ystart = vec_splats(1 << 9);
    const vec_s32 uvstart = vec_splats((1 << 9) - (128 << 19));
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);

    ad = vec_splats((uint8_t) 255);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);
    for (i = 0; i < dstW; i += 8) {
        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32_l = vec_adds(vy32_l, tmp3);
            vy32_r = vec_adds(vy32_r, tmp4);

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32_l = vec_adds(vu32_l, tmp3);
            vu32_r = vec_adds(vu32_r, tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32_l = vec_adds(vv32_l, tmp3);
            vv32_r = vec_adds(vv32_r, tmp4);

        vy32_l = vec_sra(vy32_l, shift10);
        vy32_r = vec_sra(vy32_r, shift10);
        vu32_l = vec_sra(vu32_l, shift10);
        vu32_r = vec_sra(vu32_r, shift10);
        vv32_l = vec_sra(vv32_l, shift10);
        vv32_r = vec_sra(vv32_r, shift10);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
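/* Editor's note (added comment): the loop above is plain fixed-point
 * YUV->RGB. The filtered sums are shifted down by 10, Y is offset and
 * scaled by the per-colorspace y_offset/y_coeff with a 1 << 21 rounding
 * term, then R = Y' + V*v2r_coeff, G = Y' + U*u2g_coeff + V*v2g_coeff and
 * B = Y' + U*u2b_coeff. The clamp to [0, 1 << 30], the >> 22 down to
 * 8 bits and the per-format store happen in the write-out macro defined
 * earlier (vec_max/vec_min/vec_sr/vec_packsu plus the AV_PIX_FMT_* cases). */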
#define SETUP(x, buf0, alpha1, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha1); \
    tmp2 = vec_mulo(x, alpha1); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
\
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
\
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \

static av_always_inline void
yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf[2], uint8_t *dest, int dstW,
                            int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vec_s16 vy, vu, vv, A = vec_splat_s16(0);
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
    const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift19 = vec_splats(19U);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);
    const vec_s32 dec128 = vec_splats(128 << 19);
    const vec_s32 add18 = vec_splats(1 << 18);
    for (i = 0; i < dstW; i += 8) {
        SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vu32_l = vec_sra(tmp3, shift10);
        vu32_r = vec_sra(tmp4, shift10);

        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vv32_l = vec_sra(tmp3, shift10);
        vv32_r = vec_sra(tmp4, shift10);

        SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
        tmp3 = vec_add(tmp3, add18);
        tmp4 = vec_add(tmp4, add18);
        tmp3 = vec_sra(tmp3, shift19);
        tmp4 = vec_sra(tmp4, shift19);
        A = vec_packs(tmp3, tmp4);
        ad = vec_packsu(A, (vec_s16) zero16);

        ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
static av_always_inline void
yuv2rgb_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vec_s16 vy, vu, vv, A = vec_splat_s16(0);
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r, vud32_l, vud32_r, vvd32_l, vvd32_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
    const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift19 = vec_splats(19U);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);
    const vec_s32 dec128 = vec_splats(128 << 19);
    const vec_s32 add18 = vec_splats(1 << 18);
    for (i = 0; i < (dstW + 1) >> 1; i += 8) {
        SETUP(vy, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vu32_l = vec_sra(tmp3, shift10);
        vu32_r = vec_sra(tmp4, shift10);

        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vv32_l = vec_sra(tmp3, shift10);
        vv32_r = vec_sra(tmp4, shift10);

        SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
        tmp3 = vec_add(tmp3, add18);
        tmp4 = vec_add(tmp4, add18);
        tmp3 = vec_sra(tmp3, shift19);
        tmp4 = vec_sra(tmp4, shift19);
        A = vec_packs(tmp3, tmp4);
        ad = vec_packsu(A, (vec_s16) zero16);

        ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
        vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
        vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
        vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        SETUP(vy, &buf0[i * 2 + 8], vyalpha1, &buf1[i * 2 + 8], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
        vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
        vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
        vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
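/* Editor's note (added comment): unlike the "full" variant above, this
 * template works on horizontally subsampled chroma, so each U/V sample
 * covers two luma samples. The doubleleft/doubleright permutes duplicate
 * every 32-bit chroma value before the per-pixel math, and the second half
 * of the body repeats the computation for luma samples 8..15 using the
 * right-hand chroma vectors. */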
static av_always_inline void
yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
    vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
    const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
    const vec_s16 mul4 = vec_splat_s16(4);
    const vec_s16 add64 = vec_splat_s16(64);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_s16 max255 = vec_splat_s16(255);
    for (i = 0; i < dstW; i += 8) {
        vy = vec_ld(0, &buf0[i]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vu = vec_ld(0, &ubuf0[i]);
        vv = vec_ld(0, &vbuf0[i]);
        if (uvalpha < 2048) {
            tmp32 = vec_mule(vu, mul4);
            tmp32_2 = vec_mulo(vu, mul4);
            vu32_l = vec_mergeh(tmp32, tmp32_2);
            vu32_r = vec_mergel(tmp32, tmp32_2);
            tmp32 = vec_mule(vv, mul4);
            tmp32_2 = vec_mulo(vv, mul4);
            vv32_l = vec_mergeh(tmp32, tmp32_2);
            vv32_r = vec_mergel(tmp32, tmp32_2);

            tmp16 = vec_ld(0, &ubuf1[i]);
            vu = vec_add(vu, tmp16);

            tmp16 = vec_ld(0, &vbuf1[i]);
            vv = vec_add(vv, tmp16);

            vu32_l = vec_mule(vu, mul8);
            vu32_r = vec_mulo(vu, mul8);
            vv32_l = vec_mule(vv, mul8);
            vv32_r = vec_mulo(vv, mul8);

        A = vec_ld(0, &abuf0[i]);
        A = vec_add(A, add64);
        A = vec_sr(A, shift7);
        A = vec_max(A, max255);
        ad = vec_packsu(A, (vec_s16) zero16);

        ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
static av_always_inline void
yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
    vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
    vec_s32 vud32_l, vud32_r, vvd32_l, vvd32_r;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift2 = vec_splat_u32(2);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
    const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
    const vec_s16 mul4 = vec_splat_s16(4);
    const vec_s16 mul8 = vec_splat_s16(8);
    const vec_s16 add64 = vec_splat_s16(64);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_s16 max255 = vec_splat_s16(255);
    for (i = 0; i < (dstW + 1) >> 1; i += 8) {
        vy = vec_ld(0, &buf0[i * 2]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vu = vec_ld(0, &ubuf0[i]);
        vv = vec_ld(0, &vbuf0[i]);
        if (uvalpha < 2048) {
            tmp32 = vec_mule(vu, mul4);
            tmp32_2 = vec_mulo(vu, mul4);
            vu32_l = vec_mergeh(tmp32, tmp32_2);
            vu32_r = vec_mergel(tmp32, tmp32_2);
            tmp32 = vec_mule(vv, mul4);
            tmp32_2 = vec_mulo(vv, mul4);
            vv32_l = vec_mergeh(tmp32, tmp32_2);
            vv32_r = vec_mergel(tmp32, tmp32_2);

            tmp16 = vec_ld(0, &ubuf1[i]);
            vu = vec_add(vu, tmp16);

            tmp16 = vec_ld(0, &vbuf1[i]);
            vv = vec_add(vv, tmp16);

            vu32_l = vec_mule(vu, mul8);
            vu32_r = vec_mulo(vu, mul8);
            vv32_l = vec_mule(vv, mul8);
            vv32_r = vec_mulo(vv, mul8);

        A = vec_ld(0, &abuf0[i]);
        A = vec_add(A, add64);
        A = vec_sr(A, shift7);
        A = vec_max(A, max255);
        ad = vec_packsu(A, (vec_s16) zero16);

        ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
        vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
        vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
        vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        vy = vec_ld(16, &buf0[i * 2]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
        vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
        vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
        vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt, hasAlpha); \

#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                    dstW, uvalpha, y, fmt, hasAlpha); \

static av_always_inline void
write422(const vec_s16 vy1, const vec_s16 vy2,
         const vec_s16 vu, const vec_s16 vv,
         uint8_t *dest, const enum AVPixelFormat target)
    const vec_u8 yuyv1 = (vec_u8) {0x0, 0x10, 0x1, 0x18,
                                   0x2, 0x11, 0x3, 0x19,
                                   0x4, 0x12, 0x5, 0x1a,
                                   0x6, 0x13, 0x7, 0x1b };
    const vec_u8 yuyv2 = (vec_u8) {0x8, 0x14, 0x9, 0x1c,
                                   0xa, 0x15, 0xb, 0x1d,
                                   0xc, 0x16, 0xd, 0x1e,
                                   0xe, 0x17, 0xf, 0x1f };
    const vec_u8 yvyu1 = (vec_u8) {0x0, 0x18, 0x1, 0x10,
                                   0x2, 0x19, 0x3, 0x11,
                                   0x4, 0x1a, 0x5, 0x12,
                                   0x6, 0x1b, 0x7, 0x13 };
    const vec_u8 yvyu2 = (vec_u8) {0x8, 0x1c, 0x9, 0x14,
                                   0xa, 0x1d, 0xb, 0x15,
                                   0xc, 0x1e, 0xd, 0x16,
                                   0xe, 0x1f, 0xf, 0x17 };
    const vec_u8 uyvy1 = (vec_u8) {0x10, 0x0, 0x18, 0x1,
                                   0x11, 0x2, 0x19, 0x3,
                                   0x12, 0x4, 0x1a, 0x5,
                                   0x13, 0x6, 0x1b, 0x7 };
    const vec_u8 uyvy2 = (vec_u8) {0x14, 0x8, 0x1c, 0x9,
                                   0x15, 0xa, 0x1d, 0xb,
                                   0x16, 0xc, 0x1e, 0xd,
                                   0x17, 0xe, 0x1f, 0xf };
    vd1 = vec_packsu(vy1, vy2);
    vd2 = vec_packsu(vu, vv);

        tmp = vec_perm(vd1, vd2, yuyv1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yuyv2);
        vec_st(tmp, 16, dest);

        tmp = vec_perm(vd1, vd2, yvyu1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yvyu2);
        vec_st(tmp, 16, dest);

        tmp = vec_perm(vd1, vd2, uyvy1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, uyvy2);
        vec_st(tmp, 16, dest);
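/* Editor's note (added comment): vd1 holds the 16 packed luma samples and
 * vd2 the 8 U followed by the 8 V samples. In the permute tables above,
 * byte indices 0x0-0xf select from vd1 and 0x10-0x1f from vd2, so the
 * yuyv, yvyu and uyvy tables interleave the planes directly into YUYV422,
 * YVYU422 and UYVY422 byte order, 32 output bytes per call. */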
static av_always_inline void
yuv2422_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest, int dstW,
    vec_s32 vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4;
    const vec_s32 start = vec_splats(1 << 18);
    const vec_u32 shift19 = vec_splats(19U);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[0] = vec_adds(vy32[0], tmp3);
            vy32[1] = vec_adds(vy32[1], tmp4);

            vv = vec_ld(0, &lumSrc[j][(i + 4) * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[2] = vec_adds(vy32[2], tmp3);
            vy32[3] = vec_adds(vy32[3], tmp4);

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32[0] = vec_adds(vu32[0], tmp3);
            vu32[1] = vec_adds(vu32[1], tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32[0] = vec_adds(vv32[0], tmp3);
            vv32[1] = vec_adds(vv32[1], tmp4);

        for (j = 0; j < 4; j++) {
            vy32[j] = vec_sra(vy32[j], shift19);

        for (j = 0; j < 2; j++) {
            vu32[j] = vec_sra(vu32[j], shift19);
            vv32[j] = vec_sra(vv32[j], shift19);

        vy1 = vec_packs(vy32[0], vy32[1]);
        vy2 = vec_packs(vy32[2], vy32[3]);
        vu = vec_packs(vu32[0], vu32[1]);
        vv = vec_packs(vv32[0], vv32[1]);

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
#define SETUP(x, buf0, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
\
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
\
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
\
    tmp3 = vec_sra(tmp3, shift19); \
    tmp4 = vec_sra(tmp4, shift19); \
    x = vec_packs(tmp3, tmp4); \

static av_always_inline void
yuv2422_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_u32 shift19 = vec_splats(19U);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        SETUP(vy1, &buf0[i * 2], &buf1[i * 2], vyalpha1)
        SETUP(vy2, &buf0[(i + 4) * 2], &buf1[(i + 4) * 2], vyalpha1)
        SETUP(vu, &ubuf0[i], &ubuf1[i], vuvalpha1)
        SETUP(vv, &vbuf0[i], &vbuf1[i], vuvalpha1)

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
static av_always_inline void
yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const vec_s16 add64 = vec_splats((int16_t) 64);
    const vec_s16 add128 = vec_splats((int16_t) 128);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_u16 shift8 = vec_splat_u16(8);

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            vv = vec_ld(0, &vbuf0[i]);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_add(vu, add64);
            vv = vec_add(vv, add64);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift7);
            vv = vec_sra(vv, shift7);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            tmp = vec_ld(0, &ubuf1[i]);
            vu = vec_adds(vu, tmp);
            vv = vec_ld(0, &vbuf0[i]);
            tmp = vec_ld(0, &vbuf1[i]);
            vv = vec_adds(vv, tmp);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_adds(vu, add128);
            vv = vec_adds(vv, add128);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift8);
            vv = vec_sra(vv, shift8);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
#define YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt); \

#define YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt); \

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
                                    abuf0, dest, dstW, uvalpha, \

static void hyscale_fast_vsx(SwsContext *c, int16_t *dst, int dstWidth,
                             const uint8_t *src, int srcW, int xInc)
    unsigned int xpos = 0, xx;
    vec_s16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u16 vshift = vec_splat_u16(7);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = (vec_s8) vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_vsx_ld(0, &src[xx]);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_perm(vin, vin, vperm);

        vin2 = vec_vsx_ld(1, &src[xx]);
        vin2 = vec_perm(vin2, vin2, vperm);

        vmul = (vec_s8) vec_sub(vin2, vin);
        vtmp = vec_mule(vmul, valpha);
        vtmp2 = vec_mulo(vmul, valpha);
        vtmp3 = vec_mergeh(vtmp, vtmp2);
        vtmp4 = vec_mergel(vtmp, vtmp2);

        vd_l = (vec_u16) vec_mergeh(vin, vzero);
        vd_r = (vec_u16) vec_mergel(vin, vzero);
        vd_l = vec_sl(vd_l, vshift);
        vd_r = vec_sl(vd_r, vshift);

        vd_l = vec_add(vd_l, (vec_u16) vtmp3);
        vd_r = vec_add(vd_r, (vec_u16) vtmp4);

        vec_st((vec_s16) vd_l, 0, &dst[i]);
        vec_st((vec_s16) vd_r, 0, &dst[i + 8]);

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
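/* Editor's note (added illustration, not part of the original source): the
 * vector loop above is a 16-pixels-per-iteration version of the generic
 * bilinear horizontal scaler. A scalar sketch of the same computation,
 * under the hypothetical name hyscale_fast_scalar, looks like this: */
#if 0
static void hyscale_fast_scalar(int16_t *dst, int dstWidth,
                                const uint8_t *src, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0;

    for (i = 0; i < dstWidth; i++) {
        unsigned int xx     = xpos >> 16;           /* integer source index */
        unsigned int xalpha = (xpos & 0xFFFF) >> 9; /* 7-bit blend factor   */
        /* src[xx] << 7 plus the weighted difference to the next sample */
        dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha;
        xpos  += xInc;
    }
    /* outputs that would read past srcW - 1 are clamped, as above */
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
}
#endif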
#define HCSCALE(in, out) \
    vin = vec_vsx_ld(0, &in[xx]); \
    vin = vec_perm(vin, vin, vperm); \
\
    vin2 = vec_vsx_ld(1, &in[xx]); \
    vin2 = vec_perm(vin2, vin2, vperm); \
\
    vtmp = vec_mule(vin, valphaxor); \
    vtmp2 = vec_mulo(vin, valphaxor); \
    vtmp3 = vec_mergeh(vtmp, vtmp2); \
    vtmp4 = vec_mergel(vtmp, vtmp2); \
\
    vtmp = vec_mule(vin2, valpha); \
    vtmp2 = vec_mulo(vin2, valpha); \
    vd_l = vec_mergeh(vtmp, vtmp2); \
    vd_r = vec_mergel(vtmp, vtmp2); \
\
    vd_l = vec_add(vd_l, vtmp3); \
    vd_r = vec_add(vd_r, vtmp4); \
\
    vec_st((vec_s16) vd_l, 0, &out[i]); \
    vec_st((vec_s16) vd_r, 0, &out[i + 8])

static void hcscale_fast_vsx(SwsContext *c, int16_t *dst1, int16_t *dst2,
                             int dstWidth, const uint8_t *src1,
                             const uint8_t *src2, int srcW, int xInc)
    unsigned int xpos = 0, xx;
    vec_u8 valpha, valphaxor;
    vec_u16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = vec_pack(vcoord16[0], vcoord16[1]);
        valphaxor = vec_xor(valpha, vxor);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        HCSCALE(src1, dst1);
        HCSCALE(src2, dst2);

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
static void hScale8To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                            const uint8_t *src, const int16_t *filter,
                            const int32_t *filterPos, int filterSize)
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];

            for (j = 0; j < filterSize; j++) {
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];

            dst[i] = FFMIN(val >> 3, (1 << 19) - 1);

        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin8 = vec_vsx_ld(0, &src[srcPos + j]);
                vin = (vec_s16) vec_mergeh(vin8, vzero);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vout = vec_msums(vin, vfilter, vout);

            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> 3, (1 << 19) - 1);
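/* Editor's note (added comment): each destination sample is a dot product
 * of up to filterSize zero-extended 8-bit source samples with 16-bit
 * coefficients. vec_msums multiplies and accumulates pairs into 32-bit
 * lanes, vec_sums reduces them to a single sum in element 3, and the
 * result is shifted and clamped to the 19-bit intermediate range. When
 * filterSize is not a multiple of 8, vunusedtab[filterSize % 8] replaces
 * the lanes past the filter tail with zeros (index 0x10 selects from the
 * zero vector passed as the second vec_perm operand). */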
static void hScale16To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];

            dst[i] = FFMIN(val >> sh, (1 << 19) - 1);

        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);

            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 19) - 1);
static void hScale16To15_vsx(SwsContext *c, int16_t *dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];

            dst[i] = FFMIN(val >> sh, (1 << 15) - 1);

        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);

            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 15) - 1);
av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx  : yuv2plane1_9LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx  : yuv2planeX_9LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
        if (cpu_flags & AV_CPU_FLAG_POWER8) {
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;

    switch (dstFormat) {

    switch (dstFormat) {