/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avutil.h"
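/* Transpose an 8x8 block of 16-bit elements held in eight vector registers:
 * three rounds of vec_mergeh/vec_mergel interleaving implement the classic
 * SIMD transpose network, so no element ever has to round-trip through
 * memory. */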
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e);             \
        tempB1 = vec_mergel (src_a, src_e);             \
        tempC1 = vec_mergeh (src_b, src_f);             \
        tempD1 = vec_mergel (src_b, src_f);             \
        tempE1 = vec_mergeh (src_c, src_g);             \
        tempF1 = vec_mergel (src_c, src_g);             \
        tempG1 = vec_mergeh (src_d, src_h);             \
        tempH1 = vec_mergel (src_d, src_h);             \
        tempA2 = vec_mergeh (tempA1, tempE1);           \
        tempB2 = vec_mergel (tempA1, tempE1);           \
        tempC2 = vec_mergeh (tempB1, tempF1);           \
        tempD2 = vec_mergel (tempB1, tempF1);           \
        tempE2 = vec_mergeh (tempC1, tempG1);           \
        tempF2 = vec_mergel (tempC1, tempG1);           \
        tempG2 = vec_mergeh (tempD1, tempH1);           \
        tempH2 = vec_mergel (tempD1, tempH1);           \
        src_a = vec_mergeh (tempA2, tempE2);            \
        src_b = vec_mergel (tempA2, tempE2);            \
        src_c = vec_mergeh (tempB2, tempF2);            \
        src_d = vec_mergel (tempB2, tempF2);            \
        src_e = vec_mergeh (tempC2, tempG2);            \
        src_f = vec_mergel (tempC2, tempG2);            \
        src_g = vec_mergeh (tempD2, tempH2);            \
        src_h = vec_mergel (tempD2, tempH2);            \
    } while (0)
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true.
    */
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        data_0,
                        data_0 * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
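    /* The four per-block thresholds are packed into one aligned buffer so a
     * single vec_ld plus four vec_splat calls (below) can broadcast each of
     * them across a full vector. */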
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;
#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    v_srcA##i =                                                         \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }
#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);
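/* v_comp##i is all-ones in every lane where |line i - line j| stays below the
 * DC threshold: adding v_dcOffset and comparing unsigned against 2*offset+1
 * implements the absolute-value test without a vec_abs; ANDing with 1 turns
 * each hit into a countable 1. */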
    {
    ITER(0, 1)
    ITER(1, 2)
    ITER(2, 3)
    ITER(3, 4)
    ITER(4, 5)
    ITER(5, 6)
    ITER(6, 7)

    v_numEq = vec_sum4s(v_part0, v_numEq);
    v_numEq = vec_sum4s(v_part1, v_numEq);
    v_numEq = vec_sum4s(v_part2, v_numEq);
    v_numEq = vec_sum4s(v_part3, v_numEq);
    v_numEq = vec_sum4s(v_part4, v_numEq);
    v_numEq = vec_sum4s(v_part5, v_numEq);
    v_numEq = vec_sum4s(v_part6, v_numEq);
    }
#undef ITER
    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);
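    /* vec_sums folds the four partial counts into element 3; splatting it and
     * vec_ste then move the single scalar result out to numEq. */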
    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);
        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}
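/* The return values mirror the C classifier: 0 leaves the block alone, 1
 * selects the strong low-pass for flat blocks, 2 selects the default
 * deblocking filter. */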
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      this code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3),
                         av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6),
                         av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3),
                         av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6),
                         av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;
    vqp = vec_splat(vqp, 0);

    src2 += stride*3;
#define LOAD_LINE(i)                                                    \
    perml##i = vec_lvsl(i * stride, src2);                              \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)
#define LOAD_LINE_ALIGNED(i)                                            \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
    {
        const vector unsigned short v_2 = vec_splat_u16(2);
        const vector unsigned short v_4 = vec_splat_u16(4);
        const vector signed short v_diff01 = vec_sub(vb0, vb1);
        const vector unsigned short v_cmp01 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
        const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
        const vector signed short v_diff89 = vec_sub(vb8, vb9);
        const vector unsigned short v_cmp89 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
        const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
        const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
        const vector signed short temp02 = vec_add(vb2, vb3);
        const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
        const vector signed short v_sumsB0 = vec_add(temp02, temp03);

        const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
        const vector signed short v_sumsB1 = vec_add(temp11, vb4);

        const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
        const vector signed short v_sumsB2 = vec_add(temp21, vb5);

        const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
        const vector signed short v_sumsB3 = vec_add(temp31, vb6);

        const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
        const vector signed short v_sumsB4 = vec_add(temp41, vb7);

        const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
        const vector signed short v_sumsB5 = vec_add(temp51, vb8);

        const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
        const vector signed short v_sumsB6 = vec_add(temp61, v_last);

        const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
        const vector signed short v_sumsB7 = vec_add(temp71, v_last);

        const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
        const vector signed short v_sumsB8 = vec_add(temp81, v_last);

        const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
        const vector signed short v_sumsB9 = vec_add(temp91, v_last);
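        /* v_sumsB0..v_sumsB9 form a running window sum: each step subtracts
         * the sample that leaves the window and adds the one that enters it,
         * so every further sum costs two ops instead of a full re-add. */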
#define COMPUTE_VR(i, j, k)                                             \
    const vector signed short temps1##i =                               \
        vec_add(v_sumsB##i, v_sumsB##k);                                \
    const vector signed short temps2##i =                               \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)
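/* vr##j = (sums[i] + sums[k] + 2*line[j]) >> 4: the low-passed value of line j
 * built from the two neighbouring window sums plus its own samples. */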
        COMPUTE_VR(0, 1, 2);
        COMPUTE_VR(1, 2, 3);
        COMPUTE_VR(2, 3, 4);
        COMPUTE_VR(3, 4, 5);
        COMPUTE_VR(4, 5, 6);
        COMPUTE_VR(5, 6, 7);
        COMPUTE_VR(6, 7, 8);
        COMPUTE_VR(7, 8, 9);

        const vector signed char neg1 = vec_splat_s8(-1);
        const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
#define PACK_AND_STORE(i)                                               \
    {   const vector unsigned char perms##i =                           \
            vec_lvsr(i * stride, src2);                                 \
        const vector unsigned char vf##i =                              \
            vec_packsu(vr##i, (vector signed short)zero);               \
        const vector unsigned char vg##i =                              \
            vec_perm(vf##i, vbT##i, permHH);                            \
        const vector unsigned char mask##i =                            \
            vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
        const vector unsigned char vg2##i =                             \
            vec_perm(vg##i, vg##i, perms##i);                           \
        const vector unsigned char svA##i =                             \
            vec_sel(vbA##i, vg2##i, mask##i);                           \
        const vector unsigned char svB##i =                             \
            vec_sel(vg2##i, vbB##i, mask##i);                           \
        vec_st(svA##i, i * stride, src2);                               \
        vec_st(svB##i, i * stride + 16, src2);}
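/* Unaligned store: vec_lvsr yields the rotation that realigns the result, the
 * permuted zero/neg1 pair becomes a byte-select mask for the straddled
 * position, and the two vec_sel merges splice the new bytes into the
 * previously loaded aligned vectors, leaving bytes outside the block intact. */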
#define PACK_AND_STORE_ALIGNED(i)                                       \
    {   const vector unsigned char vf##i =                              \
            vec_packsu(vr##i, (vector signed short)zero);               \
        const vector unsigned char vg##i =                              \
            vec_perm(vf##i, vbT##i, permHH);                            \
        vec_st(vg##i, i * stride, src2);}
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1)
        PACK_AND_STORE_ALIGNED(2)
        PACK_AND_STORE_ALIGNED(3)
        PACK_AND_STORE_ALIGNED(4)
        PACK_AND_STORE_ALIGNED(5)
        PACK_AND_STORE_ALIGNED(6)
        PACK_AND_STORE_ALIGNED(7)
        PACK_AND_STORE_ALIGNED(8)
    } else {
        PACK_AND_STORE(1)
        PACK_AND_STORE(2)
        PACK_AND_STORE(3)
        PACK_AND_STORE(4)
        PACK_AND_STORE(5)
        PACK_AND_STORE(6)
        PACK_AND_STORE(7)
        PACK_AND_STORE(8)
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
    }
}
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src + stride*3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
        (vector signed short)vec_ld(0, qp), 0);
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
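    /* mE = 2*(l3-l6) + 5*(l5-l4) is the energy across the edge between lines
     * 4 and 5; its magnitude, relative to lE/rE below, determines how much
     * correction is applied. */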
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);
    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i)                                                        \
    {   const vector unsigned char perms##i =                           \
            vec_lvsr(i * stride, src2);                                 \
        const vector unsigned char vg##i =                              \
            vec_perm(st##i, vbT##i, permHH);                            \
        const vector unsigned char mask##i =                            \
            vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
        const vector unsigned char vg2##i =                             \
            vec_perm(vg##i, vg##i, perms##i);                           \
        const vector unsigned char svA##i =                             \
            vec_sel(vbA##i, vg2##i, mask##i);                           \
        const vector unsigned char svB##i =                             \
            vec_sel(vg2##i, vbB##i, mask##i);                           \
        vec_st(svA##i, i * stride, src2);                               \
        vec_st(svB##i, i * stride + 16, src2);}

    STORE(4)
    STORE(5)
}
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};
    /*
      this code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);
#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, srcCopy);                                  \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);           \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);      \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE
    vector unsigned char v_avg;
    DECLARE_ALIGNED(16, signed int, S)[8];
    DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);

    {
    const vector unsigned char trunc_perm = (vector unsigned char)
        {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
         0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);
#define EXTRACT(op) do {                                                \
    const vector unsigned char s_1   = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s_2   = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s_6   = vec_##op(s_1, s_2);              \
    const vector unsigned char s_8h  = vec_mergeh(s_6, s_6);            \
    const vector unsigned char s_8l  = vec_mergel(s_6, s_6);            \
    const vector unsigned char s_9   = vec_##op(s_8h, s_8l);            \
    const vector unsigned char s_9h  = vec_mergeh(s_9, s_9);            \
    const vector unsigned char s_9l  = vec_mergel(s_9, s_9);            \
    const vector unsigned char s_10  = vec_##op(s_9h, s_9l);            \
    const vector unsigned char s_10h = vec_mergeh(s_10, s_10);          \
    const vector unsigned char s_10l = vec_mergel(s_10, s_10);          \
    const vector unsigned char s_11  = vec_##op(s_10h, s_10l);          \
    const vector unsigned char s_11h = vec_mergeh(s_11, s_11);          \
    const vector unsigned char s_11l = vec_mergel(s_11, s_11);          \
    v_##op = vec_##op(s_11h, s_11l);                                    \
} while (0)
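/* EXTRACT is a logarithmic horizontal reduction: each mergeh/mergel pair
 * lines the vector up against a shifted copy of itself, so a handful of
 * vec_min/vec_max rounds leave the block-wide minimum or maximum splatted
 * into every byte of v_min/v_max. */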
    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT
    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;

    v_avg = vec_avg(v_min, v_max);
    }

    {
    const vector unsigned short mask1 = (vector unsigned short)
        {0x0001, 0x0002, 0x0004, 0x0008,
         0x0010, 0x0020, 0x0040, 0x0080};
    const vector unsigned short mask2 = (vector unsigned short)
        {0x0100, 0x0200, 0x0000, 0x0000,
         0x0000, 0x0000, 0x0000, 0x0000};
    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);
    vector signed int sumA2;
    vector signed int sumB2;
    vector signed int sum0, sum1, sum2, sum3, sum4;
    vector signed int sum5, sum6, sum7, sum8, sum9;

#define COMPARE(i) do {                                                 \
        const vector unsigned char cmp =                                \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi =                             \
            (vector unsigned short)vec_mergeh(cmp, cmp);                \
        const vector unsigned short cmpLi =                             \
            (vector unsigned short)vec_mergel(cmp, cmp);                \
        const vector signed short cmpHf =                               \
            (vector signed short)vec_and(cmpHi, mask1);                 \
        const vector signed short cmpLf =                               \
            (vector signed short)vec_and(cmpLi, mask2);                 \
        const vector signed int sump = vec_sum4s(cmpHf, zero);          \
        const vector signed int sumq = vec_sum4s(cmpLf, sump);          \
        sum##i = vec_sums(sumq, zero);                                  \
    } while (0)

    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE

    {
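        /* Each sum##i now carries, in its last element, a 10-bit bitmap of
         * "brighter than the block average" flags for line i: mask1/mask2
         * assign every column a distinct power of two and vec_sum4s/vec_sums
         * add up the selected weights. */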
        const vector signed int sump02 = vec_mergel(sum0, sum2);
        const vector signed int sump13 = vec_mergel(sum1, sum3);
        const vector signed int sumA = vec_mergel(sump02, sump13);

        const vector signed int sump46 = vec_mergel(sum4, sum6);
        const vector signed int sump57 = vec_mergel(sum5, sum7);
        const vector signed int sumB = vec_mergel(sump46, sump57);

        const vector signed int sump8A = vec_mergel(sum8, zero);
        const vector signed int sump9B = vec_mergel(sum9, zero);
        const vector signed int sumC = vec_mergel(sump8A, sump9B);
        const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
        const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
        const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
        const vector signed int t2A = vec_or(sumA, tA);
        const vector signed int t2B = vec_or(sumB, tB);
        const vector signed int t2C = vec_or(sumC, tC);
        const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                              vec_sl(t2A, vuint32_1));
        const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                              vec_sl(t2B, vuint32_1));
        const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                              vec_sl(t2C, vuint32_1));
        const vector signed int yA = vec_and(t2A, t3A);
        const vector signed int yB = vec_and(t2B, t3B);
        const vector signed int yC = vec_and(t2C, t3C);
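        /* y* keeps only the bitmap bits whose left and right neighbours are
         * also set: (t2 >> 1) & (t2 << 1) tests both horizontal neighbours at
         * once, performing the dering candidate test ten lines at a time. */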
        const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
        const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
        const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
        const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
        const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
        const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
        const vector signed int sumAp = vec_and(yA,
                                                vec_and(sumAd4, sumAd8));
        const vector signed int sumBp = vec_and(yB,
                                                vec_and(sumBd4, sumBd8));
        sumA2 = vec_or(sumAp,
                       vec_sl(sumAp,
                              vuint32_16));
        sumB2 = vec_or(sumBp,
                       vec_sl(sumBp,
                              vuint32_16));
    }
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    }
    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

#define F_INIT()                                                \
    vector unsigned char tenRightM = tenRight;                  \
    vector unsigned char permA1M = permA1;                      \
    vector unsigned char permA2M = permA2;                      \
    vector unsigned char extractPermM = extractPerm
#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
        const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
        const vector signed int a_sump =                                \
            (vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
        vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
        const vector signed int p =                                     \
            (vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
                                        extractPermM);                  \
        const vector signed int sum  = vec_add(p, vQP2);                \
        const vector signed int diff = vec_sub(p, vQP2);                \
        vector signed int newpm;                                        \
        vector unsigned char newpm2, mask;                              \
        F = vec_splat(F, 3);                                            \
        if (vec_all_lt(sum, F))                                         \
            newpm = sum;                                                \
        else if (vec_all_gt(diff, F))                                   \
            newpm = diff;                                               \
        else newpm = vec_avg(p, F);                                     \
        newpm2 = vec_splat((vector unsigned char)newpm, 15);            \
        mask = vec_add(identity, tenRightM);                            \
        src##j = vec_perm(src##j, newpm2, mask);                        \
    }                                                                   \
    permA1M = vec_add(permA1M, permA1inc);                              \
    permA2M = vec_add(permA2M, permA2inc);                              \
    tenRightM = vec_sro(tenRightM, eightLeft);                          \
    extractPermM = vec_add(extractPermM, extractPermInc)
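/* The *M vectors are walking state: every F2 step slides the 3x3 averaging
 * window one pixel to the right by incrementing the permute masks
 * (permA1inc/permA2inc), moves the store position via tenRightM, and advances
 * the extraction mask, avoiding per-pixel reloads from memory. */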
#define ITER(i, j, k) do {                      \
        F_INIT();                               \
        F2(i, j, k, 0);                         \
        F2(i, j, k, 1);                         \
        F2(i, j, k, 2);                         \
        F2(i, j, k, 3);                         \
        F2(i, j, k, 4);                         \
        F2(i, j, k, 5);                         \
        F2(i, j, k, 6);                         \
        F2(i, j, k, 7);                         \
    } while (0)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);
#define STORE_LINE(i) do {                                      \
        const vector unsigned char permST =                     \
            vec_lvsr(i * stride, srcCopy);                      \
        const vector unsigned char maskST =                     \
            vec_perm((vector unsigned char)zero,                \
                     (vector unsigned char)neg1, permST);       \
        src##i = vec_perm(src##i, src##i, permST);              \
        sA##i = vec_sel(sA##i, src##i, maskST);                 \
        sB##i = vec_sel(src##i, sB##i, maskST);                 \
        vec_st(sA##i, i * stride, srcCopy);                     \
        vec_st(sB##i, i * stride + 16, srcCopy);                \
    } while (0)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
#undef F_INIT
}
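/* No AltiVec versions exist for the filters below; alias them to the plain C
 * implementations so the shared template code still compiles and links. */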
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
static inline void tempNoiseReducer_altivec(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;
#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);
    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE
#define ACCUMULATE_DIFFS(i) do {                                \
        vector signed short v_d = vec_sub(v_tempBlurredAss##i,  \
                                          v_srcAss##i);         \
        v_dp = vec_msums(v_d, v_d, v_dp);                       \
        v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp);           \
    } while (0)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
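    /* v_dp now holds the sum of squared differences between src and
     * tempBlurred, v_sysdp the plain signed sum of differences. */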
    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];
    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1)) + (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;

    *tempBlurredPast = i;
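    /* The block's noise estimate is smoothed with the stored estimates of the
     * blocks above/below (+-256) and left/right (+-1) before being compared
     * against the maxNoise thresholds below. */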
    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) do {                                                      \
        const vector signed short v_temp =                              \
            vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i);     \
        const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
        v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3);               \
    } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) do {                                                      \
        const vector signed short v_temp =                              \
            vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i);     \
        const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
        v_tempBlurredAss##i =                                           \
            vec_sr(v_temp2, (vector unsigned short)vsint16_2);          \
    } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }
#define PACK_AND_STORE(src, i) do {                                     \
    const vector unsigned char perms = vec_lvsr(i * stride, src);       \
    const vector unsigned char vf =                                     \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
    const vector unsigned char mask =                                   \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
    const vector unsigned char vg2 = vec_perm(vg, vg, perms);           \
    const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
    const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
    vec_st(svA, i * stride, src);                                       \
    vec_st(svB, i * stride + 16, src);                                  \
} while (0)
    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
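    /* Same merge-based transpose network as ALTIVEC_TRANSPOSE_8x8_SHORT, but
     * widened with a zero vector: the 16x8 byte block at src (stride-spaced
     * rows) lands as 16 consecutive aligned vectors at dst. */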
#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);
    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);
    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);
    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector signed char neg1 = vec_splat_s8(-1);
#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char src##i = vec_ld(i * 16, src);                  \
    vector unsigned char src##j = vec_ld(j * 16, src)
    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);
    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
#define STORE_DOUBLE_LINE(i, j) do {                                    \
    vector unsigned char dstAi = vec_ld(i * stride, dst);               \
    vector unsigned char dstBi = vec_ld(i * stride + 16, dst);          \
    vector unsigned char dstAj = vec_ld(j * stride, dst);               \
    vector unsigned char dstBj = vec_ld(j * stride + 16, dst);          \
    vector unsigned char aligni = vec_lvsr(i * stride, dst);            \
    vector unsigned char alignj = vec_lvsr(j * stride, dst);            \
    vector unsigned char maski =                                        \
        vec_perm(zero, (vector unsigned char)neg1, aligni);             \
    vector unsigned char maskj =                                        \
        vec_perm(zero, (vector unsigned char)neg1, alignj);             \
    vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni);    \
    vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj);    \
    vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski);         \
    vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski);         \
    vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj);         \
    vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj);         \
    vec_st(dstAFi, i * stride, dst);                                    \
    vec_st(dstBFi, i * stride + 16, dst);                               \
    vec_st(dstAFj, j * stride, dst);                                    \
    vec_st(dstBFj, j * stride + 16, dst);                               \
} while (0)
    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}