/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#if HAVE_ALTIVEC

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
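
/* One 1-D butterfly pass of the 4x4 H.264 inverse transform, working on all
 * lanes of the four input vectors; it is applied twice, with a transpose in
 * between, to produce the full 2-D transform. */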
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
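
/* Transpose a 4x4 block of 16-bit coefficients held in the low halves of
 * a0..a3; the transposed rows end up in the low halves of b0..b3 (the upper
 * lanes hold don't-care values). */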
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#if HAVE_BIGENDIAN
#define vdst_load(d)              \
    vdst_orig = vec_ld(0, dst);   \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
#else
#define vdst_load(d)              \
    vdst = vec_vsx_ld(0, dst)
#endif

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_load();                                              \
    vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst);           \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                    \
    vec_ste(va_u32, element, (uint32_t*)dst);
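
/* Inverse-transform the 4x4 residual in block[], add it to the 4x4 pixel area
 * at dst (stride bytes per row) with unsigned saturation, and clear block[].
 * The +32 added to the DC coefficient provides rounding for the final >>6. */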
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
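
/* One 1-D pass of the 8x8 H.264 inverse transform: the a0..a7/b0..b7 butterfly
 * of the reference C implementation, expressed with vector adds, subtracts and
 * arithmetic shifts. */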
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#if HAVE_BIGENDIAN
#define GET_2PERM(ldv, stv, d)  \
    ldv = vec_lvsl(0, d);       \
    stv = vec_lvsr(8, d);
#define dstv_load(d)              \
    vec_u8 hv = vec_ld( 0, d );   \
    vec_u8 lv = vec_ld( 7, d);    \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );
#define dest_unligned_store(d)                                \
    vec_u8 edgehv;                                            \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );      \
    lv    = vec_sel( lv, bodyv, edgelv );                     \
    vec_st( lv, 7, d );                                       \
    hv    = vec_ld( 0, d );                                   \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );             \
    hv    = vec_sel( hv, bodyv, edgehv );                     \
    vec_st( hv, 0, d )
#else

#define GET_2PERM(ldv, stv, d) {}
#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
#define dest_unligned_store(d)\
    vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\
    vec_vsx_st(dst8, 0, d)
#endif /* HAVE_BIGENDIAN */

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    dstv_load(dest);                                           \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                   \
    vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv);       \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);      \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);          \
    /* unaligned store */                                      \
    dest_unligned_store(dest);\
}
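
/* Inverse-transform an 8x8 residual block and add it to dst: a row pass,
 * TRANSPOSE8, a column pass, then each row is shifted right by 6, added to
 * the destination pixels with saturation and stored through the
 * (endianness-dependent) unaligned store helpers above. */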
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv, perm_stv;
    GET_2PERM(perm_ldv, perm_stv, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
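
/* Shared code for blocks where only the DC coefficient is non-zero:
 * dc = (block[0] + 32) >> 6 is splatted, split into saturating add (dcplus)
 * and subtract (dcminus) byte vectors, rotated to the block's offset within
 * the 16-byte line, and applied to four rows per iteration. Lanes outside the
 * block carry zero, so neighbouring pixels are left untouched. */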
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    vec_s32 v_dc32;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    v_dc32 = vec_lde(0, &dc);
    dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);

    if (size == 4)
        dc16 = VEC_SLD16(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
#if !HAVE_BIGENDIAN
    aligner = vec_perm(aligner, zero_u8v, vcswapc());
#endif
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
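
/* The following wrappers walk the (sub)blocks of a macroblock through scan8[]
 * and the non-zero-count table nnzc[], dispatching each block to either the
 * DC-only path or the full IDCT-and-add routine, mirroring the generic C
 * dispatchers in libavcodec/h264idct_template.c. */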
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz) {
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz) {
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}
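
/* Helpers for the horizontal luma deblocking filter: readAndTranspose16x6
 * (below) gathers the six pixel columns around a vertical edge as rows,
 * transpose4x16 turns the four filtered rows back into columns, and write16x4
 * scatters them to memory four bytes per line. */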
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int +   int_dst_stride) = *(src_int + 1);
    *(dst_int + 2*int_dst_stride) = *(src_int + 2);
    *(dst_int + 3*int_dst_stride) = *(src_int + 3);
    *(dst_int + 4*int_dst_stride) = *(src_int + 4);
    *(dst_int + 5*int_dst_stride) = *(src_int + 5);
    *(dst_int + 6*int_dst_stride) = *(src_int + 6);
    *(dst_int + 7*int_dst_stride) = *(src_int + 7);
    *(dst_int + 8*int_dst_stride) = *(src_int + 8);
    *(dst_int + 9*int_dst_stride) = *(src_int + 9);
    *(dst_int +10*int_dst_stride) = *(src_int + 10);
    *(dst_int +11*int_dst_stride) = *(src_int + 11);
    *(dst_int +12*int_dst_stride) = *(src_int + 12);
    *(dst_int +13*int_dst_stride) = *(src_int + 13);
    *(dst_int +14*int_dst_stride) = *(src_int + 14);
    *(dst_int +15*int_dst_stride) = *(src_int + 15);
}

/** @brief performs a 6x16 transpose of data in src, and stores it to dst
    @todo FIXME: see if we can spare some vec_lvsl() calls by factoring them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);            \
    register vec_u8 r1  = unaligned_load(   src_stride, src);            \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);            \
                                                                          \
    r8  = unaligned_load( 8*src_stride, src);                             \
    r9  = unaligned_load( 9*src_stride, src);                             \
    r10 = unaligned_load(10*src_stride, src);                             \
    r11 = unaligned_load(11*src_stride, src);                             \
    r12 = unaligned_load(12*src_stride, src);                             \
    r13 = unaligned_load(13*src_stride, src);                             \
                                                                          \
    /*Merge first pairs*/                                                 \
    r0 = vec_mergeh(r0, r8);  /*0, 8*/                                    \
    r1 = vec_mergeh(r1, r9);  /*1, 9*/                                    \
    r2 = vec_mergeh(r2, r10); /*2,10*/                                    \
    r3 = vec_mergeh(r3, r11); /*3,11*/                                    \
    r4 = vec_mergeh(r4, r12); /*4,12*/                                    \
    r5 = vec_mergeh(r5, r13); /*5,13*/                                    \
    r6 = vec_mergeh(r6, r14); /*6,14*/                                    \
    r7 = vec_mergeh(r7, r15); /*7,15*/                                    \
                                                                          \
    /*Merge second pairs*/                                                \
    r8  = vec_mergeh(r0, r4);  /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);  /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);  /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);  /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);  /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);  /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);  /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);  /*3,7,11,15 set 1*/                        \
                                                                          \
    /*Third merge*/                                                       \
    r0 = vec_mergeh(r8,  r12); /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8,  r12); /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9,  r13); /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                    \
                                                                          \
    /*Final merge*/                                                       \
    r8  = vec_mergeh(r0, r4);  /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);  /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);  /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);  /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);  /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);  /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                  \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
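
/* In-place filtering of p0/q0 for bS < 4. The scalar formula
 * delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc0, tc0) is computed
 * entirely in unsigned bytes by working with vec_avg() results biased by 160
 * (A0v); delta and -delta are then applied with saturating adds/subtracts. */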
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                       \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                             \
                                                                                              \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                  \
    register vec_u8 q1minus;                                                                  \
    register vec_u8 p0minus;                                                                  \
    register vec_u8 stage1;                                                                   \
    register vec_u8 stage2;                                                                   \
    register vec_u8 vec160;                                                                   \
    register vec_u8 delta;                                                                    \
    register vec_u8 deltaneg;                                                                 \
                                                                                              \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                 \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                       \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                 \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                       \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                 \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                       \
    delta = vec_subs(stage2, vec160);          /* d */                                        \
    deltaneg = vec_min(tc0masked, deltaneg);                                                  \
    delta = vec_min(tc0masked, delta);                                                        \
    p0 = vec_subs(p0, deltaneg);                                                              \
    q0 = vec_subs(q0, delta);                                                                 \
    p0 = vec_adds(p0, delta);                                                                 \
    q0 = vec_adds(q0, deltaneg);                                                              \
}

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                            \
    register vec_u8 alphavec;                                                                \
    register vec_u8 betavec;                                                                 \
    register vec_u8 mask;                                                                    \
    register vec_u8 p1mask;                                                                  \
    register vec_u8 q1mask;                                                                  \
    register vector signed char tc0vec;                                                      \
    register vec_u8 finaltc0;                                                                \
    register vec_u8 tc0masked;                                                               \
    register vec_u8 newp1;                                                                   \
    register vec_u8 newq1;                                                                   \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    AV_COPY32(temp, tc0);                                                                    \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */               \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
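
/* Unidirectional weighted prediction over w pixels per row: each pixel becomes
 * clip_uint8((pixel * weight + offset') >> log2_denom), where offset' folds
 * the user offset (pre-shifted by log2_denom) and the rounding term together,
 * so one vec_adds()/vec_sra() pair per half-vector suffices. */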
static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp =(vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
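
/* Bidirectional weighted prediction: each destination pixel becomes
 * clip_uint8((dst * weightd + src * weights + offset') >> (log2_denom + 1)),
 * with offset' = ((offset + 1) | 1) << log2_denom providing both the merged
 * offset and the rounding term. */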
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp =(vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
        v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
        v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);

        /* for 8-wide blocks, make sure the source pixels land in the half
         * that will actually be combined with the destination half below */
        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT(8)

#endif /* HAVE_ALTIVEC */
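
/* Runtime dispatch: the AltiVec versions are installed only when the CPU
 * reports AltiVec support and the stream is 8-bit. h264_idct_add8 is
 * restricted to chroma_format_idc <= 1 because h264_idct_add8_altivec assumes
 * the 4:2:0 chroma layout (four 4x4 blocks per chroma plane). */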
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
#endif /* HAVE_ALTIVEC */
}