/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#if HAVE_ALTIVEC

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

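/* One 1-D pass of the 4x4 H.264 integer inverse transform (the "core"
 * transform butterfly): stage 1 builds the four intermediate values from
 * Y[0..3] using only adds, subtracts and >>1, stage 2 recombines them into
 * the outputs. The macro is applied once per dimension, with
 * VEC_TRANSPOSE_4 in between. */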
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

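/* Reconstruct one 4-pixel row: load 16 bytes around the (possibly
 * unaligned) dst, rotate the row to the front with the vec_lvsl() permute
 * mask, zero-extend to 16 bits, add the residual, pack back to unsigned
 * bytes with saturation, and store just the 4 bytes of interest via a
 * splat + vec_ste() of a single 32-bit element. */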
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);  \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_packsu(va, zero_s16v);               \
    va_u32 = vec_splat((vec_u32)va_u8, 0);           \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

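/* One 1-D pass of the 8x8 H.264 inverse transform, a direct vectorization
 * of the scalar formulas kept in the comments below: the even half
 * (a0/a2/a4/a6, b0/b2/b4/b6) is a 4-point transform of the even inputs,
 * the odd half adds the >>1 and >>2 correction terms, and the final stage
 * mirrors b0..b7 into the eight outputs. */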
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5) + SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

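/* Add one row of >>6-scaled IDCT output to 8 pixels of dst with unsigned
 * saturation. Classic AltiVec unaligned read-modify-write: two aligned
 * loads cover the 8 target bytes, vec_perm extracts them, and on the way
 * back the sel mask plus vec_sel() merge the new bytes into each aligned
 * quadword without disturbing the neighbouring pixels. */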
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                          \
    vec_u8 hv = vec_ld( 0, dest );                                \
    vec_u8 lv = vec_ld( 7, dest );                                \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );           \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                      \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);          \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);         \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);             \
    vec_u8 edgehv;                                                \
    /* unaligned store */                                         \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );     \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );          \
    lv = vec_sel( lv, bodyv, edgelv );                            \
    vec_st( lv, 7, dest );                                        \
    hv = vec_ld( 0, dest );                                       \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );                 \
    hv = vec_sel( hv, bodyv, edgehv );                            \
    vec_st( hv, 0, dest );                                        \
}

static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

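/* DC-only blocks: the whole inverse transform collapses to adding the
 * single rounded DC value to every pixel. The value is split into a
 * nonnegative part (dcplus) and the magnitude of a negative part
 * (dcminus), each splatted across a vector; because vec_adds()/vec_subs()
 * saturate on unsigned bytes, the result is clipped to 0..255 without any
 * explicit compare. */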
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

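/* Block dispatchers: for each 4x4 (or 8x8) block that needs work, pick the
 * cheap DC-only path when only the DC coefficient is present, and the full
 * IDCT otherwise. */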
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i++) {
        int nnz = nnzc[ scan8[i] ];
        if (nnz) {
            if (nnz == 1 && block[i*16])
                h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else
                h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i++) {
        if (nnzc[ scan8[i] ])
            h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if (block[i*16])
            h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i += 4) {
        int nnz = nnzc[ scan8[i] ];
        if (nnz) {
            if (nnz == 1 && block[i*16])
                h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else
                h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for (i = j * 16; i < j * 16 + 4; i++) {
            if (nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if (block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

#define transpose4x16(r0, r1, r2, r3) {       \
    register vec_u8 r4;                       \
    register vec_u8 r5;                       \
    register vec_u8 r6;                       \
    register vec_u8 r7;                       \
                                              \
    r4 = vec_mergeh(r0, r2); /* 0, 2 set 0 */ \
    r5 = vec_mergel(r0, r2); /* 0, 2 set 1 */ \
    r6 = vec_mergeh(r1, r3); /* 1, 3 set 0 */ \
    r7 = vec_mergel(r1, r3); /* 1, 3 set 1 */ \
                                              \
    r0 = vec_mergeh(r4, r6); /* all set 0 */  \
    r1 = vec_mergel(r4, r6); /* all set 1 */  \
    r2 = vec_mergeh(r5, r7); /* all set 2 */  \
    r3 = vec_mergel(r5, r7); /* all set 3 */  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int +    int_dst_stride) = *(src_int + 1);
    *(dst_int +  2*int_dst_stride) = *(src_int + 2);
    *(dst_int +  3*int_dst_stride) = *(src_int + 3);
    *(dst_int +  4*int_dst_stride) = *(src_int + 4);
    *(dst_int +  5*int_dst_stride) = *(src_int + 5);
    *(dst_int +  6*int_dst_stride) = *(src_int + 6);
    *(dst_int +  7*int_dst_stride) = *(src_int + 7);
    *(dst_int +  8*int_dst_stride) = *(src_int + 8);
    *(dst_int +  9*int_dst_stride) = *(src_int + 9);
    *(dst_int + 10*int_dst_stride) = *(src_int + 10);
    *(dst_int + 11*int_dst_stride) = *(src_int + 11);
    *(dst_int + 12*int_dst_stride) = *(src_int + 12);
    *(dst_int + 13*int_dst_stride) = *(src_int + 13);
    *(dst_int + 14*int_dst_stride) = *(src_int + 14);
    *(dst_int + 15*int_dst_stride) = *(src_int + 15);
}

/** @brief reads 16 rows of 16 bytes from src and transposes them, leaving
    the six columns needed by the loop filter in r8-r13
    @todo FIXME: see if we can't spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);   \
    register vec_u8 r1  = unaligned_load(   src_stride, src);   \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);   \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);   \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);   \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);   \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);   \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);   \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);   \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);   \
                                                                \
    r8  = unaligned_load( 8*src_stride, src);                   \
    r9  = unaligned_load( 9*src_stride, src);                   \
    r10 = unaligned_load(10*src_stride, src);                   \
    r11 = unaligned_load(11*src_stride, src);                   \
    r12 = unaligned_load(12*src_stride, src);                   \
    r13 = unaligned_load(13*src_stride, src);                   \
                                                                \
    /* Merge first pairs */                                     \
    r0 = vec_mergeh(r0, r8);  /* 0, 8 */                        \
    r1 = vec_mergeh(r1, r9);  /* 1, 9 */                        \
    r2 = vec_mergeh(r2, r10); /* 2,10 */                        \
    r3 = vec_mergeh(r3, r11); /* 3,11 */                        \
    r4 = vec_mergeh(r4, r12); /* 4,12 */                        \
    r5 = vec_mergeh(r5, r13); /* 5,13 */                        \
    r6 = vec_mergeh(r6, r14); /* 6,14 */                        \
    r7 = vec_mergeh(r7, r15); /* 7,15 */                        \
                                                                \
    /* Merge second pairs */                                    \
    r8  = vec_mergeh(r0, r4); /* 0,4, 8,12 set 0 */             \
    r9  = vec_mergel(r0, r4); /* 0,4, 8,12 set 1 */             \
    r10 = vec_mergeh(r1, r5); /* 1,5, 9,13 set 0 */             \
    r11 = vec_mergel(r1, r5); /* 1,5, 9,13 set 1 */             \
    r12 = vec_mergeh(r2, r6); /* 2,6,10,14 set 0 */             \
    r13 = vec_mergel(r2, r6); /* 2,6,10,14 set 1 */             \
    r14 = vec_mergeh(r3, r7); /* 3,7,11,15 set 0 */             \
    r15 = vec_mergel(r3, r7); /* 3,7,11,15 set 1 */             \
                                                                \
    /* Third merge */                                           \
    r0 = vec_mergeh(r8,  r12); /* 0,2,4,6,8,10,12,14 set 0 */   \
    r1 = vec_mergel(r8,  r12); /* 0,2,4,6,8,10,12,14 set 1 */   \
    r2 = vec_mergeh(r9,  r13); /* 0,2,4,6,8,10,12,14 set 2 */   \
    r4 = vec_mergeh(r10, r14); /* 1,3,5,7,9,11,13,15 set 0 */   \
    r5 = vec_mergel(r10, r14); /* 1,3,5,7,9,11,13,15 set 1 */   \
    r6 = vec_mergeh(r11, r15); /* 1,3,5,7,9,11,13,15 set 2 */   \
    /* Don't need to compute 3 and 7 */                         \
                                                                \
    /* Final merge */                                           \
    r8  = vec_mergeh(r0, r4); /* all set 0 */                   \
    r9  = vec_mergel(r0, r4); /* all set 1 */                   \
    r10 = vec_mergeh(r1, r5); /* all set 2 */                   \
    r11 = vec_mergel(r1, r5); /* all set 3 */                   \
    r12 = vec_mergeh(r2, r6); /* all set 4 */                   \
    r13 = vec_mergel(r2, r6); /* all set 5 */                   \
    /* Don't need to compute 14 and 15 */                       \
}

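/* Branchless |x - y| for unsigned bytes: with saturating subtracts,
 * (x -| y) is zero when x <= y and (y -| x) is zero when y <= x, so
 * OR-ing the two yields the absolute difference. */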
// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

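/* vec_avg() computes (a + b + 1) >> 1, which is one more than the
 * truncating (a + b) >> 1 exactly when a + b is odd, i.e. when the low
 * bits of a and b differ. The xor-and-with-1 term detects that case so it
 * can be subtracted back out, giving the exact
 * (p2 + ((p0 + q0 + 1) >> 1)) >> 1 required below before clipping to
 * [p1 - tc0, p1 + tc0]. */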
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1 - tc0, p1 + tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2 ^ avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2 + ((p0 + q0 + 1) >> 1)) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

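/* Core p0/q0 filter, done entirely in unsigned bytes by biasing the signed
 * delta by 160 (the A0v constant, 10 << 4): vec_nor(x, x) yields 255 - x,
 * so averages of complements express the signed differences (p1 - q1) and
 * (q0 - p0) as biased unsigned values. delta and -delta are then clipped
 * against tc0 and applied with saturating adds/subs; only one of the pair
 * is nonzero. */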
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                                 \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                      \
    register vec_u8 q1minus;                                                                      \
    register vec_u8 p0minus;                                                                      \
    register vec_u8 stage1;                                                                       \
    register vec_u8 stage2;                                                                       \
    register vec_u8 vec160;                                                                       \
    register vec_u8 delta;                                                                        \
    register vec_u8 deltaneg;                                                                     \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */                                      \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */                            \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */      \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */                                      \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */                            \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */  \
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */   \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);      /* -d */                                            \
    delta = vec_subs(stage2, vec160);         /* d */                                             \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}

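/* Filters one 16-pixel edge held in vector registers: builds the
 * alpha/beta activity mask and the per-pixel tc0 values (each tc0[i]
 * expanded to 4 bytes), conditionally filters p1/q1 where |p2 - p0| resp.
 * |q2 - q0| < beta (subtracting the 0xFF mask bumps tc by 1, as the spec
 * requires), and finally filters p0/q0. */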
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {             \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                             \
    register vec_u8 alphavec;                                                                 \
    register vec_u8 betavec;                                                                  \
    register vec_u8 mask;                                                                     \
    register vec_u8 p1mask;                                                                   \
    register vec_u8 q1mask;                                                                   \
    register vector signed char tc0vec;                                                       \
    register vec_u8 finaltc0;                                                                 \
    register vec_u8 tc0masked;                                                                \
    register vec_u8 newp1;                                                                    \
    register vec_u8 newq1;                                                                    \
                                                                                              \
    temp[0] = alpha;                                                                          \
    temp[1] = beta;                                                                           \
    alphavec = vec_ld(0, temp);                                                               \
    betavec = vec_splat(alphavec, 0x1);                                                       \
    alphavec = vec_splat(alphavec, 0x0);                                                      \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */            \
                                                                                              \
    AV_COPY32(temp, tc0);                                                                     \
    tc0vec = vec_ld(0, (signed char*)temp);                                                   \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                      \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                      \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));   /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                    /* tc = tc0 */               \
                                                                                              \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                                \
    p1mask = vec_and(p1mask, mask);                              /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                              \
    finaltc0 = vec_sub(finaltc0, p1mask);                        /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                       \
    /* end if */                                                                              \
                                                                                              \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                                \
    q1mask = vec_and(q1mask, mask);                              /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                              \
    finaltc0 = vec_sub(finaltc0, q1mask);                        /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                       \
    /* end if */                                                                              \
                                                                                              \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                             \
    p1 = newp1;                                                                               \
    q1 = newq1;                                                                               \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

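/* Weighted prediction: each pixel becomes
 * (pix * weight + offset') >> log2_denom, with the rounding constant
 * folded into offset'. Rows are processed 16 bytes at a time; for w == 8
 * the alignment test decides whether the 8 target pixels sit in the first
 * or the second half of the loaded vector, so only that half is computed. */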
static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if (log2_denom) offset += 1 << (log2_denom - 1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

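/* Bidirectional weighted prediction:
 * (src * weights + dst * weightd + offset') >> (log2_denom + 1), where
 * offset' folds the combined offset and the rounding term into a single
 * odd bias, as the spec's bidirectional rounding requires. The w == 8 case
 * copies one half of vsrc over the other so the matching halves of src and
 * dst line up. */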
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom + 1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)
#endif /* HAVE_ALTIVEC */

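/* Runtime dispatch: install the AltiVec versions only when the CPU
 * actually reports AltiVec, and only for the 8-bit-depth functions
 * implemented above. */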
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
#endif /* HAVE_ALTIVEC */
}