/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vc1dsp.h"

#if HAVE_ALTIVEC

// main steps of 8x8 transform
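// Multiplies by the VC-1 transform coefficients (12, 16, 15, 9, 6 and 4) are
// decomposed into shifts and adds, e.g. 12*x is built as ((x << 2) << 1) + (x << 2).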
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

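// Scale the first-pass results: >> 3; the +4 rounding bias is added through
// STEP8's vec_rnd argument.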
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

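// Scale the second-pass results: >> 7 with VC-1's asymmetric rounding - the
// +64 bias comes in through STEP8's vec_rnd, and outputs 4-7 get an extra +1.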
#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

/* main steps of 4x4 transform */
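/* Coefficients 17, 22 and 10 of the 4-point transform are likewise built
 * from shifts and adds, e.g. 17*x = (x << 4) + x. */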
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)

/** Do inverse transform on 8x8 block
 */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
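    // vec_splat_s32() only takes a 5-bit immediate, so the +64 second-pass
    // rounding bias is built as 4 << 4.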
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

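    // Load the eight rows; each 16-byte vector holds one row of eight
    // 16-bit coefficients.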
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

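    // Sign-extend each row to 32 bits: s0-s7 take one half of the lanes,
    // s8-sF the other.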
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
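    // Transpose so the second pass runs along the other dimension of the block.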
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}

/** Do inverse transform on 8x4 part of block
 */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

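    // Transpose first so each lane carries one row; the 8-point horizontal
    // transform then combines the eight vectors.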
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

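    // Vertical pass: the 4-point transform combines the four row vectors
    // elementwise, i.e. runs down each column.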
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

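    // Read 8 destination pixels and zero-extend them to 16 bits. On
    // big-endian, an lvsl-based permute emulates the unaligned load; on
    // little-endian, a VSX unaligned load plus vec_mergeh does the same job.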
#if HAVE_BIGENDIAN
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
#define GET_TMP2(dst, p) \
    tmp = vec_ld (0, dst); \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
#else
#define GET_TMP2(dst, p) \
    tmp = vec_vsx_ld (0, dst); \
    tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
#endif

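    // Add the 16-bit residual to one row of dest pixels with unsigned
    // saturation, then store the 8 bytes as two 32-bit element stores.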
#define ADD(dest, src, perm) \
    GET_TMP2(dest, perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0) dest += stride;
    ADD (dest, src1, perm1) dest += stride;
    ADD (dest, src2, perm0) dest += stride;
    ADD (dest, src3, perm1)
}

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

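// Instantiate the shared H.264 chroma MC template twice - once with the put
// op and once with the avg op - to get the no-rounding VC-1 chroma functions.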
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#endif /* HAVE_ALTIVEC */

av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
#endif /* HAVE_ALTIVEC */
}
363 | } |