/*
 * Copyright (C) 2003  James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"

#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavcodec/fdctdsp.h"

#include "fdct.h"

#if HAVE_ALTIVEC
#define vs16(v) ((vector signed short) (v))
#define vs32(v) ((vector signed int) (v))
#define  vu8(v) ((vector unsigned char) (v))
#define vu16(v) ((vector unsigned short) (v))
#define vu32(v) ((vector unsigned int) (v))
#define C1     0.98078525066375732421875000 /* cos(1 * PI / 16) */
#define C2     0.92387950420379638671875000 /* cos(2 * PI / 16) */
#define C3     0.83146959543228149414062500 /* cos(3 * PI / 16) */
#define C4     0.70710676908493041992187500 /* cos(4 * PI / 16) */
#define C5     0.55557024478912353515625000 /* cos(5 * PI / 16) */
#define C6     0.38268342614173889160156250 /* cos(6 * PI / 16) */
#define C7     0.19509032368659973144531250 /* cos(7 * PI / 16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */
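
/* These literals are the exact decimal expansions of the IEEE single
 * precision values nearest to cos(k * PI / 16) and sqrt(2); spelling them
 * out in full keeps the constant table bit-exact regardless of how the
 * host libm rounds.  Note that C4 = cos(PI / 4) = 1 / sqrt(2). */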
#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * (C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * (C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * (C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * (C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * (C5 - C3))
static const vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};
#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
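
/* Each weight sits in a fixed lane of one of the three constant vectors,
 * so every LD_Wx used inside the transform macros is a single vec_splat
 * of a register already loaded from fdctconsts, never a memory load. */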
#define FDCTROW(b0, b1, b2, b3, b4, b5, b6, b7) /* {{{ */           \
    x0 = vec_add(b0, b7);             /* x0 = b0 + b7; */           \
    x7 = vec_sub(b0, b7);             /* x7 = b0 - b7; */           \
    x1 = vec_add(b1, b6);             /* x1 = b1 + b6; */           \
    x6 = vec_sub(b1, b6);             /* x6 = b1 - b6; */           \
    x2 = vec_add(b2, b5);             /* x2 = b2 + b5; */           \
    x5 = vec_sub(b2, b5);             /* x5 = b2 - b5; */           \
    x3 = vec_add(b3, b4);             /* x3 = b3 + b4; */           \
    x4 = vec_sub(b3, b4);             /* x4 = b3 - b4; */           \
                                                                    \
    b7 = vec_add(x0, x3);             /* b7 = x0 + x3; */           \
    b1 = vec_add(x1, x2);             /* b1 = x1 + x2; */           \
    b0 = vec_add(b7, b1);             /* b0 = b7 + b1; */           \
    b4 = vec_sub(b7, b1);             /* b4 = b7 - b1; */           \
                                                                    \
    b2   = vec_sub(x0, x3);           /* b2 = x0 - x3; */           \
    b6   = vec_sub(x1, x2);           /* b6 = x1 - x2; */           \
    b5   = vec_add(b6, b2);           /* b5 = b6 + b2; */           \
    cnst = LD_W2;                                                   \
    b5   = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */           \
    cnst = LD_W1;                                                   \
    b2   = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */      \
    cnst = LD_W0;                                                   \
    b6   = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */      \
                                                                    \
    x0   = vec_add(x4, x7);           /* x0 = x4 + x7; */           \
    x1   = vec_add(x5, x6);           /* x1 = x5 + x6; */           \
    x2   = vec_add(x4, x6);           /* x2 = x4 + x6; */           \
    x3   = vec_add(x5, x7);           /* x3 = x5 + x7; */           \
    x8   = vec_add(x2, x3);           /* x8 = x2 + x3; */           \
    cnst = LD_W3;                                                   \
    x8   = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */           \
                                                                    \
    cnst = LD_W8;                                                   \
    x0   = vec_madd(cnst, x0, mzero); /* x0 *= W8; */               \
    cnst = LD_W9;                                                   \
    x1   = vec_madd(cnst, x1, mzero); /* x1 *= W9; */               \
    cnst = LD_WA;                                                   \
    x2   = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */      \
    cnst = LD_WB;                                                   \
    x3   = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */      \
                                                                    \
    cnst = LD_W4;                                                   \
    b7   = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */      \
    cnst = LD_W5;                                                   \
    b5   = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */      \
    cnst = LD_W6;                                                   \
    b3   = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */      \
    cnst = LD_W7;                                                   \
    b1   = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */      \
                                                                    \
    b7 = vec_add(b7, x2);             /* b7 = b7 + x2; */           \
    b5 = vec_add(b5, x3);             /* b5 = b5 + x3; */           \
    b3 = vec_add(b3, x2);             /* b3 = b3 + x2; */           \
    b1 = vec_add(b1, x3)              /* b1 = b1 + x3; */           \
    /* }}} */
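
/* FDCTROW above and FDCTCOL below implement the same 8-point DCT
 * butterfly network; since every vector argument carries four float
 * lanes, one invocation computes four independent 8-point transforms.
 * Note that within this file the row pass never calls FDCTROW directly:
 * it is written out inline in ff_fdct_altivec() so that the first few
 * butterflies can run on 16-bit integer vectors before the data is
 * widened to float. */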
#define FDCTCOL(b0, b1, b2, b3, b4, b5, b6, b7) /* {{{ */           \
    x0 = vec_add(b0, b7);             /* x0 = b0 + b7; */           \
    x7 = vec_sub(b0, b7);             /* x7 = b0 - b7; */           \
    x1 = vec_add(b1, b6);             /* x1 = b1 + b6; */           \
    x6 = vec_sub(b1, b6);             /* x6 = b1 - b6; */           \
    x2 = vec_add(b2, b5);             /* x2 = b2 + b5; */           \
    x5 = vec_sub(b2, b5);             /* x5 = b2 - b5; */           \
    x3 = vec_add(b3, b4);             /* x3 = b3 + b4; */           \
    x4 = vec_sub(b3, b4);             /* x4 = b3 - b4; */           \
                                                                    \
    b7 = vec_add(x0, x3);             /* b7 = x0 + x3; */           \
    b1 = vec_add(x1, x2);             /* b1 = x1 + x2; */           \
    b0 = vec_add(b7, b1);             /* b0 = b7 + b1; */           \
    b4 = vec_sub(b7, b1);             /* b4 = b7 - b1; */           \
                                                                    \
    b2   = vec_sub(x0, x3);           /* b2 = x0 - x3; */           \
    b6   = vec_sub(x1, x2);           /* b6 = x1 - x2; */           \
    b5   = vec_add(b6, b2);           /* b5 = b6 + b2; */           \
    cnst = LD_W2;                                                   \
    b5   = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */           \
    cnst = LD_W1;                                                   \
    b2   = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */      \
    cnst = LD_W0;                                                   \
    b6   = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */      \
                                                                    \
    x0   = vec_add(x4, x7);           /* x0 = x4 + x7; */           \
    x1   = vec_add(x5, x6);           /* x1 = x5 + x6; */           \
    x2   = vec_add(x4, x6);           /* x2 = x4 + x6; */           \
    x3   = vec_add(x5, x7);           /* x3 = x5 + x7; */           \
    x8   = vec_add(x2, x3);           /* x8 = x2 + x3; */           \
    cnst = LD_W3;                                                   \
    x8   = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */           \
                                                                    \
    cnst = LD_W8;                                                   \
    x0   = vec_madd(cnst, x0, mzero); /* x0 *= W8; */               \
    cnst = LD_W9;                                                   \
    x1   = vec_madd(cnst, x1, mzero); /* x1 *= W9; */               \
    cnst = LD_WA;                                                   \
    x2   = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */      \
    cnst = LD_WB;                                                   \
    x3   = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */      \
                                                                    \
    cnst = LD_W4;                                                   \
    b7   = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */      \
    cnst = LD_W5;                                                   \
    b5   = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */      \
    cnst = LD_W6;                                                   \
    b3   = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */      \
    cnst = LD_W7;                                                   \
    b1   = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */      \
                                                                    \
    b7 = vec_add(b7, x2);             /* b7 += x2; */               \
    b5 = vec_add(b5, x3);             /* b5 += x3; */               \
    b3 = vec_add(b3, x2);             /* b3 += x2; */               \
    b1 = vec_add(b1, x3)              /* b1 += x3; */               \
    /* }}} */
/* two dimensional discrete cosine transform */
void ff_fdct_altivec(int16_t *block)
{
    vector signed short *bp;
    const vector float *cp = fdctconsts;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;
    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero  = ((vector float) vec_splat_u32(-1));
    mzero  = ((vector float) vec_sl(vu32(mzero), vu32(mzero)));
    cnsts0 = vec_ld(0, cp);
    cp++;
    cnsts1 = vec_ld(0, cp);
    cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */
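
    /* mzero is built without touching memory: vec_splat_u32(-1) sets all
     * 32 bits of every lane, and the vslw underlying vec_sl uses only the
     * low five bits of each shift count, so shifting the lanes left by 31
     * keeps just the sign bits.  That leaves -0.0f in every lane, the
     * additive identity the vec_madd calls below expect. */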
    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl, a, b) vec_merge ## hl(vs16(a), vs16(b))
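
    /* The transpose takes three rounds of 16-bit merges: vec_mergeh and
     * vec_mergel interleave rows n and n + 4, and merging the resulting
     * vectors twice more lands every coefficient in its transposed slot.
     * The byte offset 16 * 4 addresses the row four vectors below bp. */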
    bp  = (vector signed short *) block;
    b00 = ((vector float) vec_ld(0,      bp));
    b40 = ((vector float) vec_ld(16 * 4, bp));
    b01 = ((vector float) MERGE_S16(h, b00, b40));
    b11 = ((vector float) MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float) vec_ld(0,      bp));
    b50 = ((vector float) vec_ld(16 * 4, bp));
    b21 = ((vector float) MERGE_S16(h, b10, b50));
    b31 = ((vector float) MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float) vec_ld(0,      bp));
    b60 = ((vector float) vec_ld(16 * 4, bp));
    b41 = ((vector float) MERGE_S16(h, b20, b60));
    b51 = ((vector float) MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float) vec_ld(0,      bp));
    b70 = ((vector float) vec_ld(16 * 4, bp));
    b61 = ((vector float) MERGE_S16(h, b30, b70));
    b71 = ((vector float) MERGE_S16(l, b30, b70));
    x0 = ((vector float) MERGE_S16(h, b01, b41));
    x1 = ((vector float) MERGE_S16(l, b01, b41));
    x2 = ((vector float) MERGE_S16(h, b11, b51));
    x3 = ((vector float) MERGE_S16(l, b11, b51));
    x4 = ((vector float) MERGE_S16(h, b21, b61));
    x5 = ((vector float) MERGE_S16(l, b21, b61));
    x6 = ((vector float) MERGE_S16(h, b31, b71));
    x7 = ((vector float) MERGE_S16(l, b31, b71));
    b00 = ((vector float) MERGE_S16(h, x0, x4));
    b10 = ((vector float) MERGE_S16(l, x0, x4));
    b20 = ((vector float) MERGE_S16(h, x1, x5));
    b30 = ((vector float) MERGE_S16(l, x1, x5));
    b40 = ((vector float) MERGE_S16(h, x2, x6));
    b50 = ((vector float) MERGE_S16(l, x2, x6));
    b60 = ((vector float) MERGE_S16(h, x3, x7));
    b70 = ((vector float) MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */
    /* Some of the initial calculations can be done as vector short
     * before conversion to vector float.  The following code section
     * takes advantage of this. */

    /* fdct rows {{{ */
    x0 = ((vector float) vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float) vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float) vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float) vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float) vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float) vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float) vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float) vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float) vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float) vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float) vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float) vec_sub(vs16(b70), vs16(b10)));
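
    /* CTF0(n) below widens row n from shorts to a pair of float vectors:
     * vec_unpackh / vec_unpackl sign-extend the upper and lower four
     * elements to 32-bit ints, and vec_ctf(..., 0) converts those ints
     * to float with no fractional scaling. */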
#define CTF0(n)                                                    \
    b ## n ## 1 = ((vector float) vec_unpackl(vs16(b ## n ## 0))); \
    b ## n ## 0 = ((vector float) vec_unpackh(vs16(b ## n ## 0))); \
    b ## n ## 1 = vec_ctf(vs32(b ## n ## 1), 0);                   \
    b ## n ## 0 = vec_ctf(vs32(b ## n ## 0), 0)

    CTF0(0);
    CTF0(4);
    b20 = ((vector float) vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float) vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0
    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0   = vec_madd(cnst, x0, mzero);
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20  = vec_madd(cnst, b20, x0);
    b21  = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60  = vec_madd(cnst, b60, x0);
    b61  = vec_madd(cnst, b61, x1);
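
    /* CTFX(x, b) below does the same short-to-float widening for the
     * odd-part terms still held as shorts in x4..x7, producing the float
     * pairs (b70, b71), (b50, b51), (b30, b31) and (b10, b11). */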
#define CTFX(x, b)                                  \
    b ## 0 = ((vector float) vec_unpackh(vs16(x))); \
    b ## 1 = ((vector float) vec_unpackl(vs16(x))); \
    b ## 0 = vec_ctf(vs32(b ## 0), 0);              \
    b ## 1 = vec_ctf(vs32(b ## 1), 0)

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX
    x0   = vec_add(b70, b10);
    x1   = vec_add(b50, b30);
    x2   = vec_add(b70, b30);
    x3   = vec_add(b50, b10);
    x8   = vec_add(x2, x3);
    cnst = LD_W3;
    x8   = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0   = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2   = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3   = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70  = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50  = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30  = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10  = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);
    x0   = vec_add(b71, b11);
    x1   = vec_add(b51, b31);
    x2   = vec_add(b71, b31);
    x3   = vec_add(b51, b11);
    x8   = vec_add(x2, x3);
    cnst = LD_W3;
    x8   = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0   = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2   = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3   = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71  = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51  = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31  = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11  = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
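
    /* After the row pass each row n lives in the float pair (bn0, bn1).
     * The merges below transpose the 8x8 float matrix one 4x4 quadrant at
     * a time, so that FDCTCOL can then run down the columns with the same
     * code shape as the rows. */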
    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */
    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);
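
    /* CTS(n) below finishes one row: vec_round rounds to the nearest
     * integral float, vec_cts(..., 0) converts to saturated 32-bit ints,
     * vec_pack narrows the two halves back to eight shorts, and vec_st
     * writes them over the corresponding input row. */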
    /* round, convert back to short {{{ */
#define CTS(n)                                                  \
    b ## n ## 0 = vec_round(b ## n ## 0);                       \
    b ## n ## 1 = vec_round(b ## n ## 1);                       \
    b ## n ## 0 = ((vector float) vec_cts(b ## n ## 0, 0));     \
    b ## n ## 1 = ((vector float) vec_cts(b ## n ## 1, 0));     \
    b ## n ## 0 = ((vector float) vec_pack(vs32(b ## n ## 0),   \
                                           vs32(b ## n ## 1))); \
    vec_st(vs16(b ## n ## 0), 0, bp)

    bp = (vector signed short *) block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */
}
#endif /* HAVE_ALTIVEC */
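
/* Runtime dispatch: the AltiVec fdct is installed only when the CPU
 * reports AltiVec support, the codec runs at 8-bit depth, and the caller
 * either requested FF_DCT_ALTIVEC or left the choice to FF_DCT_AUTO. */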
av_cold void ff_fdctdsp_init_ppc(FDCTDSPContext *c, AVCodecContext *avctx,
                                 unsigned high_bit_depth)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (!high_bit_depth) {
        if (avctx->dct_algo == FF_DCT_AUTO ||
            avctx->dct_algo == FF_DCT_ALTIVEC) {
            c->fdct = ff_fdct_altivec;
        }
    }
#endif /* HAVE_ALTIVEC */
}