/*
 * FFT transform with Altivec optimizations
 * Copyright (c) 2009 Loren Merritt
 *
 * This algorithm (though not any of the implementation details) is
 * based on libdjbfft by D. J. Bernstein.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * These functions are not individually interchangeable with the C versions.
 * While C takes arrays of FFTComplex, Altivec leaves intermediate results
 * in blocks as convenient to the vector size.
 * i.e. {4x real, 4x imaginary, 4x real, ...}
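 *
 * e.g. an 8-point block is stored as
 *   {r0,r1,r2,r3} {i0,i1,i2,i3} {r4,r5,r6,r7} {i4,i5,i6,i7}
 * rather than as interleaved re/im pairs.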
 *
 * I ignore the standard calling convention.
 * Instead, the following registers are treated as global constants:
 * v14: zero
 * v15..v18: cosines
 * v19..v29: permutations
 * r9: 16
 * r12: ff_cos_tabs
 * and the rest are free for local use.
 */
#if HAVE_GNU_AS && HAVE_ALTIVEC

.macro addi2 ra, imm // add 32-bit immediate
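// addi and addis each take only a 16-bit immediate, so a 32-bit constant is
// added in two halves: the high-adjusted part (\imm@ha) with addis and the
// low 16 bits (\imm@l) with addi.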
    addis   \ra, \ra, \imm@ha

.macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3
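// One 4-point transform on a single {4x re},{4x im} block: the first pair of
// vperms lines each element up with its butterfly partner, the add/sub pairs
// produce the t1..t8 intermediates, and the final vperms sort the results
// back into {r0,r1,r2,r3} / {i0,i1,i2,i3}.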
    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
    vaddfp  \a0,\a2,\a3     // {t1,t2,t6,t5}
    vsubfp  \a1,\a2,\a3     // {t3,t4,t8,t7}
    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
    vaddfp  \a0,\a2,\a3     // {r0,r1,i0,i1}
    vsubfp  \a1,\a2,\a3     // {r2,r3,i2,i3}
    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
.macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3
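// The same 4-point butterfly as FFT4, applied to two independent blocks
// (a0/a1 and b0/b1) at once, presumably so the two dependency chains can be
// interleaved for better scheduling.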
    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
    vaddfp  \a0,\a2,\a3     // {t1,t2,t6,t5}
    vsubfp  \a1,\a2,\a3     // {t3,t4,t8,t7}
    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
    vaddfp  \a0,\a2,\a3     // {r0,r1,i0,i1}
    vsubfp  \a1,\a2,\a3     // {r2,r3,i2,i3}
    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
.macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1
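// An 8-point transform: the a registers go through the same FFT4 sequence as
// above (v20..v24), the b registers {r4..r7},{i4..i7} get their own butterflies
// plus the odd-index twiddles (±1±i)/sqrt(2) applied through v17/v18, and the
// final add/sub pairs merge the halves into {r0..r3},{i0..i3} and
// {r4..r7},{i4..i7}.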
    vmrghw  \b2,\b0,\b1     // vcprm(0,s0,1,s1) // {r4,r6,i4,i6}
    vmrglw  \b3,\b0,\b1     // vcprm(2,s2,3,s3) // {r5,r7,i5,i7}
    vperm   \a2,\a0,\a1,v20 // FFT4 ...
    vaddfp  \b0,\b2,\b3     // {t1,t3,t2,t4}
    vsubfp  \b1,\b2,\b3     // {r5,r7,i5,i7}
    vperm   \b4,\b1,\b1,v25 // vcprm(2,3,0,1) // {i5,i7,r5,r7}
    vmaddfp \b1,\b1,v17,v14 // * {-1,1,1,-1}/sqrt(2)
    vmaddfp \b1,\b4,v18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9}
    vperm   \a3,\a0,\a1,v22
    vperm   \b2,\b0,\b1,v26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8}
    vperm   \b3,\b0,\b1,v27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta}
    vaddfp  \b0,\b2,\b3     // {t1,t2,t9,ta}
    vsubfp  \b1,\b2,\b3     // {t6,t5,tc,tb}
    vperm   \a2,\a0,\a1,v23
    vperm   \a3,\a0,\a1,v24
    vperm   \b2,\b0,\b1,v28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb}
    vperm   \b3,\b0,\b1,v29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc}
    vsubfp  \b0,\a2,\b2     // {r4,r5,r6,r7}
    vsubfp  \b1,\a3,\b3     // {i4,i5,i6,i7}
    vaddfp  \a0,\a2,\b2     // {r0,r1,r2,r3}
    vaddfp  \a1,\a3,\b3     // {i0,i1,i2,i3}
.macro BF d0,d1,s0,s1   // butterfly of s0, s1 (sum and difference)

.macro zip d0,d1,s0,s1  // interleave the words of s0 and s1
.macro def_fft4 interleave
fft4\interleave\()_altivec:

.macro def_fft8 interleave
fft8\interleave\()_altivec:
    FFT8    v0,v1,v2,v3,v4,v5,v6,v7,v8

.macro def_fft16 interleave
fft16\interleave\()_altivec:
    FFT4x2  v0,v1,v2,v3,v4,v5,v6,v7
    FFT8    v0,v1,v2,v3,v8,v9,v10,v11,v12
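    // Rotate the two quarter-size results (r2/i2 in v4/v5, r3/i3 in v6/v7) by
    // the twiddles in v15 (wre) and v16 (wim), with v14 (zero) seeding the
    // first multiply-add of each chain:
    //   v8  = r2*wre + i2*wim      v9  = i2*wre - r2*wim
    //   v10 = r3*wre - i3*wim      v11 = i3*wre + r3*wim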
    vmaddfp  v8,v4,v15,v14   // r2*wre
    vmaddfp  v9,v5,v15,v14   // i2*wre
    vmaddfp  v10,v6,v15,v14  // r3*wre
    vmaddfp  v11,v7,v15,v14  // i3*wre
    vmaddfp  v8,v5,v16,v8    // i2*wim
    vnmsubfp v9,v4,v16,v9    // r2*wim
    vnmsubfp v10,v7,v16,v10  // i3*wim
    vmaddfp  v11,v6,v16,v11  // r3*wim

// void pass(float *z, float *wre, int n)
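// Combines four quarter-size sub-blocks: each iteration loads one vector of
// re/im from z[0], z[o1], z[o2] and z[o3], rotates the z[o2]/z[o3] values by
// the current (wre, wim) twiddle pair exactly as in the fft16 epilogue above,
// and then butterflies them against z[0]/z[o1].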
.macro PASS interleave, suffix
fft_pass\suffix\()_altivec:
    addi    r6,r5,16        // o1+16
    addi    r8,r7,16        // o2+16
    addi    r11,r10,16      // o3+16

    vperm   v9,v9,v10,v19   // vcprm(s0,3,2,1) => wim[0 .. -3]
    lvx     v4,r3,r7        // r2 = z[o2]
    lvx     v5,r3,r8        // i2 = z[o2+16]
    lvx     v6,r3,r10       // r3 = z[o3]
    lvx     v7,r3,r11       // i3 = z[o3+16]
    vmaddfp v10,v4,v8,v14   // r2*wre
    vmaddfp v11,v5,v8,v14   // i2*wre
    vmaddfp v12,v6,v8,v14   // r3*wre
    vmaddfp v13,v7,v8,v14   // i3*wre
    lvx     v0, 0,r3        // r0 = z[0]
    lvx     v3,r3,r6        // i1 = z[o1+16]
    vmaddfp v10,v5,v9,v10   // i2*wim
    vnmsubfp v11,v4,v9,v11  // r2*wim
    vnmsubfp v12,v7,v9,v12  // i3*wim
    vmaddfp v13,v6,v9,v13   // r3*wim
    lvx     v1,r3,r9        // i0 = z[16]
    lvx     v2,r3,r5        // r1 = z[o1]
#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */

#define WORD_0  0x00,0x01,0x02,0x03
#define WORD_1  0x04,0x05,0x06,0x07
#define WORD_2  0x08,0x09,0x0a,0x0b
#define WORD_3  0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#define vcprm(a, b, c, d) .byte WORD_##a, WORD_##b, WORD_##c, WORD_##d
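// e.g. vcprm(0,1,s2,s1) emits the byte sequence 00..03, 04..07, 18..1b, 14..17:
// a vperm control vector that selects words 0 and 1 of the first source operand
// and words 2 and 1 of the second (the s-prefixed selectors).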
    .float  1, 0.92387953, M_SQRT1_2, 0.38268343
    .float  0, 0.38268343, M_SQRT1_2, 0.92387953
    .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2
    .float  M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
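// The rows above are the wre/wim vectors for the 16-point stage (cos and sin
// of pi*k/8, k = 0..3) followed by the two ±1/sqrt(2) sign patterns used by
// FFT8; fft_calc below loads this table into v15..v18 (and the permutation
// constants into v19..v29).
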
.macro lvm b, r, regs:vararg    // load r, regs... from consecutive vectors at b

.macro stvm b, r, regs:vararg   // store r, regs... to consecutive vectors at b
.macro fft_calc interleave
extfunc ff_fft_calc\interleave\()_altivec
    stpu    r1, -(160+16*PS)(r1)
    stvm    r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29  // save nonvolatile vector regs

    movrel  r6, fft_data, r11
    lvm     r6, v14, v15, v16, v17, v18, v19, v20, v21            // load the global constants
    lvm     r6, v22, v23, v24, v25, v26, v27, v28, v29

    movrel  r12, X(ff_cos_tabs), r11

    movrel  r6, fft_dispatch_tab\interleave\()_altivec, r11
    slwi    r3, r3, 2+ARCH_PPC64  // scale the dispatch-table index by sizeof(void *)
    lvm     r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29  // restore nonvolatile vector regs
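
// Split-radix recursion: each fft\n\()_altivec runs the half- and quarter-size
// transforms on its input and then tail-calls fft_pass to combine them; r0
// (the saved return address) is spilled to a per-level stack slot
// PS*(\bits-3) so the nested calls do not clobber it.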
.macro DECL_FFT suffix, bits, n, n2, n4
fft\n\suffix\()_altivec:
    stp     r0,PS*(\bits-3)(r1)
    lp      r0,PS*(\bits-3)(r1)
    b       fft_pass\suffix\()_altivec

.macro DECL_FFTS interleave, suffix
    PASS \interleave, \suffix
    DECL_FFT \suffix, 5,    32,    16,     8
    DECL_FFT \suffix, 6,    64,    32,    16
    DECL_FFT \suffix, 7,   128,    64,    32
    DECL_FFT \suffix, 8,   256,   128,    64
    DECL_FFT \suffix, 9,   512,   256,   128
    DECL_FFT \suffix,10,  1024,   512,   256
    DECL_FFT \suffix,11,  2048,  1024,   512
    DECL_FFT \suffix,12,  4096,  2048,  1024
    DECL_FFT \suffix,13,  8192,  4096,  2048
    DECL_FFT \suffix,14, 16384,  8192,  4096
    DECL_FFT \suffix,15, 32768, 16384,  8192
    DECL_FFT \suffix,16, 65536, 32768, 16384
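
// One entry per supported transform size (4 .. 65536 points), looked up by
// fft_calc above.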
fft_dispatch_tab\suffix\()_altivec:
    PTR fft4\suffix\()_altivec
    PTR fft8\suffix\()_altivec
    PTR fft16\suffix\()_altivec
    PTR fft32\suffix\()_altivec
    PTR fft64\suffix\()_altivec
    PTR fft128\suffix\()_altivec
    PTR fft256\suffix\()_altivec
    PTR fft512\suffix\()_altivec
    PTR fft1024\suffix\()_altivec
    PTR fft2048\suffix\()_altivec
    PTR fft4096\suffix\()_altivec
    PTR fft8192\suffix\()_altivec
    PTR fft16384\suffix\()_altivec
    PTR fft32768\suffix\()_altivec
    PTR fft65536\suffix\()_altivec

DECL_FFTS 1, _interleave

#endif /* HAVE_GNU_AS && HAVE_ALTIVEC */