/* -----------------------------------------------------------------------------------------------------------
3 Software License for The Fraunhofer FDK AAC Codec Library for Android
5 © Copyright 1995 - 2013 Fraunhofer-Gesellschaft zur Förderung der angewandten Forschung e.V.
9 The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software that implements
10 the MPEG Advanced Audio Coding ("AAC") encoding and decoding scheme for digital audio.
11 This FDK AAC Codec software is intended to be used on a wide variety of Android devices.
13 AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient general perceptual
14 audio codecs. AAC-ELD is considered the best-performing full-bandwidth communications codec by
15 independent studies and is widely deployed. AAC has been standardized by ISO and IEC as part
16 of the MPEG specifications.
18 Patent licenses for necessary patent claims for the FDK AAC Codec (including those of Fraunhofer)
19 may be obtained through Via Licensing (www.vialicensing.com) or through the respective patent owners
20 individually for the purpose of encoding or decoding bit streams in products that are compliant with
21 the ISO/IEC MPEG audio standards. Please note that most manufacturers of Android devices already license
22 these patent claims through Via Licensing or directly from the patent owners, and therefore FDK AAC Codec
23 software may already be covered under those patent licenses when it is used for those licensed purposes only.
25 Commercially-licensed AAC software libraries, including floating-point versions with enhanced sound quality,
26 are also available from Fraunhofer. Users are encouraged to check the Fraunhofer website for additional
27 applications information and documentation.
31 Redistribution and use in source and binary forms, with or without modification, are permitted without
32 payment of copyright license fees provided that you satisfy the following conditions:
34 You must retain the complete text of this software license in redistributions of the FDK AAC Codec or
35 your modifications thereto in source code form.
37 You must retain the complete text of this software license in the documentation and/or other materials
38 provided with redistributions of the FDK AAC Codec or your modifications thereto in binary form.
39 You must make available free of charge copies of the complete source code of the FDK AAC Codec and your
40 modifications thereto to recipients of copies in binary form.
42 The name of Fraunhofer may not be used to endorse or promote products derived from this library without
43 prior written permission.
45 You may not charge copyright license fees for anyone to use, copy or distribute the FDK AAC Codec
46 software or your modifications thereto.
48 Your modified versions of the FDK AAC Codec must carry prominent notices stating that you changed the software
49 and the date of any change. For modified versions of the FDK AAC Codec, the term
50 "Fraunhofer FDK AAC Codec Library for Android" must be replaced by the term
51 "Third-Party Modified Version of the Fraunhofer FDK AAC Codec Library for Android."
55 NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without limitation the patents of Fraunhofer,
56 ARE GRANTED BY THIS SOFTWARE LICENSE. Fraunhofer provides no warranty of patent non-infringement with
57 respect to this software.
59 You may use this FDK AAC Codec software or modifications thereto only for purposes that are authorized
60 by appropriate patent licenses.
64 This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright holders and contributors
65 "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, including but not limited to the implied warranties
66 of merchantability and fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
67 CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary, or consequential damages,
68 including but not limited to procurement of substitute goods or services; loss of use, data, or profits,
69 or business interruption, however caused and on any theory of liability, whether in contract, strict
70 liability, or tort (including negligence), arising in any way out of the use of this software, even if
71 advised of the possibility of such damage.
73 5. CONTACT INFORMATION
75 Fraunhofer Institute for Integrated Circuits IIS
76 Attention: Audio and Multimedia Departments - FDK AAC LL
78 91058 Erlangen, Germany
80 www.iis.fraunhofer.de/amm
81 amm-info@iis.fraunhofer.de
82 ----------------------------------------------------------------------------------------------------------- */
84 /*************************** Fraunhofer IIS FDK Tools **********************
86 Author(s): Josef Hoepfl, DSP Solutions
87 Description: Fix point FFT
89 ******************************************************************************/
94 #include "FDK_tools_rom.h"
100 #define F3C(x) STC(x)
102 #define C31 (F3C(0x91261468)) /* FL2FXCONST_DBL(-0.86602540) */
104 /* Performs the FFT of length 3 according to the algorithm after winograd.
105 No scaling of the input vector because the scaling is already done in the rotation vector. */
106 static FORCEINLINE
void fft3(FIXP_DBL
*RESTRICT pDat
)
111 r1
= pDat
[2] + pDat
[4];
112 r2
= fMult((pDat
[2] - pDat
[4]), C31
);
113 pDat
[0] = pDat
[0] + r1
;
114 r1
= pDat
[0] - r1
- (r1
>>1);
117 s1
= pDat
[3] + pDat
[5];
118 s2
= fMult((pDat
[3] - pDat
[5]), C31
);
119 pDat
[1] = pDat
[1] + s1
;
120 s1
= pDat
[1] - s1
- (s1
>>1);
130 #define F5C(x) STC(x)
132 #define C51 (F5C(0x79bc3854)) /* FL2FXCONST_DBL( 0.95105652) */
133 #define C52 (F5C(0x9d839db0)) /* FL2FXCONST_DBL(-1.53884180/2) */
134 #define C53 (F5C(0xd18053ce)) /* FL2FXCONST_DBL(-0.36327126) */
135 #define C54 (F5C(0x478dde64)) /* FL2FXCONST_DBL( 0.55901699) */
136 #define C55 (F5C(0xb0000001)) /* FL2FXCONST_DBL(-1.25/2) */
138 /* performs the FFT of length 5 according to the algorithm after winograd */
139 static FORCEINLINE
void fft5(FIXP_DBL
*RESTRICT pDat
)
141 FIXP_DBL r1
,r2
,r3
,r4
;
142 FIXP_DBL s1
,s2
,s3
,s4
;
146 r1
= pDat
[2] + pDat
[8];
147 r4
= pDat
[2] - pDat
[8];
148 r3
= pDat
[4] + pDat
[6];
149 r2
= pDat
[4] - pDat
[6];
150 t
= fMult((r1
-r3
), C54
);
152 pDat
[0] = pDat
[0] + r1
;
153 /* Bit shift left because of the constant C55 which was scaled with the factor 0.5 because of the representation of
154 the values as fracts */
155 r1
= pDat
[0] + (fMultDiv2(r1
, C55
) <<(2));
158 t
= fMult((r4
+ r2
), C51
);
159 /* Bit shift left because of the constant C55 which was scaled with the factor 0.5 because of the representation of
160 the values as fracts */
161 r4
= t
+ (fMultDiv2(r4
, C52
) <<(2));
162 r2
= t
+ fMult(r2
, C53
);
165 s1
= pDat
[3] + pDat
[9];
166 s4
= pDat
[3] - pDat
[9];
167 s3
= pDat
[5] + pDat
[7];
168 s2
= pDat
[5] - pDat
[7];
169 t
= fMult((s1
- s3
), C54
);
171 pDat
[1] = pDat
[1] + s1
;
172 /* Bit shift left because of the constant C55 which was scaled with the factor 0.5 because of the representation of
173 the values as fracts */
174 s1
= pDat
[1] + (fMultDiv2(s1
, C55
) <<(2));
177 t
= fMult((s4
+ s2
), C51
);
178 /* Bit shift left because of the constant C55 which was scaled with the factor 0.5 because of the representation of
179 the values as fracts */
180 s4
= t
+ (fMultDiv2(s4
, C52
) <<(2));
181 s2
= t
+ fMult(s2
, C53
);
203 /* Performs the FFT of length 15. It is split into FFTs of length 3 and length 5. */
204 static inline void fft15(FIXP_DBL
*pInput
)
206 FIXP_DBL aDst
[2*N15
];
207 FIXP_DBL aDst1
[2*N15
];
210 /* Sort input vector for fft's of length 3
211 input3(0:2) = [input(0) input(5) input(10)];
212 input3(3:5) = [input(3) input(8) input(13)];
213 input3(6:8) = [input(6) input(11) input(1)];
214 input3(9:11) = [input(9) input(14) input(4)];
215 input3(12:14) = [input(12) input(2) input(7)]; */
217 const FIXP_DBL
*pSrc
= pInput
;
218 FIXP_DBL
*RESTRICT pDst
= aDst
;
219 /* Merge 3 loops into one, skip call of fft3 */
220 for(i
=0,l
=0,k
=0; i
<N5
; i
++, k
+=6)
223 pDst
[k
+1] = pSrc
[l
+1];
229 pDst
[k
+3] = pSrc
[l
+1];
234 pDst
[k
+5] = pSrc
[l
+1];
235 l
+= (2*N5
) + (2*N3
);
239 /* fft3 merged with shift right by 2 loop */
243 r1
= pDst
[k
+2] + pDst
[k
+4];
244 r2
= fMult((pDst
[k
+2] - pDst
[k
+4]), C31
);
246 pDst
[k
+0] = (s1
+ r1
)>>2;
250 s1
= pDst
[k
+3] + pDst
[k
+5];
251 s2
= fMult((pDst
[k
+3] - pDst
[k
+5]), C31
);
253 pDst
[k
+1] = (r3
+ s1
)>>2;
257 pDst
[k
+2] = (r1
- s2
)>>2;
258 pDst
[k
+4] = (r1
+ s2
)>>2;
259 pDst
[k
+3] = (s1
+ r2
)>>2;
260 pDst
[k
+5] = (s1
- r2
)>>2;
263 /* Sort input vector for fft's of length 5
264 input5(0:4) = [output3(0) output3(3) output3(6) output3(9) output3(12)];
265 input5(5:9) = [output3(1) output3(4) output3(7) output3(10) output3(13)];
266 input5(10:14) = [output3(2) output3(5) output3(8) output3(11) output3(14)]; */
267 /* Merge 2 loops into one, brings about 10% */
269 const FIXP_DBL
*pSrc
= aDst
;
270 FIXP_DBL
*RESTRICT pDst
= aDst1
;
271 for(i
=0,l
=0,k
=0; i
<N3
; i
++, k
+=10)
274 pDst
[k
+0] = pSrc
[l
+0];
275 pDst
[k
+1] = pSrc
[l
+1];
276 pDst
[k
+2] = pSrc
[l
+0+(2*N3
)];
277 pDst
[k
+3] = pSrc
[l
+1+(2*N3
)];
278 pDst
[k
+4] = pSrc
[l
+0+(4*N3
)];
279 pDst
[k
+5] = pSrc
[l
+1+(4*N3
)];
280 pDst
[k
+6] = pSrc
[l
+0+(6*N3
)];
281 pDst
[k
+7] = pSrc
[l
+1+(6*N3
)];
282 pDst
[k
+8] = pSrc
[l
+0+(8*N3
)];
283 pDst
[k
+9] = pSrc
[l
+1+(8*N3
)];
287 /* Sort output vector of length 15
288 output = [out5(0) out5(6) out5(12) out5(3) out5(9)
289 out5(10) out5(1) out5(7) out5(13) out5(4)
290 out5(5) out5(11) out5(2) out5(8) out5(14)]; */
291 /* optimize clumsy loop, brings about 5% */
293 const FIXP_DBL
*pSrc
= aDst1
;
294 FIXP_DBL
*RESTRICT pDst
= pInput
;
295 for(i
=0,l
=0,k
=0; i
<N3
; i
++, k
+=10)
298 pDst
[k
+1] = pSrc
[l
+1];
303 pDst
[k
+3] = pSrc
[l
+1];
308 pDst
[k
+5] = pSrc
[l
+1];
313 pDst
[k
+7] = pSrc
[l
+1];
318 pDst
[k
+9] = pSrc
[l
+1];
319 l
+= 2; /* no modulo check needed, it cannot occur */
324 #define W_PiFOURTH STC(0x5a82799a)
325 #ifndef SUMDIFF_PIFOURTH
326 #define SUMDIFF_PIFOURTH(diff,sum,a,b) \
329 wa = fMultDiv2(a, W_PiFOURTH);\
330 wb = fMultDiv2(b, W_PiFOURTH);\
336 /* This version is more overflow save, but less cycle optimal. */
337 #define SUMDIFF_EIGTH(x, y, ix, iy, vr, vi, ur, ui) \
338 vr = (x[ 0 + ix]>>1) + (x[16 + ix]>>1); /* Re A + Re B */ \
339 vi = (x[ 8 + ix]>>1) + (x[24 + ix]>>1); /* Re C + Re D */ \
340 ur = (x[ 1 + ix]>>1) + (x[17 + ix]>>1); /* Im A + Im B */ \
341 ui = (x[ 9 + ix]>>1) + (x[25 + ix]>>1); /* Im C + Im D */ \
342 y[ 0 + iy] = vr + vi; /* Re A' = ReA + ReB +ReC + ReD */ \
343 y[ 4 + iy] = vr - vi; /* Re C' = -(ReC+ReD) + (ReA+ReB) */ \
344 y[ 1 + iy] = ur + ui; /* Im A' = sum of imag values */ \
345 y[ 5 + iy] = ur - ui; /* Im C' = -Im C -Im D +Im A +Im B */ \
346 vr -= x[16 + ix]; /* Re A - Re B */ \
347 vi = vi - x[24 + ix]; /* Re C - Re D */ \
348 ur -= x[17 + ix]; /* Im A - Im B */ \
349 ui = ui - x[25 + ix]; /* Im C - Im D */ \
350 y[ 2 + iy] = ui + vr; /* Re B' = Im C - Im D + Re A - Re B */ \
351 y[ 6 + iy] = vr - ui; /* Re D' = -Im C + Im D + Re A - Re B */ \
352 y[ 3 + iy] = ur - vi; /* Im B'= -Re C + Re D + Im A - Im B */ \
353 y[ 7 + iy] = vi + ur; /* Im D'= Re C - Re D + Im A - Im B */
355 static const FIXP_STP fft16_w16
[2] = { STCP(0x7641af3d, 0x30fbc54d), STCP(0x30fbc54d, 0x7641af3d) };
358 inline void fft_16(FIXP_DBL
*RESTRICT x
)
360 FIXP_DBL vr
, vi
, ur
, ui
;
363 SUMDIFF_EIGTH(x
, y
, 0, 0, vr
, vi
, ur
, ui
);
364 SUMDIFF_EIGTH(x
, y
, 4, 8, vr
, vi
, ur
, ui
);
365 SUMDIFF_EIGTH(x
, y
, 2, 16, vr
, vi
, ur
, ui
);
366 SUMDIFF_EIGTH(x
, y
, 6, 24, vr
, vi
, ur
, ui
);
374 x
[ 0] = ur
+ (vr
>>1);
375 x
[ 1] = ui
+ (vi
>>1);
376 x
[ 8] = ur
- (vr
>>1);
377 x
[ 9] = ui
- (vi
>>1);
385 x
[ 4] = ur
+ (vr
>>1);
386 x
[ 5] = ui
- (vi
>>1);
387 x
[12] = ur
- (vr
>>1);
388 x
[13] = ui
+ (vi
>>1);
396 x
[16] = ur
+ (vr
>>1);
397 x
[17] = ui
+ (vi
>>1);
398 x
[24] = ur
- (vr
>>1);
399 x
[25] = ui
- (vi
>>1);
407 x
[20] = ur
+ (vr
>>1);
408 x
[21] = ui
- (vi
>>1);
409 x
[28] = ur
- (vr
>>1);
410 x
[29] = ui
+ (vi
>>1);
414 SUMDIFF_PIFOURTH(vi
, vr
, y
[10], y
[11])
415 //vr = fMultDiv2((y[11] + y[10]),W_PiFOURTH);
416 //vi = fMultDiv2((y[11] - y[10]),W_PiFOURTH);
419 x
[ 2] = (ur
>>1) + vr
;
420 x
[ 3] = (ui
>>1) + vi
;
421 x
[10] = (ur
>>1) - vr
;
422 x
[11] = (ui
>>1) - vi
;
426 SUMDIFF_PIFOURTH(vr
, vi
, y
[14], y
[15])
429 x
[ 6] = (ur
>>1) + vr
;
430 x
[ 7] = (ui
>>1) - vi
;
431 x
[14] = (ur
>>1) - vr
;
432 x
[15] = (ui
>>1) + vi
;
436 SUMDIFF_PIFOURTH(vi
, vr
, y
[26], y
[27])
439 x
[18] = (ur
>>1) + vr
;
440 x
[19] = (ui
>>1) + vi
;
441 x
[26] = (ur
>>1) - vr
;
442 x
[27] = (ui
>>1) - vi
;
446 SUMDIFF_PIFOURTH(vr
, vi
, y
[30], y
[31])
449 x
[22] = (ur
>>1) + vr
;
450 x
[23] = (ui
>>1) - vi
;
451 x
[30] = (ur
>>1) - vr
;
452 x
[31] = (ui
>>1) + vi
;
460 x
[ 0] = ur
+ (vr
>>1);
461 x
[ 1] = ui
+ (vi
>>1);
462 x
[16] = ur
- (vr
>>1);
463 x
[17] = ui
- (vi
>>1);
471 x
[ 8] = ur
+ (vr
>>1);
472 x
[ 9] = ui
- (vi
>>1);
473 x
[24] = ur
- (vr
>>1);
474 x
[25] = ui
+ (vi
>>1);
478 cplxMultDiv2(&vi
, &vr
, x
[19], x
[18], fft16_w16
[0]);
481 x
[ 2] = (ur
>>1) + vr
;
482 x
[ 3] = (ui
>>1) + vi
;
483 x
[18] = (ur
>>1) - vr
;
484 x
[19] = (ui
>>1) - vi
;
488 cplxMultDiv2(&vr
, &vi
, x
[27], x
[26], fft16_w16
[0]);
491 x
[10] = (ur
>>1) + vr
;
492 x
[11] = (ui
>>1) - vi
;
493 x
[26] = (ur
>>1) - vr
;
494 x
[27] = (ui
>>1) + vi
;
498 SUMDIFF_PIFOURTH(vi
, vr
, x
[20], x
[21])
501 x
[ 4] = (ur
>>1) + vr
;
502 x
[ 5] = (ui
>>1) + vi
;
503 x
[20] = (ur
>>1) - vr
;
504 x
[21] = (ui
>>1) - vi
;
508 SUMDIFF_PIFOURTH(vr
, vi
, x
[28], x
[29])
511 x
[12] = (ur
>>1) + vr
;
512 x
[13] = (ui
>>1) - vi
;
513 x
[28] = (ur
>>1) - vr
;
514 x
[29] = (ui
>>1) + vi
;
518 cplxMultDiv2(&vi
, &vr
, x
[23], x
[22], fft16_w16
[1]);
521 x
[ 6] = (ur
>>1) + vr
;
522 x
[ 7] = (ui
>>1) + vi
;
523 x
[22] = (ur
>>1) - vr
;
524 x
[23] = (ui
>>1) - vi
;
528 cplxMultDiv2(&vr
, &vi
, x
[31], x
[30], fft16_w16
[1]);
531 x
[14] = (ur
>>1) + vr
;
532 x
[15] = (ui
>>1) - vi
;
533 x
[30] = (ur
>>1) - vr
;
534 x
[31] = (ui
>>1) + vi
;
537 #ifndef FUNCTION_fft_32
538 static const FIXP_STP fft32_w32
[6] =
540 STCP (0x7641af3d, 0x30fbc54d), STCP(0x30fbc54d, 0x7641af3d), STCP(0x7d8a5f40, 0x18f8b83c),
541 STCP (0x6a6d98a4, 0x471cece7), STCP(0x471cece7, 0x6a6d98a4), STCP(0x18f8b83c, 0x7d8a5f40)
545 inline void fft_32(FIXP_DBL
*x
)
548 #define W_PiFOURTH STC(0x5a82799a)
550 FIXP_DBL vr
,vi
,ur
,ui
;
557 /////////////////////////////////////////////////////////////////////////////////////////
560 vr
= (x
[ 0] + x
[32])>>1; /* Re A + Re B */
561 vi
= (x
[16] + x
[48]); /* Re C + Re D */
562 ur
= (x
[ 1] + x
[33])>>1; /* Im A + Im B */
563 ui
= (x
[17] + x
[49]); /* Im C + Im D */
565 y
[ 0] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
566 y
[ 4] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
567 y
[ 1] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
568 y
[ 5] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
570 vr
-= x
[32]; /* Re A - Re B */
571 vi
= (vi
>>1) - x
[48]; /* Re C - Re D */
572 ur
-= x
[33]; /* Im A - Im B */
573 ui
= (ui
>>1) - x
[49]; /* Im C - Im D */
575 y
[ 2] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
576 y
[ 6] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
577 y
[ 3] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
578 y
[ 7] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
581 vr
= (x
[ 8] + x
[40])>>1; /* Re A + Re B */
582 vi
= (x
[24] + x
[56]); /* Re C + Re D */
583 ur
= (x
[ 9] + x
[41])>>1; /* Im A + Im B */
584 ui
= (x
[25] + x
[57]); /* Im C + Im D */
586 y
[ 8] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
587 y
[12] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
588 y
[ 9] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
589 y
[13] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
591 vr
-= x
[40]; /* Re A - Re B */
592 vi
= (vi
>>1) - x
[56]; /* Re C - Re D */
593 ur
-= x
[41]; /* Im A - Im B */
594 ui
= (ui
>>1) - x
[57]; /* Im C - Im D */
596 y
[10] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
597 y
[14] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
598 y
[11] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
599 y
[15] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
602 vr
= (x
[ 4] + x
[36])>>1; /* Re A + Re B */
603 vi
= (x
[20] + x
[52]); /* Re C + Re D */
604 ur
= (x
[ 5] + x
[37])>>1; /* Im A + Im B */
605 ui
= (x
[21] + x
[53]); /* Im C + Im D */
607 y
[16] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
608 y
[20] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
609 y
[17] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
610 y
[21] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
612 vr
-= x
[36]; /* Re A - Re B */
613 vi
= (vi
>>1) - x
[52]; /* Re C - Re D */
614 ur
-= x
[37]; /* Im A - Im B */
615 ui
= (ui
>>1) - x
[53]; /* Im C - Im D */
617 y
[18] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
618 y
[22] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
619 y
[19] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
620 y
[23] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
623 vr
= (x
[12] + x
[44])>>1; /* Re A + Re B */
624 vi
= (x
[28] + x
[60]); /* Re C + Re D */
625 ur
= (x
[13] + x
[45])>>1; /* Im A + Im B */
626 ui
= (x
[29] + x
[61]); /* Im C + Im D */
628 y
[24] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
629 y
[28] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
630 y
[25] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
631 y
[29] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
633 vr
-= x
[44]; /* Re A - Re B */
634 vi
= (vi
>>1) - x
[60]; /* Re C - Re D */
635 ur
-= x
[45]; /* Im A - Im B */
636 ui
= (ui
>>1) - x
[61]; /* Im C - Im D */
638 y
[26] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
639 y
[30] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
640 y
[27] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
641 y
[31] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
644 vr
= (x
[ 2] + x
[34])>>1; /* Re A + Re B */
645 vi
= (x
[18] + x
[50]); /* Re C + Re D */
646 ur
= (x
[ 3] + x
[35])>>1; /* Im A + Im B */
647 ui
= (x
[19] + x
[51]); /* Im C + Im D */
649 y
[32] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
650 y
[36] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
651 y
[33] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
652 y
[37] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
654 vr
-= x
[34]; /* Re A - Re B */
655 vi
= (vi
>>1) - x
[50]; /* Re C - Re D */
656 ur
-= x
[35]; /* Im A - Im B */
657 ui
= (ui
>>1) - x
[51]; /* Im C - Im D */
659 y
[34] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
660 y
[38] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
661 y
[35] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
662 y
[39] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
665 vr
= (x
[10] + x
[42])>>1; /* Re A + Re B */
666 vi
= (x
[26] + x
[58]); /* Re C + Re D */
667 ur
= (x
[11] + x
[43])>>1; /* Im A + Im B */
668 ui
= (x
[27] + x
[59]); /* Im C + Im D */
670 y
[40] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
671 y
[44] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
672 y
[41] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
673 y
[45] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
675 vr
-= x
[42]; /* Re A - Re B */
676 vi
= (vi
>>1) - x
[58]; /* Re C - Re D */
677 ur
-= x
[43]; /* Im A - Im B */
678 ui
= (ui
>>1) - x
[59]; /* Im C - Im D */
680 y
[42] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
681 y
[46] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
682 y
[43] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
683 y
[47] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
686 vr
= (x
[ 6] + x
[38])>>1; /* Re A + Re B */
687 vi
= (x
[22] + x
[54]); /* Re C + Re D */
688 ur
= (x
[ 7] + x
[39])>>1; /* Im A + Im B */
689 ui
= (x
[23] + x
[55]); /* Im C + Im D */
691 y
[48] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
692 y
[52] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
693 y
[49] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
694 y
[53] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
696 vr
-= x
[38]; /* Re A - Re B */
697 vi
= (vi
>>1) - x
[54]; /* Re C - Re D */
698 ur
-= x
[39]; /* Im A - Im B */
699 ui
= (ui
>>1) - x
[55]; /* Im C - Im D */
701 y
[50] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
702 y
[54] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
703 y
[51] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
704 y
[55] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
707 vr
= (x
[14] + x
[46])>>1; /* Re A + Re B */
708 vi
= (x
[30] + x
[62]); /* Re C + Re D */
709 ur
= (x
[15] + x
[47])>>1; /* Im A + Im B */
710 ui
= (x
[31] + x
[63]); /* Im C + Im D */
712 y
[56] = vr
+ (vi
>>1); /* Re A' = ReA + ReB +ReC + ReD */
713 y
[60] = vr
- (vi
>>1); /* Re C' = -(ReC+ReD) + (ReA+ReB) */
714 y
[57] = ur
+ (ui
>>1); /* Im A' = sum of imag values */
715 y
[61] = ur
- (ui
>>1); /* Im C' = -Im C -Im D +Im A +Im B */
717 vr
-= x
[46]; /* Re A - Re B */
718 vi
= (vi
>>1) - x
[62]; /* Re C - Re D */
719 ur
-= x
[47]; /* Im A - Im B */
720 ui
= (ui
>>1) - x
[63]; /* Im C - Im D */
722 y
[58] = ui
+ vr
; /* Re B' = Im C - Im D + Re A - Re B */
723 y
[62] = vr
- ui
; /* Re D' = -Im C + Im D + Re A - Re B */
724 y
[59] = ur
- vi
; /* Im B'= -Re C + Re D + Im A - Im B */
725 y
[63] = vi
+ ur
; /* Im D'= Re C - Re D + Im A - Im B */
728 FIXP_DBL
*xt
= &x
[0];
729 FIXP_DBL
*yt
= &y
[0];
738 xt
[ 0] = ur
+ (vr
>>1);
739 xt
[ 1] = ui
+ (vi
>>1);
740 xt
[ 8] = ur
- (vr
>>1);
741 xt
[ 9] = ui
- (vi
>>1);
747 xt
[ 4] = ur
+ (vr
>>1);
748 xt
[ 5] = ui
- (vi
>>1);
749 xt
[12] = ur
- (vr
>>1);
750 xt
[13] = ui
+ (vi
>>1);
752 SUMDIFF_PIFOURTH(vi
, vr
, yt
[10], yt
[11])
755 xt
[ 2] = (ur
>>1) + vr
;
756 xt
[ 3] = (ui
>>1) + vi
;
757 xt
[10] = (ur
>>1) - vr
;
758 xt
[11] = (ui
>>1) - vi
;
760 SUMDIFF_PIFOURTH(vr
, vi
, yt
[14], yt
[15])
764 xt
[ 6] = (ur
>>1) + vr
;
765 xt
[ 7] = (ui
>>1) - vi
;
766 xt
[14] = (ur
>>1) - vr
;
767 xt
[15] = (ui
>>1) + vi
;
776 x
[ 0] = ur
+ (vr
>>1);
777 x
[ 1] = ui
+ (vi
>>1);
778 x
[16] = ur
- (vr
>>1);
779 x
[17] = ui
- (vi
>>1);
785 x
[ 8] = ur
+ (vr
>>1);
786 x
[ 9] = ui
- (vi
>>1);
787 x
[24] = ur
- (vr
>>1);
788 x
[25] = ui
+ (vi
>>1);
794 x
[32] = ur
+ (vr
>>1);
795 x
[33] = ui
+ (vi
>>1);
796 x
[48] = ur
- (vr
>>1);
797 x
[49] = ui
- (vi
>>1);
803 x
[40] = ur
+ (vr
>>1);
804 x
[41] = ui
- (vi
>>1);
805 x
[56] = ur
- (vr
>>1);
806 x
[57] = ui
+ (vi
>>1);
808 cplxMultDiv2(&vi
, &vr
, x
[19], x
[18], fft32_w32
[0]);
811 x
[ 2] = (ur
>>1) + vr
;
812 x
[ 3] = (ui
>>1) + vi
;
813 x
[18] = (ur
>>1) - vr
;
814 x
[19] = (ui
>>1) - vi
;
816 cplxMultDiv2(&vr
, &vi
, x
[27], x
[26], fft32_w32
[0]);
819 x
[10] = (ur
>>1) + vr
;
820 x
[11] = (ui
>>1) - vi
;
821 x
[26] = (ur
>>1) - vr
;
822 x
[27] = (ui
>>1) + vi
;
824 cplxMultDiv2(&vi
, &vr
, x
[51], x
[50], fft32_w32
[0]);
827 x
[34] = (ur
>>1) + vr
;
828 x
[35] = (ui
>>1) + vi
;
829 x
[50] = (ur
>>1) - vr
;
830 x
[51] = (ui
>>1) - vi
;
832 cplxMultDiv2(&vr
, &vi
, x
[59], x
[58], fft32_w32
[0]);
835 x
[42] = (ur
>>1) + vr
;
836 x
[43] = (ui
>>1) - vi
;
837 x
[58] = (ur
>>1) - vr
;
838 x
[59] = (ui
>>1) + vi
;
840 SUMDIFF_PIFOURTH(vi
, vr
, x
[20], x
[21])
843 x
[ 4] = (ur
>>1) + vr
;
844 x
[ 5] = (ui
>>1) + vi
;
845 x
[20] = (ur
>>1) - vr
;
846 x
[21] = (ui
>>1) - vi
;
848 SUMDIFF_PIFOURTH(vr
, vi
, x
[28], x
[29])
851 x
[12] = (ur
>>1) + vr
;
852 x
[13] = (ui
>>1) - vi
;
853 x
[28] = (ur
>>1) - vr
;
854 x
[29] = (ui
>>1) + vi
;
856 SUMDIFF_PIFOURTH(vi
, vr
, x
[52], x
[53])
859 x
[36] = (ur
>>1) + vr
;
860 x
[37] = (ui
>>1) + vi
;
861 x
[52] = (ur
>>1) - vr
;
862 x
[53] = (ui
>>1) - vi
;
864 SUMDIFF_PIFOURTH(vr
, vi
, x
[60], x
[61])
867 x
[44] = (ur
>>1) + vr
;
868 x
[45] = (ui
>>1) - vi
;
869 x
[60] = (ur
>>1) - vr
;
870 x
[61] = (ui
>>1) + vi
;
873 cplxMultDiv2(&vi
, &vr
, x
[23], x
[22], fft32_w32
[1]);
876 x
[ 6] = (ur
>>1) + vr
;
877 x
[ 7] = (ui
>>1) + vi
;
878 x
[22] = (ur
>>1) - vr
;
879 x
[23] = (ui
>>1) - vi
;
881 cplxMultDiv2(&vr
, &vi
, x
[31], x
[30], fft32_w32
[1]);
884 x
[14] = (ur
>>1) + vr
;
885 x
[15] = (ui
>>1) - vi
;
886 x
[30] = (ur
>>1) - vr
;
887 x
[31] = (ui
>>1) + vi
;
889 cplxMultDiv2(&vi
, &vr
, x
[55], x
[54], fft32_w32
[1]);
892 x
[38] = (ur
>>1) + vr
;
893 x
[39] = (ui
>>1) + vi
;
894 x
[54] = (ur
>>1) - vr
;
895 x
[55] = (ui
>>1) - vi
;
897 cplxMultDiv2(&vr
, &vi
, x
[63], x
[62], fft32_w32
[1]);
901 x
[46] = (ur
>>1) + vr
;
902 x
[47] = (ui
>>1) - vi
;
903 x
[62] = (ur
>>1) - vr
;
904 x
[63] = (ui
>>1) + vi
;
910 x
[ 0] = ur
+ (vr
>>1);
911 x
[ 1] = ui
+ (vi
>>1);
912 x
[32] = ur
- (vr
>>1);
913 x
[33] = ui
- (vi
>>1);
919 x
[16] = ur
+ (vr
>>1);
920 x
[17] = ui
- (vi
>>1);
921 x
[48] = ur
- (vr
>>1);
922 x
[49] = ui
+ (vi
>>1);
924 cplxMultDiv2(&vi
, &vr
, x
[35], x
[34], fft32_w32
[2]);
927 x
[ 2] = (ur
>>1) + vr
;
928 x
[ 3] = (ui
>>1) + vi
;
929 x
[34] = (ur
>>1) - vr
;
930 x
[35] = (ui
>>1) - vi
;
932 cplxMultDiv2(&vr
, &vi
, x
[51], x
[50], fft32_w32
[2]);
935 x
[18] = (ur
>>1) + vr
;
936 x
[19] = (ui
>>1) - vi
;
937 x
[50] = (ur
>>1) - vr
;
938 x
[51] = (ui
>>1) + vi
;
940 cplxMultDiv2(&vi
, &vr
, x
[37], x
[36], fft32_w32
[0]);
943 x
[ 4] = (ur
>>1) + vr
;
944 x
[ 5] = (ui
>>1) + vi
;
945 x
[36] = (ur
>>1) - vr
;
946 x
[37] = (ui
>>1) - vi
;
948 cplxMultDiv2(&vr
, &vi
, x
[53], x
[52], fft32_w32
[0]);
951 x
[20] = (ur
>>1) + vr
;
952 x
[21] = (ui
>>1) - vi
;
953 x
[52] = (ur
>>1) - vr
;
954 x
[53] = (ui
>>1) + vi
;
956 cplxMultDiv2(&vi
, &vr
, x
[39], x
[38], fft32_w32
[3]);
959 x
[ 6] = (ur
>>1) + vr
;
960 x
[ 7] = (ui
>>1) + vi
;
961 x
[38] = (ur
>>1) - vr
;
962 x
[39] = (ui
>>1) - vi
;
964 cplxMultDiv2(&vr
, &vi
, x
[55], x
[54], fft32_w32
[3]);
967 x
[22] = (ur
>>1) + vr
;
968 x
[23] = (ui
>>1) - vi
;
969 x
[54] = (ur
>>1) - vr
;
970 x
[55] = (ui
>>1) + vi
;
972 SUMDIFF_PIFOURTH(vi
, vr
, x
[40], x
[41])
975 x
[ 8] = (ur
>>1) + vr
;
976 x
[ 9] = (ui
>>1) + vi
;
977 x
[40] = (ur
>>1) - vr
;
978 x
[41] = (ui
>>1) - vi
;
980 SUMDIFF_PIFOURTH(vr
, vi
, x
[56], x
[57])
983 x
[24] = (ur
>>1) + vr
;
984 x
[25] = (ui
>>1) - vi
;
985 x
[56] = (ur
>>1) - vr
;
986 x
[57] = (ui
>>1) + vi
;
988 cplxMultDiv2(&vi
, &vr
, x
[43], x
[42], fft32_w32
[4]);
992 x
[10] = (ur
>>1) + vr
;
993 x
[11] = (ui
>>1) + vi
;
994 x
[42] = (ur
>>1) - vr
;
995 x
[43] = (ui
>>1) - vi
;
997 cplxMultDiv2(&vr
, &vi
, x
[59], x
[58], fft32_w32
[4]);
1000 x
[26] = (ur
>>1) + vr
;
1001 x
[27] = (ui
>>1) - vi
;
1002 x
[58] = (ur
>>1) - vr
;
1003 x
[59] = (ui
>>1) + vi
;
1005 cplxMultDiv2(&vi
, &vr
, x
[45], x
[44], fft32_w32
[1]);
1008 x
[12] = (ur
>>1) + vr
;
1009 x
[13] = (ui
>>1) + vi
;
1010 x
[44] = (ur
>>1) - vr
;
1011 x
[45] = (ui
>>1) - vi
;
1013 cplxMultDiv2(&vr
, &vi
, x
[61], x
[60], fft32_w32
[1]);
1016 x
[28] = (ur
>>1) + vr
;
1017 x
[29] = (ui
>>1) - vi
;
1018 x
[60] = (ur
>>1) - vr
;
1019 x
[61] = (ui
>>1) + vi
;
1021 cplxMultDiv2(&vi
, &vr
, x
[47], x
[46], fft32_w32
[5]);
1024 x
[14] = (ur
>>1) + vr
;
1025 x
[15] = (ui
>>1) + vi
;
1026 x
[46] = (ur
>>1) - vr
;
1027 x
[47] = (ui
>>1) - vi
;
1029 cplxMultDiv2(&vr
, &vi
, x
[63], x
[62], fft32_w32
[5]);
1032 x
[30] = (ur
>>1) + vr
;
1033 x
[31] = (ui
>>1) - vi
;
1034 x
[62] = (ur
>>1) - vr
;
1035 x
[63] = (ui
>>1) + vi
;
1037 #endif /* #ifndef FUNCTION_fft_32 */
1041 * \brief Apply rotation vectors to a data buffer.
1042 * \param cl length of each row of input data.
1043 * \param l total length of input data.
1044 * \param pVecRe real part of rotation ceofficient vector.
1045 * \param pVecIm imaginary part of rotation ceofficient vector.
1047 static inline void fft_apply_rot_vector(FIXP_DBL
*RESTRICT pData
, const int cl
, const int l
, const FIXP_STB
*pVecRe
, const FIXP_STB
*pVecIm
)
1054 for(i
=0; i
<cl
; i
++) {
1058 pData
[2*i
] = re
>>2; /* * 0.25 */
1059 pData
[2*i
+1] = im
>>2; /* * 0.25 */
1066 pData
[2*i
] = re
>>2; /* * 0.25 */
1067 pData
[2*i
+1] = im
>>2; /* * 0.25 */
1069 for (c
=i
+1; c
<i
+cl
; c
++)
1072 im
= pData
[2*c
+1]>>1;
1076 cplxMultDiv2(&pData
[2*c
+1], &pData
[2*c
], im
, re
, vre
, vim
);
1081 #define FFT_TWO_STAGE_MACRO_ENABLE
1084 #ifdef FFT_TWO_STAGE_MACRO_ENABLE
1086 #define fftN2(pInput, length, dim1, dim2, fft_func1, fft_func2, RotVectorReal, RotVectorImag) \
1090 C_ALLOC_SCRATCH_START(aDst, FIXP_DBL, length*2); \
1091 C_ALLOC_SCRATCH_START(aDst2, FIXP_DBL, dim2*2); \
1093 FDK_ASSERT(length == dim1*dim2); \
1095 /* Perform dim2 times the fft of length dim1. The input samples are at the address of pSrc and the \
1096 output samples are at the address of pDst. The input vector for the fft of length dim1 is built \
1097 of the interleaved samples in pSrc, the output samples are stored consecutively. \
1100 const FIXP_DBL* pSrc = pInput; \
1101 FIXP_DBL *RESTRICT pDst = aDst; \
1103 for(i=0; i<dim2; i++) \
1105 for(j=0; j<dim1; j++) \
1107 pDst[2*j] = pSrc[2*j*dim2]; \
1108 pDst[2*j+1] = pSrc[2*j*dim2+1]; \
1113 pDst = pDst + 2*dim1; \
1117 /* Perform the modulation of the output of the fft of length dim1 */ \
1118 fft_apply_rot_vector(aDst, dim1, length, RotVectorReal, RotVectorImag); \
1120 /* Perform dim1 times the fft of length dim2. The input samples are at the address of aDst and the \
1121 output samples are at the address of pInput. The input vector for the fft of length dim2 is built \
1122 of the interleaved samples in aDst, the output samples are stored consecutively at the address \
1126 const FIXP_DBL* pSrc = aDst; \
1127 FIXP_DBL *RESTRICT pDst = aDst2; \
1128 FIXP_DBL *RESTRICT pDstOut = pInput; \
1130 for(i=0; i<dim1; i++) \
1132 for(j=0; j<dim2; j++) \
1134 pDst[2*j] = pSrc[2*j*dim1]; \
1135 pDst[2*j+1] = pSrc[2*j*dim1+1]; \
1140 for(j=0; j<dim2; j++) \
1142 pDstOut[2*j*dim1] = pDst[2*j]; \
1143 pDstOut[2*j*dim1+1] = pDst[2*j+1]; \
1150 C_ALLOC_SCRATCH_END(aDst2, FIXP_DBL, dim2*2); \
1151 C_ALLOC_SCRATCH_END(aDst, FIXP_DBL, length*2); \
1154 #else /* FFT_TWO_STAGE_MACRO_ENABLE */
1156 /* select either switch case of function pointer. */
1157 //#define FFT_TWO_STAGE_SWITCH_CASE
1159 static inline void fftN2(
1164 void (* const fft1
)(FIXP_DBL
*),
1165 void (* const fft2
)(FIXP_DBL
*),
1166 const FIXP_STB
*RotVectorReal
,
1167 const FIXP_STB
*RotVectorImag
1170 /* The real part of the input samples are at the addresses with even indices and the imaginary
1171 part of the input samples are at the addresses with odd indices. The output samples are stored
1172 at the address of pInput
1174 FIXP_DBL
*pSrc
, *pDst
, *pDstOut
;
1177 C_ALLOC_SCRATCH_START(aDst
, FIXP_DBL
, length
*2);
1178 C_ALLOC_SCRATCH_START(aDst2
, FIXP_DBL
, dim2
*2);
1180 FDK_ASSERT(length
== dim1
*dim2
);
1182 /* Perform dim2 times the fft of length dim1. The input samples are at the address of pSrc and the
1183 output samples are at the address of pDst. The input vector for the fft of length dim1 is built
1184 of the interleaved samples in pSrc, the output samples are stored consecutively.
1188 for(i
=0; i
<length
/dim1
; i
++)
1190 for(j
=0; j
<length
/dim2
; j
++)
1192 pDst
[2*j
] = pSrc
[2*j
*dim2
];
1193 pDst
[2*j
+1] = pSrc
[2*j
*dim2
+1];
1196 /* fft of size dim1 */
1197 #ifndef FFT_TWO_STAGE_SWITCH_CASE
1201 case 3: fft3(pDst
); break;
1202 case 4: fft_4(pDst
); break;
1203 case 5: fft5(pDst
); break;
1204 case 8: fft_8(pDst
); break;
1205 case 15: fft15(pDst
); break;
1206 case 16: fft_16(pDst
); break;
1207 case 32: fft_32(pDst
); break;
1208 /*case 64: fft_64(pDst); break;*/
1209 case 128: fft_128(pDst
); break;
1213 pDst
= pDst
+ 2*length
/dim2
;
1216 /* Perform the modulation of the output of the fft of length dim1 */
1218 fft_apply_rot_vector(pSrc
, length
/dim2
, length
, RotVectorReal
, RotVectorImag
);
1220 /* Perform dim1 times the fft of length dim2. The input samples are at the address of aDst and the
1221 output samples are at the address of pInput. The input vector for the fft of length dim2 is built
1222 of the interleaved samples in aDst, the output samples are stored consecutively at the address
1228 for(i
=0; i
<length
/dim2
; i
++)
1230 for(j
=0; j
<length
/dim1
; j
++)
1232 pDst
[2*j
] = pSrc
[2*j
*dim1
];
1233 pDst
[2*j
+1] = pSrc
[2*j
*dim1
+1];
1236 #ifndef FFT_TWO_STAGE_SWITCH_CASE
1240 case 3: fft3(pDst
); break;
1241 case 4: fft_4(pDst
); break;
1242 case 5: fft5(pDst
); break;
1243 case 8: fft_8(pDst
); break;
1244 case 15: fft15(pDst
); break;
1245 case 16: fft_16(pDst
); break;
1246 case 32: fft_32(pDst
); break;
1247 /*case 64: fft_64(pDst); break;*/
1248 case 128: fft_128(pDst
); break;
1252 for(j
=0; j
<length
/dim1
; j
++)
1254 pDstOut
[2*j
*dim1
] = pDst
[2*j
];
1255 pDstOut
[2*j
*dim1
+1] = pDst
[2*j
+1];
1261 C_ALLOC_SCRATCH_END(aDst2
, FIXP_DBL
, dim2
*2);
1262 C_ALLOC_SCRATCH_END(aDst
, FIXP_DBL
, length
*2);
1265 #endif /* FFT_TWO_STAGE_MACRO_ENABLE */
1278 #define SCALEFACTOR60 5
/* The function performs the FFT of length 60. It is split into FFTs of length 4 and FFTs of
   length 15. Between the FFTs a modulation is calculated. */
1283 static inline void fft60(FIXP_DBL
*pInput
, INT
*pScalefactor
)
1288 RotVectorReal60
, RotVectorImag60
1290 *pScalefactor
+= SCALEFACTOR60
;
/* Fallback implementation in case of no better implementation available. */
1297 #define SCALEFACTOR240 7
/* The function performs the FFT of length 240. It is split into FFTs of length 16 and FFTs of
   length 15. Between the FFTs a modulation is calculated. */
1303 static inline void fft240(FIXP_DBL
*pInput
, INT
*pScalefactor
)
1306 pInput
, 240, 16, 15,
1308 RotVectorReal240
, RotVectorImag240
1310 *pScalefactor
+= SCALEFACTOR240
;
1314 #define SCALEFACTOR480 8
1316 #define TABLE_SIZE_16 (32/2)
/* The function performs the FFT of length 480. It is split into FFTs of length 32 and FFTs of
   length 15. Between the FFTs a modulation is calculated. */
1322 static inline void fft480(FIXP_DBL
*pInput
, INT
*pScalefactor
)
1325 pInput
, 480, 32, 15,
1327 RotVectorReal480
, RotVectorImag480
1329 *pScalefactor
+= SCALEFACTOR480
;
1332 void fft(int length
, FIXP_DBL
*pInput
, INT
*pScalefactor
)
1337 *pScalefactor
+= SCALEFACTOR32
;
1345 *pScalefactor
+= SCALEFACTOR16
;
1349 *pScalefactor
+= SCALEFACTOR8
;
1356 *pScalefactor
+= SCALEFACTOR4
;
1366 fft60(pInput
, pScalefactor
);
1369 dit_fft(pInput
, 6, SineTable512
, 512);
1370 *pScalefactor
+= SCALEFACTOR64
;
1373 fft240(pInput
, pScalefactor
);
1376 dit_fft(pInput
, 8, SineTable512
, 512);
1377 *pScalefactor
+= SCALEFACTOR256
;
1380 fft480(pInput
, pScalefactor
);
1383 dit_fft(pInput
, 9, SineTable512
, 512);
1384 *pScalefactor
+= SCALEFACTOR512
;
1387 FDK_ASSERT(0); /* FFT length not supported! */
1394 void ifft(int length
, FIXP_DBL
*pInput
, INT
*scalefactor
)
1398 FDK_ASSERT(0); /* IFFT length not supported! */