/*
 * Copyright (c) 2012
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author:  Branimir Vasic (bvasic@mips.com)
 * Author:  Zoran Lukic (zoranl@mips.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Reference: libavutil/float_dsp.c
 */

#include "config.h"
#include "libavutil/float_dsp.h"

#if HAVE_INLINE_ASM && HAVE_MIPSFPU
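/**
 * Multiply two float vectors element by element: dst[i] = src0[i] * src1[i].
 *
 * When len is not a multiple of 4 the plain C loop is used; otherwise the
 * inline-assembly path processes four floats per iteration.
 */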
static void vector_fmul_mips(float *dst, const float *src0, const float *src1,
                             int len)
{
    int i;

    if (len & 3) {
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];
    } else {
        float *d = (float *)dst;
        float *d_end = d + len;
        float *s0 = (float *)src0;
        float *s1 = (float *)src1;

        float src0_0, src0_1, src0_2, src0_3;
        float src1_0, src1_1, src1_2, src1_3;

        __asm__ volatile (
            "1:                                             \n\t"
            "lwc1   %[src0_0],  0(%[s0])                    \n\t"
            "lwc1   %[src1_0],  0(%[s1])                    \n\t"
            "lwc1   %[src0_1],  4(%[s0])                    \n\t"
            "lwc1   %[src1_1],  4(%[s1])                    \n\t"
            "lwc1   %[src0_2],  8(%[s0])                    \n\t"
            "lwc1   %[src1_2],  8(%[s1])                    \n\t"
            "lwc1   %[src0_3],  12(%[s0])                   \n\t"
            "lwc1   %[src1_3],  12(%[s1])                   \n\t"
            "mul.s  %[src0_0],  %[src0_0],  %[src1_0]       \n\t"
            "mul.s  %[src0_1],  %[src0_1],  %[src1_1]       \n\t"
            "mul.s  %[src0_2],  %[src0_2],  %[src1_2]       \n\t"
            "mul.s  %[src0_3],  %[src0_3],  %[src1_3]       \n\t"
            "swc1   %[src0_0],  0(%[d])                     \n\t"
            "swc1   %[src0_1],  4(%[d])                     \n\t"
            "swc1   %[src0_2],  8(%[d])                     \n\t"
            "swc1   %[src0_3],  12(%[d])                    \n\t"
            "addiu  %[s0],      %[s0],      16              \n\t"
            "addiu  %[s1],      %[s1],      16              \n\t"
            "addiu  %[d],       %[d],       16              \n\t"
            "bne    %[d],       %[d_end],   1b              \n\t"

            : [src0_0]"=&f"(src0_0), [src0_1]"=&f"(src0_1),
              [src0_2]"=&f"(src0_2), [src0_3]"=&f"(src0_3),
              [src1_0]"=&f"(src1_0), [src1_1]"=&f"(src1_1),
              [src1_2]"=&f"(src1_2), [src1_3]"=&f"(src1_3),
              [d]"+r"(d), [s0]"+r"(s0), [s1]"+r"(s1)
            : [d_end]"r"(d_end)
            : "memory"
        );
    }
}

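/**
 * Multiply a float vector by a scalar: dst[i] = src[i] * mul.
 *
 * The loop is unrolled four times and always handles whole groups of four
 * floats, so len is expected to be a multiple of 4 (the usual float_dsp
 * contract for this operation). The block runs under .set noreorder so the
 * final swc1 sits in the branch delay slot.
 */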
static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
                                    int len)
{
    float temp0, temp1, temp2, temp3;
    float *local_src = (float *)src;
    float *end = local_src + len;

    /* loop unrolled 4 times */
    __asm__ volatile (
        ".set push                                      \n\t"
        ".set noreorder                                 \n\t"
        "1:                                             \n\t"
        "lwc1   %[temp0],   0(%[src])                   \n\t"
        "lwc1   %[temp1],   4(%[src])                   \n\t"
        "lwc1   %[temp2],   8(%[src])                   \n\t"
        "lwc1   %[temp3],   12(%[src])                  \n\t"
        "addiu  %[dst],     %[dst],     16              \n\t"
        "mul.s  %[temp0],   %[temp0],   %[mul]          \n\t"
        "mul.s  %[temp1],   %[temp1],   %[mul]          \n\t"
        "mul.s  %[temp2],   %[temp2],   %[mul]          \n\t"
        "mul.s  %[temp3],   %[temp3],   %[mul]          \n\t"
        "addiu  %[src],     %[src],     16              \n\t"
        "swc1   %[temp0],   -16(%[dst])                 \n\t"
        "swc1   %[temp1],   -12(%[dst])                 \n\t"
        "swc1   %[temp2],   -8(%[dst])                  \n\t"
        "bne    %[src],     %[end],     1b              \n\t"
        " swc1  %[temp3],   -4(%[dst])                  \n\t"
        ".set pop                                       \n\t"

        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [dst]"+r"(dst), [src]"+r"(local_src)
        : [end]"r"(end), [mul]"f"(mul)
        : "memory"
    );
}

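/*
 * Overlap-add window pass. For reference, the generic C version in
 * libavutil/float_dsp.c computes roughly the following (after offsetting
 * dst, win and src0 by len, as done below):
 *
 *     for (i = -len, j = len - 1; i < 0; i++, j--) {
 *         float s0 = src0[i];
 *         float s1 = src1[j];
 *         float wi = win[i];
 *         float wj = win[j];
 *         dst[i] = s0 * wj - s1 * wi;
 *         dst[j] = s0 * wi + s1 * wj;
 *     }
 *
 * The MIPS version produces eight such output pairs per outer-loop
 * iteration (two inline-assembly blocks of four pairs each), so len is
 * assumed to be a multiple of 8.
 */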
static void vector_fmul_window_mips(float *dst, const float *src0,
        const float *src1, const float *win, int len)
{
    int i, j;
    /*
     * variables used in inline assembly
     */
    float *dst_i, *dst_j, *dst_i2, *dst_j2;
    float temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

    dst  += len;
    win  += len;
    src0 += len;

    for (i = -len, j = len - 1; i < 0; i += 8, j -= 8) {

        dst_i = dst + i;
        dst_j = dst + j;

        dst_i2 = dst + i + 4;
        dst_j2 = dst + j - 4;

        __asm__ volatile (
            "mul.s   %[temp],   %[s1],  %[wi]               \n\t"
            "mul.s   %[temp1],  %[s1],  %[wj]               \n\t"
            "mul.s   %[temp2],  %[s11], %[wi1]              \n\t"
            "mul.s   %[temp3],  %[s11], %[wj1]              \n\t"

            "msub.s  %[temp],   %[temp],  %[s0],  %[wj]     \n\t"
            "madd.s  %[temp1],  %[temp1], %[s0],  %[wi]     \n\t"
            "msub.s  %[temp2],  %[temp2], %[s01], %[wj1]    \n\t"
            "madd.s  %[temp3],  %[temp3], %[s01], %[wi1]    \n\t"

            "swc1    %[temp],    0(%[dst_i])                \n\t" /* dst[i]   = s0*wj   - s1*wi;   */
            "swc1    %[temp1],   0(%[dst_j])                \n\t" /* dst[j]   = s0*wi   + s1*wj;   */
            "swc1    %[temp2],   4(%[dst_i])                \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
            "swc1    %[temp3],  -4(%[dst_j])                \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */

            "mul.s   %[temp4],  %[s12], %[wi2]              \n\t"
            "mul.s   %[temp5],  %[s12], %[wj2]              \n\t"
            "mul.s   %[temp6],  %[s13], %[wi3]              \n\t"
            "mul.s   %[temp7],  %[s13], %[wj3]              \n\t"

            "msub.s  %[temp4],  %[temp4], %[s02], %[wj2]    \n\t"
            "madd.s  %[temp5],  %[temp5], %[s02], %[wi2]    \n\t"
            "msub.s  %[temp6],  %[temp6], %[s03], %[wj3]    \n\t"
            "madd.s  %[temp7],  %[temp7], %[s03], %[wi3]    \n\t"

            "swc1    %[temp4],   8(%[dst_i])                \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
            "swc1    %[temp5],  -8(%[dst_j])                \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
            "swc1    %[temp6],  12(%[dst_i])                \n\t" /* dst[i+3] = s03*wj3 - s13*wi3; */
            "swc1    %[temp7], -12(%[dst_j])                \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
            : [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
            : [dst_j]"r"(dst_j),     [dst_i]"r"(dst_i),
              [s0]"f"(src0[i]),      [wj]"f"(win[j]),       [s1]"f"(src1[j]),
              [wi]"f"(win[i]),       [s01]"f"(src0[i + 1]), [wj1]"f"(win[j - 1]),
              [s11]"f"(src1[j - 1]), [wi1]"f"(win[i + 1]),  [s02]"f"(src0[i + 2]),
              [wj2]"f"(win[j - 2]),  [s12]"f"(src1[j - 2]), [wi2]"f"(win[i + 2]),
              [s03]"f"(src0[i + 3]), [wj3]"f"(win[j - 3]),  [s13]"f"(src1[j - 3]),
              [wi3]"f"(win[i + 3])
            : "memory"
        );

        __asm__ volatile (
            "mul.s   %[temp],   %[s1],  %[wi]               \n\t"
            "mul.s   %[temp1],  %[s1],  %[wj]               \n\t"
            "mul.s   %[temp2],  %[s11], %[wi1]              \n\t"
            "mul.s   %[temp3],  %[s11], %[wj1]              \n\t"

            "msub.s  %[temp],   %[temp],  %[s0],  %[wj]     \n\t"
            "madd.s  %[temp1],  %[temp1], %[s0],  %[wi]     \n\t"
            "msub.s  %[temp2],  %[temp2], %[s01], %[wj1]    \n\t"
            "madd.s  %[temp3],  %[temp3], %[s01], %[wi1]    \n\t"

            "swc1    %[temp],    0(%[dst_i2])               \n\t" /* dst[i+4] = s0*wj   - s1*wi;   */
            "swc1    %[temp1],   0(%[dst_j2])               \n\t" /* dst[j-4] = s0*wi   + s1*wj;   */
            "swc1    %[temp2],   4(%[dst_i2])               \n\t" /* dst[i+5] = s01*wj1 - s11*wi1; */
            "swc1    %[temp3],  -4(%[dst_j2])               \n\t" /* dst[j-5] = s01*wi1 + s11*wj1; */

            "mul.s   %[temp4],  %[s12], %[wi2]              \n\t"
            "mul.s   %[temp5],  %[s12], %[wj2]              \n\t"
            "mul.s   %[temp6],  %[s13], %[wi3]              \n\t"
            "mul.s   %[temp7],  %[s13], %[wj3]              \n\t"

            "msub.s  %[temp4],  %[temp4], %[s02], %[wj2]    \n\t"
            "madd.s  %[temp5],  %[temp5], %[s02], %[wi2]    \n\t"
            "msub.s  %[temp6],  %[temp6], %[s03], %[wj3]    \n\t"
            "madd.s  %[temp7],  %[temp7], %[s03], %[wi3]    \n\t"

            "swc1    %[temp4],   8(%[dst_i2])               \n\t" /* dst[i+6] = s02*wj2 - s12*wi2; */
            "swc1    %[temp5],  -8(%[dst_j2])               \n\t" /* dst[j-6] = s02*wi2 + s12*wj2; */
            "swc1    %[temp6],  12(%[dst_i2])               \n\t" /* dst[i+7] = s03*wj3 - s13*wi3; */
            "swc1    %[temp7], -12(%[dst_j2])               \n\t" /* dst[j-7] = s03*wi3 + s13*wj3; */
            : [temp]"=&f"(temp),
              [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
              [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
              [temp7]"=&f"(temp7)
            : [dst_j2]"r"(dst_j2),   [dst_i2]"r"(dst_i2),
              [s0]"f"(src0[i + 4]),  [wj]"f"(win[j - 4]),   [s1]"f"(src1[j - 4]),
              [wi]"f"(win[i + 4]),   [s01]"f"(src0[i + 5]), [wj1]"f"(win[j - 5]),
              [s11]"f"(src1[j - 5]), [wi1]"f"(win[i + 5]),  [s02]"f"(src0[i + 6]),
              [wj2]"f"(win[j - 6]),  [s12]"f"(src1[j - 6]), [wi2]"f"(win[i + 6]),
              [s03]"f"(src0[i + 7]), [wj3]"f"(win[j - 7]),  [s13]"f"(src1[j - 7]),
              [wi3]"f"(win[i + 7])
            : "memory"
        );
    }
}

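/*
 * In-place butterfly of two vectors; the scalar behaviour is:
 *
 *     for (i = 0; i < len; i++) {
 *         float t = v1[i] - v2[i];
 *         v1[i] += v2[i];
 *         v2[i]  = t;
 *     }
 *
 * The assembly below is unrolled four times: the first group of four is
 * loaded before the loop, each loop iteration stores one group while loading
 * the next, and the code after label "2:" handles the final group, so len is
 * assumed to be a non-zero multiple of 4.
 */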
static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
                                   int len)
{
    float temp0, temp1, temp2, temp3, temp4;
    float temp5, temp6, temp7, temp8, temp9;
    float temp10, temp11, temp12, temp13, temp14, temp15;
    int pom;
    pom = (len >> 2) - 1;

    /* loop unrolled 4 times */
    __asm__ volatile (
        "lwc1   %[temp0],    0(%[v1])                   \n\t"
        "lwc1   %[temp1],    4(%[v1])                   \n\t"
        "lwc1   %[temp2],    8(%[v1])                   \n\t"
        "lwc1   %[temp3],    12(%[v1])                  \n\t"
        "lwc1   %[temp4],    0(%[v2])                   \n\t"
        "lwc1   %[temp5],    4(%[v2])                   \n\t"
        "lwc1   %[temp6],    8(%[v2])                   \n\t"
        "lwc1   %[temp7],    12(%[v2])                  \n\t"
        "beq    %[pom],      $zero,     2f              \n\t"
        "1:                                             \n\t"
        "sub.s  %[temp8],    %[temp0],  %[temp4]        \n\t"
        "add.s  %[temp9],    %[temp0],  %[temp4]        \n\t"
        "sub.s  %[temp10],   %[temp1],  %[temp5]        \n\t"
        "add.s  %[temp11],   %[temp1],  %[temp5]        \n\t"
        "sub.s  %[temp12],   %[temp2],  %[temp6]        \n\t"
        "add.s  %[temp13],   %[temp2],  %[temp6]        \n\t"
        "sub.s  %[temp14],   %[temp3],  %[temp7]        \n\t"
        "add.s  %[temp15],   %[temp3],  %[temp7]        \n\t"
        "addiu  %[v1],       %[v1],     16              \n\t"
        "addiu  %[v2],       %[v2],     16              \n\t"
        "addiu  %[pom],      %[pom],    -1              \n\t"
        "lwc1   %[temp0],    0(%[v1])                   \n\t"
        "lwc1   %[temp1],    4(%[v1])                   \n\t"
        "lwc1   %[temp2],    8(%[v1])                   \n\t"
        "lwc1   %[temp3],    12(%[v1])                  \n\t"
        "lwc1   %[temp4],    0(%[v2])                   \n\t"
        "lwc1   %[temp5],    4(%[v2])                   \n\t"
        "lwc1   %[temp6],    8(%[v2])                   \n\t"
        "lwc1   %[temp7],    12(%[v2])                  \n\t"
        "swc1   %[temp9],   -16(%[v1])                  \n\t"
        "swc1   %[temp8],   -16(%[v2])                  \n\t"
        "swc1   %[temp11],  -12(%[v1])                  \n\t"
        "swc1   %[temp10],  -12(%[v2])                  \n\t"
        "swc1   %[temp13],  -8(%[v1])                   \n\t"
        "swc1   %[temp12],  -8(%[v2])                   \n\t"
        "swc1   %[temp15],  -4(%[v1])                   \n\t"
        "swc1   %[temp14],  -4(%[v2])                   \n\t"
        "bgtz   %[pom],      1b                         \n\t"
        "2:                                             \n\t"
        "sub.s  %[temp8],    %[temp0],  %[temp4]        \n\t"
        "add.s  %[temp9],    %[temp0],  %[temp4]        \n\t"
        "sub.s  %[temp10],   %[temp1],  %[temp5]        \n\t"
        "add.s  %[temp11],   %[temp1],  %[temp5]        \n\t"
        "sub.s  %[temp12],   %[temp2],  %[temp6]        \n\t"
        "add.s  %[temp13],   %[temp2],  %[temp6]        \n\t"
        "sub.s  %[temp14],   %[temp3],  %[temp7]        \n\t"
        "add.s  %[temp15],   %[temp3],  %[temp7]        \n\t"
        "swc1   %[temp9],    0(%[v1])                   \n\t"
        "swc1   %[temp8],    0(%[v2])                   \n\t"
        "swc1   %[temp11],   4(%[v1])                   \n\t"
        "swc1   %[temp10],   4(%[v2])                   \n\t"
        "swc1   %[temp13],   8(%[v1])                   \n\t"
        "swc1   %[temp12],   8(%[v2])                   \n\t"
        "swc1   %[temp15],   12(%[v1])                  \n\t"
        "swc1   %[temp14],   12(%[v2])                  \n\t"

        : [v1]"+r"(v1), [v2]"+r"(v2), [pom]"+r"(pom), [temp0]"=&f"(temp0),
          [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
          [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
          [temp10]"=&f"(temp10), [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14), [temp15]"=&f"(temp15)
        :
        : "memory"
    );
}

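/**
 * Multiply one vector by the reverse of another:
 * dst[i] = src0[i] * src1[len - 1 - i].
 *
 * Four elements are produced per iteration, so len is assumed to be a
 * multiple of 4.
 */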
static void vector_fmul_reverse_mips(float *dst, const float *src0,
                                     const float *src1, int len)
{
    int i;
    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    src1 += len - 1;

    for (i = 0; i < (len >> 2); i++) {
        /* loop unrolled 4 times */
        __asm__ volatile (
            "lwc1   %[temp0],   0(%[src0])                  \n\t"
            "lwc1   %[temp1],   0(%[src1])                  \n\t"
            "lwc1   %[temp2],   4(%[src0])                  \n\t"
            "lwc1   %[temp3],  -4(%[src1])                  \n\t"
            "lwc1   %[temp4],   8(%[src0])                  \n\t"
            "lwc1   %[temp5],  -8(%[src1])                  \n\t"
            "lwc1   %[temp6],   12(%[src0])                 \n\t"
            "lwc1   %[temp7],  -12(%[src1])                 \n\t"
            "mul.s  %[temp0],   %[temp1],   %[temp0]        \n\t"
            "mul.s  %[temp2],   %[temp3],   %[temp2]        \n\t"
            "mul.s  %[temp4],   %[temp5],   %[temp4]        \n\t"
            "mul.s  %[temp6],   %[temp7],   %[temp6]        \n\t"
            "addiu  %[src0],    %[src0],    16              \n\t"
            "addiu  %[src1],    %[src1],   -16              \n\t"
            "addiu  %[dst],     %[dst],     16              \n\t"
            "swc1   %[temp0],  -16(%[dst])                  \n\t"
            "swc1   %[temp2],  -12(%[dst])                  \n\t"
            "swc1   %[temp4],  -8(%[dst])                   \n\t"
            "swc1   %[temp6],  -4(%[dst])                   \n\t"

            : [dst]"+r"(dst), [src0]"+r"(src0), [src1]"+r"(src1),
              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
            :
            : "memory"
        );
    }
}
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */

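/**
 * Install the MIPS-optimized implementations into the given
 * AVFloatDSPContext. Compiles to an empty function when inline assembly or
 * the MIPS FPU is not available.
 */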
void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp) {
#if HAVE_INLINE_ASM && HAVE_MIPSFPU
    fdsp->vector_fmul = vector_fmul_mips;
    fdsp->vector_fmul_scalar = vector_fmul_scalar_mips;
    fdsp->vector_fmul_window = vector_fmul_window_mips;
    fdsp->butterflies_float = butterflies_float_mips;
    fdsp->vector_fmul_reverse = vector_fmul_reverse_mips;
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
}
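
/*
 * Usage sketch (illustrative only, not part of this file): the generic
 * float_dsp initializer in libavutil/float_dsp.c fills the context with C
 * implementations and then lets each architecture override entries, roughly
 * along these lines:
 *
 *     float dst[16], a[16], b[16];          // assume a and b are filled in
 *     AVFloatDSPContext fdsp;
 *     avpriv_float_dsp_init(&fdsp, 0);      // C defaults, then per-arch init
 *                                           // (ff_float_dsp_init_mips on MIPS)
 *     fdsp.vector_fmul(dst, a, b, 16);      // dispatches to vector_fmul_mips
 */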