/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavcodec/me_cmp.h"
#include "asm.h"

int pix_abs16x16_mvi_asm(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h);

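/* Notes on the asm.h helpers used below (PERR is part of the Alpha MVI
 * extension; the loads and extracts are base Alpha operations):
 *   - perr(a, b)      sum of absolute differences of the eight byte pairs in
 *                     two quadwords, i.e. one row's worth of SAD.
 *   - ldq/ldq_u/uldq  aligned load, aligned load of the quadword containing
 *                     an address, and unaligned load, respectively.
 *   - extql/extqh     extract the low/high part of a quadword relative to a
 *                     byte offset; ORing the two results reassembles an
 *                     unaligned quadword from two aligned loads.
 *   - BYTE_VEC(x)     replicates the byte x into all eight byte lanes.
 */
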
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}

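/* avg2() is the branch-free rounding average: in every byte lane it computes
 * (a + b + 1) >> 1 via the identity (a | b) - ((a ^ b) >> 1), with the low
 * bit of each lane masked off so the shift cannot leak bits between bytes.
 * A scalar sketch of the same per-byte operation (avg2_ref is an
 * illustrative name, not part of this file, and is kept out of the build):
 */
#if 0
static inline uint64_t avg2_ref(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int i;
    for (i = 0; i < 8; i++) {
        uint64_t ba = (a >> (8 * i)) & 0xff;
        uint64_t bb = (b >> (8 * i)) & 0xff;
        r |= ((ba + bb + 1) >> 1) << (8 * i);
    }
    return r;
}
#endif
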
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}

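/* avg4() computes the rounded four-way average (l1 + l2 + l3 + l4 + 2) >> 2
 * in every byte lane: the top six bits of each lane are summed after the
 * shift (so the partial sums cannot carry into the neighbouring lane), and
 * the low two bits are summed separately together with the rounding
 * constant 2. A scalar sketch with the same semantics (avg4_ref is an
 * illustrative name only, kept out of the build):
 */
#if 0
static inline uint64_t avg4_ref(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r = 0;
    int i;
    for (i = 0; i < 8; i++) {
        uint64_t b1 = (l1 >> (8 * i)) & 0xff;
        uint64_t b2 = (l2 >> (8 * i)) & 0xff;
        uint64_t b3 = (l3 >> (8 * i)) & 0xff;
        uint64_t b4 = (l4 >> (8 * i)) & 0xff;
        r |= ((b1 + b2 + b3 + b4 + 2) >> 2) << (8 * i);
    }
    return r;
}
#endif
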
static int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 8 pixel a time */
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = uldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    } else {
        do {
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = ldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    }

    return result;
}

#if 0                           /* now done in assembly */
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int result = 0;
    int h = 16;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 16 pixel a time */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            t     = ldq_u(pix2 + 8);
            p2_l  = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r  = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    } else {
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = ldq(pix2);
            p2_r  = ldq(pix2 + 8);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }

    return result;
}
#endif

static int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    uint64_t disalign = (size_t) pix2 & 0x7;

    switch (disalign) {
    case 0:
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            l     = ldq(pix2);
            r     = ldq(pix2 + 8);
            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r  = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    case 7:
        /* |.......l|lllllllr|rrrrrrr*|
           This case is special because disalign1 would be 8, which
           gets treated as 0 by extqh. At least it is a bit faster
           that way :)  */
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            l     = ldq_u(pix2);
            m     = ldq_u(pix2 + 8);
            r     = ldq_u(pix2 + 16);
            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r  = avg2(extql(m, disalign) | extqh(r, disalign), r);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    default:
        do {
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            l     = ldq_u(pix2);
            m     = ldq_u(pix2 + 8);
            r     = ldq_u(pix2 + 16);
            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = avg2(extql(l, disalign) | extqh(m, disalign),
                         extql(l, disalign1) | extqh(m, disalign1));
            p2_r  = avg2(extql(m, disalign) | extqh(r, disalign),
                         extql(m, disalign1) | extqh(r, disalign1));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }
    return result;
}

static int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;
        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    }
    return result;
}

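/* Half-pel interpolation in both x and y: each reference pixel is the
 * rounded avg4() of the current row, the next row, and their
 * one-pixel-shifted copies, extending the one-dimensional avg2() averaging
 * used by the x2 and y2 variants above. */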
static int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }

    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;

        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) | ((uint64_t)  p2_x),
                            np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));

        p1_l = np1_l;
        p1_r = np1_r;
        p2_l = np2_l;
        p2_r = np2_r;
        p2_x = np2_x;
    } while (--h);

    return result;
}

av_cold void ff_me_cmp_init_alpha(MECmpContext *c, AVCodecContext *avctx)
{
    /* amask clears all bits that correspond to present features. */
    if (amask(AMASK_MVI) == 0) {
        c->sad[0]        = pix_abs16x16_mvi_asm;
        c->sad[1]        = pix_abs8x8_mvi;
        c->pix_abs[0][0] = pix_abs16x16_mvi_asm;
        c->pix_abs[1][0] = pix_abs8x8_mvi;
        c->pix_abs[0][1] = pix_abs16x16_x2_mvi;
        c->pix_abs[0][2] = pix_abs16x16_y2_mvi;
        c->pix_abs[0][3] = pix_abs16x16_xy2_mvi;
    }
}