/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vp8dsp.h"
#include "hpeldsp_altivec.h"

#if HAVE_ALTIVEC

#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }

// h subpel filter uses msum to multiply+add 4 pixel taps at once
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};

// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};
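
/*
 * Worked example of the inner/outer split: for mx == 2 the full VP8 6-tap
 * filter is {2, -11, 108, 36, -8, 1}. Its middle taps {-11, 108, 36, -8}
 * come from h_subpel_filters_inner[1] and its outer taps {2, 1} from
 * h_subpel_filters_outer[0], so per output pixel the filter computes
 *
 *     dst[x] = av_clip_uint8((2*src[x-2] - 11*src[x-1] + 108*src[x]
 *                             + 36*src[x+1] - 8*src[x+2] + src[x+3] + 64) >> 7);
 *
 * evaluated here as two vec_msum passes (inner, then outer) per group of
 * four output pixels.
 */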

#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)

#define FILTER_H(dstv, off) \
    a = vec_ld((off)-is6tap-1, src); \
    b = vec_ld((off)-is6tap-1+15, src); \
\
    pixh  = vec_perm(a, b, permh##off); \
    pixl  = vec_perm(a, b, perml##off); \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        outer = vec_perm(a, b, perm_6tap##off); \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv = vec_packs(filth, filtl); \
    dstv = vec_sra(dstv, c7)
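
/*
 * FILTER_H leaves dstv holding eight s16 results: vec_packs saturates the
 * two s32 msum accumulators down to s16 and vec_sra performs the final
 * >> 7; the unsigned clamp to 0..255 happens later, via vec_packsu, when
 * the row is stored.
 */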

static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer  = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);
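
    /*
     * vec_lvsl only returns the alignment-shift permute for src; composing
     * it with perm_inner/perm_outer below lets each FILTER_H both realign
     * the two raw loads and gather its filter taps in a single vec_perm.
     */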

    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);

    while (h --> 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}

// v subpel filter does a simple vertical multiply + add
static const vec_u8 v_subpel_filters[7] =
{
    { 0,   6, 123,  12,   1,   0 },
    { 2,  11, 108,  36,   8,   1 },
    { 0,   9,  93,  50,   6,   0 },
    { 3,  16,  77,  77,  16,   3 },
    { 0,   6,  50,  93,   9,   0 },
    { 1,   8,  36, 108,  11,   2 },
    { 0,   1,  12, 123,   6,   0 },
};
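
/*
 * The taps are stored as unsigned magnitudes: e.g. the my == 1 filter is
 * really {0, -6, 123, 12, -1, 0}, and FILTER_V below restores the signs by
 * subtracting the f1/f4 products (vec_subs) and adding the others, all
 * with saturating arithmetic.
 */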

#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)

#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)

static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);
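
    /*
     * With that interleave, vec_mule in FILTER_V multiplies the even byte
     * lanes (pixels 0-7) and vec_mulo the odd lanes (pixels 8-15), so one
     * multiply pair covers a whole 16-pixel row.
     */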

    if (is6tap)
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;
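
    /*
     * Sliding window: each iteration of the loop below reads exactly one
     * new source row (into s5 for 6-tap, s4 for 4-tap) and shifts the
     * previously loaded rows down, so no source row is loaded twice.
     */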

    while (h --> 0) {
        if (is6tap)
            s5 = load_with_perm_vec(0, src, perm_vec);
        else
            s4 = load_with_perm_vec(0, src, perm_vec);

        FILTER_V(f16h, vec_mule);

        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}

#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
    if (VTAPS == 6) { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-2*sstride, sstride, h+5, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+2*16,      16,      h,   mx, my); \
    } else { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-sstride,   sstride, h+4, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+16,        16,      h,   mx, my); \
    } \
}

EPEL_FUNCS(16, 6)
EPEL_FUNCS(8,  6)
EPEL_FUNCS(8,  4)
EPEL_FUNCS(4,  6)
EPEL_FUNCS(4,  4)

EPEL_HV(16, 6, 6)
EPEL_HV(8,  6, 6)
EPEL_HV(8,  4, 6)
EPEL_HV(8,  6, 4)
EPEL_HV(8,  4, 4)
EPEL_HV(4,  6, 6)
EPEL_HV(4,  4, 6)
EPEL_HV(4,  6, 4)
EPEL_HV(4,  4, 4)
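
/*
 * Note on EPEL_HV: the horizontal pass runs first into the 16-byte-aligned
 * tmp buffer, starting 2 rows (6-tap) or 1 row (4-tap) above the block and
 * filtering extra rows, because the vertical pass needs context above and
 * below each output row; the vertical pass then reads tmp at stride 16.
 */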

static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
    register vector unsigned char pixelsv1,  pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, src);
    int i;
    register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
    register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
    register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;
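
    /*
     * vec_ld ignores the low address bits, so each 16-pixel row is fetched
     * with two aligned loads (offsets 0 and 15) and realigned via vec_perm
     * with the vec_lvsl-derived permute; dst is assumed 16-byte aligned
     * here, since vec_st stores directly.
     */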

    // hand-unrolling the loop by 4 gains about 15%
    // minimum execution time goes from 74 to 60 cycles
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, src);
        pixelsv2  = vec_ld(15, src);
        pixelsv1B = vec_ld(sstride, src);
        pixelsv2B = vec_ld(15 + sstride, src);
        pixelsv1C = vec_ld(sstride2, src);
        pixelsv2C = vec_ld(15 + sstride2, src);
        pixelsv1D = vec_ld(sstride3, src);
        pixelsv2D = vec_ld(15 + sstride3, src);
        vec_st(vec_perm(pixelsv1,  pixelsv2,  perm),
               0, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               dstride, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               dstride2, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               dstride3, (unsigned char*)dst);
        src += sstride4;
        dst += dstride4;
    }
}

#endif /* HAVE_ALTIVEC */

av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;
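
    /* put_vp8_epel_pixels_tab is indexed [size][v-taps][h-taps]: size 0/1/2
     * selects 16-/8-/4-pixel-wide blocks, and filter index 0/1/2 selects
     * copy / 4-tap / 6-tap, matching the assignments below. */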
    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
#endif /* HAVE_ALTIVEC */
}