/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
/* this code assumes that stride % 16 == 0 */
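/*
 * CHROMA_MC8_ALTIVEC_CORE computes one 8-pixel output row of the bilinear
 * chroma interpolation: psum = vA*src[0] + vB*src[1] + vC*src[stride] +
 * vD*src[stride+1] plus a rounding bias supplied through BIAS1/BIAS2,
 * shifted right by 6, packed to bytes and merged into the aligned 16-byte
 * destination vector through fperm.
 */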
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
        vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
        vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = BIAS2(psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;
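/*
 * CHROMA_MC8_ALTIVEC_CORE_SIMPLE is the 2-tap variant used when x == 0 or
 * y == 0: only two source vectors contribute, weighted by vA and
 * vE = vB + vC, with the usual +32 rounding and >> 6.
 */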
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
        vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;
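/*
 * BIAS1/BIAS2 select the rounding applied before the >> 6: the H.264
 * functions pass (v32ss, noop) for (sum + 32) >> 6, while the VC-1
 * "no rounding" function passes (0, add28) for (sum + 28) >> 6.
 */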
#define noop(a) a
#define add28(a) vec_add(v28ss, a)
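/*
 * GET_VSRC1 / GET_VSRC load the (possibly unaligned) source pixels at
 * offsets off and off+1. The big-endian path uses vec_ld plus
 * vec_lvsl/vec_perm realignment, performing the second aligned load only
 * when needed (loadSecond) and special-casing src % 16 == 15
 * (reallyBadAlign); the little-endian path relies on vec_vsx_ld, which
 * accepts unaligned addresses.
 */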
#if HAVE_BIGENDIAN
#define GET_VSRC1(vs0, off, b, perm0, s){    \
    vec_u8 vsrcCuc, vsrcDuc;                 \
    vsrcCuc = vec_ld(off, s);                \
    if (loadSecond){                         \
        vsrcDuc = vec_ld(off + b, s);        \
    } else                                   \
        vsrcDuc = vsrcCuc;                   \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vec_u8 vsrcCuc, vsrcDuc;                 \
    vsrcCuc = vec_ld(off, s);                \
    if (loadSecond){                         \
        vsrcDuc = vec_ld(off + b, s);        \
    } else                                   \
        vsrcDuc = vsrcCuc;                   \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
    if (reallyBadAlign){                     \
        vs1 = vsrcDuc;                       \
    } else                                   \
        vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1); \
}
#else
#define GET_VSRC1(vs0, off, b, perm0, s){    \
    vs0 = vec_vsx_ld(off, s);                \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vs0 = vec_vsx_ld(off, s);                \
    vs1 = vec_vsx_ld(off + 1, s);            \
}
#endif /* HAVE_BIGENDIAN */
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                           int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
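    /* ABCD holds the bilinear weights A = (8-x)*(8-y), B = x*(8-y),
     * C = (8-x)*y and D = x*y; the splats below broadcast each weight
     * into a 16-bit vector. */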
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);
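    /* v32ss is the rounding constant 32 (1 << 5); v6us is the shift count
     * for the final >> 6 (the four weights sum to 64). */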
    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif
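    /* fperm merges the 8 packed result bytes into whichever half of the
     * aligned 16-byte destination vector dst actually falls in. */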
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);
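    /* vsrc0/vsrc1 hold the current row at x and x+1; each loop iteration
     * loads the next row and slides the window. Full 4-tap filtering is
     * only needed when both x and y are nonzero (D != 0); otherwise the
     * 2-tap path with vE = vB + vC is used. */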
    if (ABCD[3]) {
        for (i = 0 ; i < h ; i++) {
            GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
            CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
        }
    } else {
        const vec_s16 vE = vec_add(vB, vC);
        if (ABCD[2]) { // x == 0 B == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
                vsrc0uc = vsrc1uc;
            }
        } else { // y == 0 C == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
            }
        }
    }
}
#endif

/* this code assumes that stride % 16 == 0 */
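/*
 * VC-1/WMV3 "no rounding" variant: same interpolation as above, but the
 * bias applied before the >> 6 is 28 instead of 32, and there are no
 * x == 0 / y == 0 fast paths.
 */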
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                                 int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us  = vec_splat_u16(6);
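    /* v28ss = (1 << 5) - 4 = 28, the no-rounding bias added by add28(). */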
    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);

    for (i = 0 ; i < h ; i++) {
        GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
        CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
    }
}
#endif

#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE