/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"

/* this code assumes that stride % 16 == 0 */

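/*
 * Bilinear 8-wide chroma interpolation as specified for H.264: with
 * eighth-pel fractional offsets x and y (0..7), each output pixel is
 *     dst = (A*s00 + B*s01 + C*s10 + D*s11 + 32) >> 6
 * with A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y.
 * The four weights always sum to 64 (e.g. x=3, y=2 gives A=30, B=18,
 * C=10, D=6), so the shift by 6 renormalizes the result.
 */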
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
        vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
        vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = BIAS2(psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;

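/*
 * Two-tap variant for the degenerate cases x == 0 or y == 0: two of the
 * four weights vanish, so only vA and vE = vB + vC (one of vB, vC being
 * zero) contribute, halving the multiplies per row.
 */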
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
        vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;

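/*
 * Rounding selectors for CHROMA_MC8_ALTIVEC_CORE: the H.264 functions add
 * the full bias up front (BIAS1 = v32ss, BIAS2 = noop), giving
 * (sum + 32) >> 6, while the VC-1 "no rounding" variant accumulates with
 * BIAS1 = 0 and adds 28 afterwards, giving (sum + 28) >> 6.
 */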
#define noop(a) a
#define add28(a) vec_add(v28ss, a)

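/*
 * Row loads. On big-endian machines vec_ld silently rounds the address down
 * to a 16-byte boundary, so a misaligned row is assembled from one or two
 * aligned loads plus a vec_perm with a vec_lvsl-generated shuffle.
 * loadSecond is set when the bytes needed for a row spill into the next
 * quadword; reallyBadAlign covers src % 16 == 15, where vec_lvsl(1, src)
 * wraps around to the identity permute and the second quadword must be used
 * directly. On little-endian builds the VSX vec_vsx_ld instruction handles
 * unaligned addresses itself, so the permute arguments go unused.
 */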
#if HAVE_BIGENDIAN
#define GET_VSRC1(vs0, off, b, perm0, s){    \
    vec_u8 vsrcCuc, vsrcDuc;                 \
    vsrcCuc = vec_ld(off, s);                \
    if (loadSecond){                         \
        vsrcDuc = vec_ld(off + b, s);        \
    } else                                   \
        vsrcDuc = vsrcCuc;                   \
                                             \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vec_u8 vsrcCuc, vsrcDuc;                         \
    vsrcCuc = vec_ld(off, s);                        \
    if (loadSecond){                                 \
        vsrcDuc = vec_ld(off + b, s);                \
    } else                                           \
        vsrcDuc = vsrcCuc;                           \
                                                     \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0);         \
    if (reallyBadAlign){                             \
        vs1 = vsrcDuc;                               \
    } else                                           \
        vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1);     \
}

#else

#define GET_VSRC1(vs0, off, b, perm0, s){ \
    vs0 = vec_vsx_ld(off, s);             \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vs0 = vec_vsx_ld(off, s);                        \
    vs1 = vec_vsx_ld(off + 1, s);                    \
}
#endif /* HAVE_BIGENDIAN */

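/*
 * 8xh chroma motion compensation for H.264; x and y are the eighth-pel
 * fractional offsets (0..7). OP_U8_ALTIVEC and the PREFIX_* function name
 * are supplied by the file including this template (put and avg variants).
 */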
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                           int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif

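    /*
     * The kernel produces 8 output bytes but AltiVec stores whole 16-byte
     * quadwords. Since stride % 16 == 0, dst is either quadword-aligned or
     * 8 bytes into a quadword; fperm merges the packed result (duplicated
     * in both halves of ppsum) into the correct half of the existing
     * destination vector, leaving the other half untouched.
     */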
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);

    if (ABCD[3]) {
        for (i = 0 ; i < h ; i++) {
            GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
            CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
        }
    } else {
        const vec_s16 vE = vec_add(vB, vC);
        if (ABCD[2]) { // x == 0, so B == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
                vsrc0uc = vsrc1uc;
            }
        } else { // y == 0, so C == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
            }
        }
    }
}
#endif

/* this code assumes that stride % 16 == 0 */
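/*
 * 8xh chroma motion compensation for VC-1 in "no rounding" mode: the same
 * bilinear kernel as above, but with a rounding bias of 28 instead of 32.
 */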
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                                 int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

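    /* Unlike the H.264 version above there are no x == 0 / y == 0 fast
     * paths: this variant always runs the full four-tap core. */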
    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);

    for (i = 0 ; i < h ; i++) {
        GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
        CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
    }
}
#endif

#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE