;******************************************************************************
;* VP9 loop filter SIMD optimizations
;*
;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
22 | ||
23 | %include "libavutil/x86/x86util.asm" | |
24 | ||
25 | %if ARCH_X86_64 | |
26 | ||
27 | SECTION_RODATA | |
28 | ||
29 | cextern pb_3 | |
30 | cextern pb_80 | |
31 | ||
32 | pb_4: times 16 db 0x04 | |
33 | pb_10: times 16 db 0x10 | |
34 | pb_40: times 16 db 0x40 | |
35 | pb_81: times 16 db 0x81 | |
36 | pb_f8: times 16 db 0xf8 | |
37 | pb_fe: times 16 db 0xfe | |
38 | ||
39 | cextern pw_4 | |
40 | cextern pw_8 | |
41 | ||
; with the mix functions, two 8-bit thresholds are packed into one 16-bit
; value; the following mask is used to splat both into the same register
mask_mix:   times 8 db 0
            times 8 db 1
46 | ||
47 | mask_mix84: times 8 db 0xff | |
48 | times 8 db 0x00 | |
49 | mask_mix48: times 8 db 0x00 | |
50 | times 8 db 0xff | |
51 | ||
52 | SECTION .text | |
53 | ||
; %1 = abs(%2-%3)
%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
    psubusb             %1, %3, %2
    psubusb             %4, %2, %3
    por                 %1, %4
%endmacro
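
; psubusb saturates towards 0, so max(0, a-b) | max(0, b-a) == |a-b| for
; unsigned bytes: a per-byte absolute difference without a signed compare.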
60 | ||
61 | ; %1 = %1<=%2 | |
62 | %macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80 | |
63 | %if %0 == 4 | |
64 | pxor %1, %4 | |
65 | %endif | |
66 | pcmpgtb %3, %2, %1 ; cmp > src? | |
67 | pcmpeqb %1, %2 ; cmp == src? XXX: avoid this with a -1/+1 well placed? | |
68 | por %1, %3 ; cmp >= src? | |
69 | %endmacro | |
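
; pcmpgtb is a *signed* byte compare; the optional pb_80 argument XORs the
; source with 0x80 to rebias unsigned bytes into the signed range so that
; unsigned ordering is preserved. %2 must already be biased the same way.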
70 | ||
71 | ; %1 = abs(%2-%3) <= %4 | |
72 | %macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80] | |
73 | ABSSUB %1, %2, %3, %6 ; dst = abs(src1-src2) | |
74 | CMP_LTE %1, %4, %6, %7 ; dst <= cmp | |
75 | %endmacro | |
76 | ||
77 | %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp | |
78 | pand %1, %3 ; new &= mask | |
79 | pandn %4, %3, %2 ; tmp = ~mask & old | |
80 | por %1, %4 ; new&mask | old&~mask | |
81 | %endmacro | |
82 | ||
83 | %macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift | |
84 | punpck%2bw %3, %4, m0 | |
85 | psubw %1, %3 | |
86 | punpck%2bw %3, %5, m0 | |
87 | psubw %1, %3 | |
88 | punpck%2bw %3, %6, m0 | |
89 | paddw %1, %3 | |
90 | punpck%2bw %3, %7, m0 | |
91 | paddw %3, %1 | |
92 | psraw %1, %3, %8 | |
93 | %endmacro | |
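
; %1 enters holding the current window sum (as words); the two outgoing taps
; (%4, %5) are subtracted and the two incoming taps (%6, %7) added, each
; widened against m0 (zero). On exit %3 holds the updated sum and %1 the sum
; arithmetically shifted right by %8, i.e. the filtered pixels.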
94 | ||
95 | %macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source | |
96 | FILTER%6_INIT %1, l, %3 | |
97 | FILTER%6_INIT %2, h, %4 | |
98 | packuswb %1, %2 | |
99 | MASK_APPLY %1, %8, %7, %2 | |
100 | mova %5, %1 | |
101 | %endmacro | |
102 | ||
103 | %macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value] | |
104 | %if %0 == 13 ; no source + preload | |
105 | mova %12, %13 | |
106 | %elif %0 == 14 ; source + preload | |
107 | mova %13, %14 | |
108 | %endif | |
109 | FILTER_SUBx2_ADDx2 %1, l, %3, %6, %7, %8, %9, %10 | |
110 | FILTER_SUBx2_ADDx2 %2, h, %4, %6, %7, %8, %9, %10 | |
111 | packuswb %1, %2 | |
112 | %if %0 == 12 || %0 == 14 | |
113 | MASK_APPLY %1, %12, %11, %2 | |
114 | %else | |
115 | MASK_APPLY %1, %5, %11, %2 | |
116 | %endif | |
117 | mova %5, %1 | |
118 | %endmacro | |
119 | ||
120 | %macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp | |
121 | mova %4, [pb_f8] | |
122 | pand %1, %4 | |
123 | pand %2, %4 | |
124 | psrlq %1, 3 | |
125 | psrlq %2, 3 | |
126 | pxor %1, %3 | |
127 | pxor %2, %3 | |
128 | psubb %1, %3 | |
129 | psubb %2, %3 | |
130 | %endmacro | |
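
; Per-byte signed >>3 built from unsigned ops: masking with 0xf8 keeps the
; qword shift from pulling bits in from the neighboring byte, the logical
; shift yields a 5-bit value, and (x ^ 0x10) - 0x10 (with %3 = pb_10)
; sign-extends bit 4 back to a full signed byte.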
131 | ||
132 | %macro EXTRACT_POS_NEG 3 ; i8, neg, pos | |
133 | pxor %3, %3 | |
134 | pxor %2, %2 | |
135 | pcmpgtb %3, %1 ; i8 < 0 mask | |
136 | psubb %2, %1 ; neg values (only the originally - will be kept) | |
137 | pand %2, %3 ; negative values of i8 (but stored as +) | |
138 | pandn %3, %1 ; positive values of i8 | |
139 | %endmacro | |
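
; Scalar equivalent: neg = i8 < 0 ? -i8 : 0; pos = i8 < 0 ? 0 : i8;
; both results are non-negative, so they can feed the unsigned saturating
; adds/subs below.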
140 | ||
141 | ; clip_u8(u8 + i8) | |
142 | %macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2 | |
143 | EXTRACT_POS_NEG %3, %4, %5 | |
144 | psubusb %1, %2, %4 ; sub the negatives | |
145 | paddusb %1, %5 ; add the positives | |
146 | %endmacro | |
147 | ||
148 | ; clip_u8(u8 - i8) | |
149 | %macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2 | |
150 | EXTRACT_POS_NEG %3, %4, %5 | |
151 | psubusb %1, %2, %5 ; sub the positives | |
152 | paddusb %1, %4 ; add the negatives | |
153 | %endmacro | |
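
; i.e. SIGN_ADD computes clip_u8(u8 + i8) as u8 saturating-minus |i8| when
; i8 < 0 and saturating-plus i8 otherwise (the unused term is 0); SIGN_SUB
; mirrors it for clip_u8(u8 - i8). The psubusb/paddusb saturation is the clip.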
154 | ||
155 | %macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache | |
156 | punpck%2bw %1, m14, m0 ; p3: B->W | |
157 | paddw %3, %1, %1 ; p3*2 | |
158 | paddw %3, %1 ; p3*3 | |
159 | punpck%2bw %1, m15, m0 ; p2: B->W | |
160 | paddw %3, %1 ; p3*3 + p2 | |
161 | paddw %3, %1 ; p3*3 + p2*2 | |
162 | punpck%2bw %1, m10, m0 ; p1: B->W | |
163 | paddw %3, %1 ; p3*3 + p2*2 + p1 | |
164 | punpck%2bw %1, m11, m0 ; p0: B->W | |
165 | paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 | |
166 | punpck%2bw %1, m12, m0 ; q0: B->W | |
167 | paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 | |
168 | paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4 | |
169 | psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3 | |
170 | %endmacro | |
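
; FILTER6_INIT seeds the running sum (total weight 8, hence the >> 3) whose
; first output is the filtered p2; FILTER_UPDATE then slides the window for
; each following pixel by subtracting two taps and adding two instead of
; recomputing the whole sum.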
171 | ||
172 | %macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache | |
173 | punpck%2bw %1, m2, m0 ; p7: B->W | |
174 | psllw %3, %1, 3 ; p7*8 | |
175 | psubw %3, %1 ; p7*7 | |
176 | punpck%2bw %1, m3, m0 ; p6: B->W | |
177 | paddw %3, %1 ; p7*7 + p6 | |
178 | paddw %3, %1 ; p7*7 + p6*2 | |
179 | punpck%2bw %1, m8, m0 ; p5: B->W | |
180 | paddw %3, %1 ; p7*7 + p6*2 + p5 | |
181 | punpck%2bw %1, m9, m0 ; p4: B->W | |
182 | paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 | |
183 | punpck%2bw %1, m14, m0 ; p3: B->W | |
184 | paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 + p3 | |
185 | punpck%2bw %1, m15, m0 ; p2: B->W | |
186 | paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p2 | |
187 | punpck%2bw %1, m10, m0 ; p1: B->W | |
188 | paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p1 | |
189 | punpck%2bw %1, m11, m0 ; p0: B->W | |
190 | paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 | |
191 | punpck%2bw %1, m12, m0 ; q0: B->W | |
192 | paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 + q0 | |
193 | paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8 | |
194 | psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4 | |
195 | %endmacro | |
196 | ||
197 | %macro TRANSPOSE16x16B 17 | |
198 | mova %17, m%16 | |
199 | SBUTTERFLY bw, %1, %2, %16 | |
200 | SBUTTERFLY bw, %3, %4, %16 | |
201 | SBUTTERFLY bw, %5, %6, %16 | |
202 | SBUTTERFLY bw, %7, %8, %16 | |
203 | SBUTTERFLY bw, %9, %10, %16 | |
204 | SBUTTERFLY bw, %11, %12, %16 | |
205 | SBUTTERFLY bw, %13, %14, %16 | |
206 | mova m%16, %17 | |
207 | mova %17, m%14 | |
208 | SBUTTERFLY bw, %15, %16, %14 | |
209 | SBUTTERFLY wd, %1, %3, %14 | |
210 | SBUTTERFLY wd, %2, %4, %14 | |
211 | SBUTTERFLY wd, %5, %7, %14 | |
212 | SBUTTERFLY wd, %6, %8, %14 | |
213 | SBUTTERFLY wd, %9, %11, %14 | |
214 | SBUTTERFLY wd, %10, %12, %14 | |
215 | SBUTTERFLY wd, %13, %15, %14 | |
216 | mova m%14, %17 | |
217 | mova %17, m%12 | |
218 | SBUTTERFLY wd, %14, %16, %12 | |
219 | SBUTTERFLY dq, %1, %5, %12 | |
220 | SBUTTERFLY dq, %2, %6, %12 | |
221 | SBUTTERFLY dq, %3, %7, %12 | |
222 | SBUTTERFLY dq, %4, %8, %12 | |
223 | SBUTTERFLY dq, %9, %13, %12 | |
224 | SBUTTERFLY dq, %10, %14, %12 | |
225 | SBUTTERFLY dq, %11, %15, %12 | |
226 | mova m%12, %17 | |
227 | mova %17, m%8 | |
228 | SBUTTERFLY dq, %12, %16, %8 | |
229 | SBUTTERFLY qdq, %1, %9, %8 | |
230 | SBUTTERFLY qdq, %2, %10, %8 | |
231 | SBUTTERFLY qdq, %3, %11, %8 | |
232 | SBUTTERFLY qdq, %4, %12, %8 | |
233 | SBUTTERFLY qdq, %5, %13, %8 | |
234 | SBUTTERFLY qdq, %6, %14, %8 | |
235 | SBUTTERFLY qdq, %7, %15, %8 | |
236 | mova m%8, %17 | |
237 | mova %17, m%1 | |
238 | SBUTTERFLY qdq, %8, %16, %1 | |
239 | mova m%1, %17 | |
240 | SWAP %2, %9 | |
241 | SWAP %3, %5 | |
242 | SWAP %4, %13 | |
243 | SWAP %6, %11 | |
244 | SWAP %8, %15 | |
245 | SWAP %12, %14 | |
246 | %endmacro | |
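
; 16x16 byte transpose using all 16 XMM registers plus one 16-byte memory
; slot (%17) as spill space; rows are interleaved at byte, word, dword and
; qword granularity in turn.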
247 | ||
248 | ; transpose 16 half lines (high part) to 8 full centered lines | |
249 | %macro TRANSPOSE16x8B 16 | |
250 | punpcklbw m%1, m%2 | |
251 | punpcklbw m%3, m%4 | |
252 | punpcklbw m%5, m%6 | |
253 | punpcklbw m%7, m%8 | |
254 | punpcklbw m%9, m%10 | |
255 | punpcklbw m%11, m%12 | |
256 | punpcklbw m%13, m%14 | |
257 | punpcklbw m%15, m%16 | |
258 | SBUTTERFLY wd, %1, %3, %2 | |
259 | SBUTTERFLY wd, %5, %7, %2 | |
260 | SBUTTERFLY wd, %9, %11, %2 | |
261 | SBUTTERFLY wd, %13, %15, %2 | |
262 | SBUTTERFLY dq, %1, %5, %2 | |
263 | SBUTTERFLY dq, %3, %7, %2 | |
264 | SBUTTERFLY dq, %9, %13, %2 | |
265 | SBUTTERFLY dq, %11, %15, %2 | |
266 | SBUTTERFLY qdq, %1, %9, %2 | |
267 | SBUTTERFLY qdq, %3, %11, %2 | |
268 | SBUTTERFLY qdq, %5, %13, %2 | |
269 | SBUTTERFLY qdq, %7, %15, %2 | |
270 | SWAP %5, %1 | |
271 | SWAP %6, %9 | |
272 | SWAP %7, %1 | |
273 | SWAP %8, %13 | |
274 | SWAP %9, %3 | |
275 | SWAP %10, %11 | |
276 | SWAP %11, %1 | |
277 | SWAP %12, %15 | |
278 | %endmacro | |
279 | ||
280 | %macro DEFINE_REAL_P7_TO_Q7 0-1 0 | |
281 | %define P7 dst1q + 2*mstrideq + %1 | |
282 | %define P6 dst1q + mstrideq + %1 | |
283 | %define P5 dst1q + %1 | |
284 | %define P4 dst1q + strideq + %1 | |
285 | %define P3 dstq + 4*mstrideq + %1 | |
286 | %define P2 dstq + mstride3q + %1 | |
287 | %define P1 dstq + 2*mstrideq + %1 | |
288 | %define P0 dstq + mstrideq + %1 | |
289 | %define Q0 dstq + %1 | |
290 | %define Q1 dstq + strideq + %1 | |
291 | %define Q2 dstq + 2*strideq + %1 | |
292 | %define Q3 dstq + stride3q + %1 | |
293 | %define Q4 dstq + 4*strideq + %1 | |
294 | %define Q5 dst2q + mstrideq + %1 | |
295 | %define Q6 dst2q + %1 | |
296 | %define Q7 dst2q + strideq + %1 | |
297 | %endmacro | |
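
; The P7..Q7 rows span [-8*stride, +7*stride] around dstq; three base
; pointers (dst1q at P5, dstq at Q0, dst2q at Q6, set up in LOOPFILTER below)
; keep every row reachable with the cheap base + {1,2,4}*stride and
; base + stride3 addressing forms.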
298 | ||
299 | ; ..............AB -> AAAAAAAABBBBBBBB | |
300 | %macro SPLATB_MIX 1-2 [mask_mix] | |
301 | %if cpuflag(ssse3) | |
302 | pshufb %1, %2 | |
303 | %else | |
304 | punpcklbw %1, %1 | |
305 | punpcklwd %1, %1 | |
306 | punpckldq %1, %1 | |
307 | %endif | |
308 | %endmacro | |
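
; Without SSSE3, the same splat is achieved with three successive
; self-unpacks: ab -> aabb -> aaaabbbb -> aaaaaaaabbbbbbbb.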
309 | ||
310 | %macro LOOPFILTER 2 ; %1=v/h %2=size1 | |
311 | lea mstrideq, [strideq] | |
312 | neg mstrideq | |
313 | ||
314 | lea stride3q, [strideq+2*strideq] | |
315 | mov mstride3q, stride3q | |
316 | neg mstride3q | |
317 | ||
318 | %ifidn %1, h | |
319 | %if %2 > 16 | |
320 | %define movx movh | |
321 | lea dstq, [dstq + 8*strideq - 4] | |
322 | %else | |
323 | %define movx movu | |
324 | lea dstq, [dstq + 8*strideq - 8] ; go from top center (h pos) to center left (v pos) | |
325 | %endif | |
326 | %endif | |
327 | ||
328 | lea dst1q, [dstq + 2*mstride3q] ; dst1q = &dst[stride * -6] | |
329 | lea dst2q, [dstq + 2* stride3q] ; dst2q = &dst[stride * +6] | |
330 | ||
331 | DEFINE_REAL_P7_TO_Q7 | |
332 | ||
%ifidn %1, h
    movx                m0, [P7]
    movx                m1, [P6]
    movx                m2, [P5]
    movx                m3, [P4]
    movx                m4, [P3]
    movx                m5, [P2]
    movx                m6, [P1]
    movx                m7, [P0]
    movx                m8, [Q0]
    movx                m9, [Q1]
    movx               m10, [Q2]
    movx               m11, [Q3]
    movx               m12, [Q4]
    movx               m13, [Q5]
    movx               m14, [Q6]
    movx               m15, [Q7]
%define P7 rsp +   0
%define P6 rsp +  16
%define P5 rsp +  32
%define P4 rsp +  48
%define P3 rsp +  64
%define P2 rsp +  80
%define P1 rsp +  96
%define P0 rsp + 112
%define Q0 rsp + 128
%define Q1 rsp + 144
%define Q2 rsp + 160
%define Q3 rsp + 176
%define Q4 rsp + 192
%define Q5 rsp + 208
%define Q6 rsp + 224
%define Q7 rsp + 240

%if %2 == 16
    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
    mova              [P7], m0
    mova              [P6], m1
    mova              [P5], m2
    mova              [P4], m3
%else
    TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
%endif
    mova              [P3], m4
    mova              [P2], m5
    mova              [P1], m6
    mova              [P0], m7
    mova              [Q0], m8
    mova              [Q1], m9
    mova              [Q2], m10
    mova              [Q3], m11
%if %2 == 16
    mova              [Q4], m12
    mova              [Q5], m13
    mova              [Q6], m14
    mova              [Q7], m15
%endif
%endif
391 | ||
392 | ; calc fm mask | |
393 | %if %2 == 16 | |
394 | %if cpuflag(ssse3) | |
395 | pxor m0, m0 | |
396 | %endif | |
397 | SPLATB_REG m2, I, m0 ; I I I I ... | |
398 | SPLATB_REG m3, E, m0 ; E E E E ... | |
399 | %else | |
400 | %if cpuflag(ssse3) | |
401 | mova m0, [mask_mix] | |
402 | %endif | |
403 | movd m2, Id | |
404 | movd m3, Ed | |
405 | SPLATB_MIX m2, m0 | |
406 | SPLATB_MIX m3, m0 | |
407 | %endif | |
408 | mova m0, [pb_80] | |
409 | pxor m2, m0 | |
410 | pxor m3, m0 | |
411 | %ifidn %1, v | |
412 | mova m8, [P3] | |
413 | mova m9, [P2] | |
414 | mova m10, [P1] | |
415 | mova m11, [P0] | |
416 | mova m12, [Q0] | |
417 | mova m13, [Q1] | |
418 | mova m14, [Q2] | |
419 | mova m15, [Q3] | |
%else
    ; In the horizontal case, P3..Q3 are already present in some registers
    ; from the transpose above, so we just swap registers.
    SWAP                 8,  4, 12
    SWAP                 9,  5, 13
    SWAP                10,  6, 14
    SWAP                11,  7, 15
%endif
    ABSSUB_CMP          m5,  m8,  m9, m2, m6, m7, m0   ; m5 = abs(p3-p2) <= I
    ABSSUB_CMP          m1,  m9, m10, m2, m6, m7, m0   ; m1 = abs(p2-p1) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m10, m11, m2, m6, m7, m0   ; m1 = abs(p1-p0) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m12, m13, m2, m6, m7, m0   ; m1 = abs(q1-q0) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m13, m14, m2, m6, m7, m0   ; m1 = abs(q2-q1) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m14, m15, m2, m6, m7, m0   ; m1 = abs(q3-q2) <= I
    pand                m5, m1
    ABSSUB              m1, m11, m12, m7               ; abs(p0-q0)
    paddusb             m1, m1                         ; abs(p0-q0) * 2
    ABSSUB              m2, m10, m13, m7               ; abs(p1-q1)
    pand                m2, [pb_fe]                    ; drop lsb so shift can work
    psrlq               m2, 1                          ; abs(p1-q1)/2
    paddusb             m1, m2                         ; abs(p0-q0)*2 + abs(p1-q1)/2
    pxor                m1, m0
    pcmpgtb             m4, m3, m1                     ; E > X?
    pcmpeqb             m3, m1                         ; E == X?
    por                 m3, m4                         ; E >= X?
    pand                m3, m5                         ; fm final value
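    ; in scalar terms:
    ;   fm = abs(p3-p2) <= I && abs(p2-p1) <= I && abs(p1-p0) <= I &&
    ;        abs(q1-q0) <= I && abs(q2-q1) <= I && abs(q3-q2) <= I &&
    ;        abs(p0-q0) * 2 + abs(p1-q1) / 2 <= E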
450 | ||
451 | ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) | |
452 | ; calc flat8in (if not 44_16) and hev masks | |
453 | mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80 | |
454 | %if %2 != 44 | |
455 | ABSSUB_CMP m2, m8, m11, m6, m4, m5 ; abs(p3 - p0) <= 1 | |
456 | mova m8, [pb_80] | |
457 | ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 | |
458 | pand m2, m1 | |
459 | ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) | |
460 | %if %2 == 16 | |
461 | %if cpuflag(ssse3) | |
462 | pxor m0, m0 | |
463 | %endif | |
464 | SPLATB_REG m7, H, m0 ; H H H H ... | |
465 | %else | |
466 | movd m7, Hd | |
467 | SPLATB_MIX m7 | |
468 | %endif | |
469 | pxor m7, m8 | |
470 | pxor m4, m8 | |
471 | pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) | |
472 | CMP_LTE m4, m6, m5 ; abs(p1 - p0) <= 1 | |
473 | pand m2, m4 ; (flat8in) | |
474 | ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) | |
475 | pxor m4, m8 | |
476 | pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) | |
477 | por m0, m5 ; hev final value | |
478 | CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 | |
479 | pand m2, m4 ; (flat8in) | |
480 | ABSSUB_CMP m1, m14, m12, m6, m4, m5, m8 ; abs(q2 - q0) <= 1 | |
481 | pand m2, m1 | |
482 | ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 | |
483 | pand m2, m1 ; flat8in final value | |
484 | %if %2 == 84 || %2 == 48 | |
485 | pand m2, [mask_mix%2] | |
486 | %endif | |
487 | %else | |
488 | mova m6, [pb_80] | |
489 | movd m7, Hd | |
490 | SPLATB_MIX m7 | |
491 | pxor m7, m6 | |
492 | ABSSUB m4, m10, m11, m1 ; abs(p1 - p0) | |
493 | pxor m4, m6 | |
494 | pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) | |
495 | ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) | |
496 | pxor m4, m6 | |
497 | pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) | |
498 | por m0, m5 ; hev final value | |
499 | %endif | |
500 | ||
501 | %if %2 == 16 | |
502 | ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3) | |
503 | ; calc flat8out mask | |
504 | mova m8, [P7] | |
505 | mova m9, [P6] | |
506 | ABSSUB_CMP m1, m8, m11, m6, m4, m5 ; abs(p7 - p0) <= 1 | |
507 | ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p6 - p0) <= 1 | |
508 | pand m1, m7 | |
509 | mova m8, [P5] | |
510 | mova m9, [P4] | |
511 | ABSSUB_CMP m7, m8, m11, m6, m4, m5 ; abs(p5 - p0) <= 1 | |
512 | pand m1, m7 | |
513 | ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p4 - p0) <= 1 | |
514 | pand m1, m7 | |
515 | mova m14, [Q4] | |
516 | mova m15, [Q5] | |
517 | ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 | |
518 | pand m1, m7 | |
519 | ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 | |
520 | pand m1, m7 | |
    mova               m14, [Q6]
    mova               m15, [Q7]
    ABSSUB_CMP          m7, m14, m12, m6, m4, m5       ; abs(q6 - q0) <= 1
    pand                m1, m7
    ABSSUB_CMP          m7, m15, m12, m6, m4, m5       ; abs(q7 - q0) <= 1
    pand                m1, m7                         ; flat8out final value
%endif
528 | ||
529 | ; if (fm) { | |
530 | ; if (out && in) filter_14() | |
531 | ; else if (in) filter_6() | |
532 | ; else if (hev) filter_2() | |
533 | ; else filter_4() | |
534 | ; } | |
535 | ; | |
536 | ; f14: fm & out & in | |
537 | ; f6: fm & ~f14 & in => fm & ~(out & in) & in => fm & ~out & in | |
538 | ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev | |
539 | ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev | |
540 | ||
541 | ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7) | |
542 | ; filter2() | |
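    ; scalar sketch of what follows (f is computed on the 0x80-rebiased,
    ; i.e. signed, pixels, with saturation at every step):
    ;   f  = clip_i8(3 * (q0 - p0) + (p1 - q1))
    ;   f1 = clip_i8(f + 4) >> 3;   q0' = clip_u8(q0 - f1)
    ;   f2 = clip_i8(f + 3) >> 3;   p0' = clip_u8(p0 + f2)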
%if %2 != 44
    mova                m6, [pb_80]         ; already in m6 if 44_16
%endif
    pxor               m15, m12, m6         ; q0 ^ 0x80
    pxor               m14, m11, m6         ; p0 ^ 0x80
    psubsb             m15, m14             ; (signed) q0 - p0
    pxor                m4, m10, m6         ; p1 ^ 0x80
    pxor                m5, m13, m6         ; q1 ^ 0x80
    psubsb              m4, m5              ; (signed) p1 - q1
    paddsb              m4, m15             ;   (q0 - p0) + (p1 - q1)
    paddsb              m4, m15             ; 2*(q0 - p0) + (p1 - q1)
    paddsb              m4, m15             ; 3*(q0 - p0) + (p1 - q1)
    paddsb              m6, m4, [pb_4]      ; m6: f1 = clip(f + 4, 127)
    paddsb              m4, [pb_3]          ; m4: f2 = clip(f + 3, 127)
    mova               m14, [pb_10]         ; will be reused in filter4()
    SRSHIFT3B_2X        m6, m4, m14, m7     ; f1 and f2 sign byte shift by 3
    SIGN_SUB            m7, m12, m6, m5, m9 ; m7 = q0 - f1
    SIGN_ADD            m8, m11, m4, m5, m9 ; m8 = p0 + f2
%if %2 != 44
    pandn               m6, m2, m3          ;  ~mask(in) & mask(fm)
    pand                m6, m0              ; (~mask(in) & mask(fm)) & mask(hev)
%else
    pand                m6, m3, m0
%endif
    MASK_APPLY          m7, m12, m6, m5     ; m7 = filter2(q0) & mask / we write it in filter4()
    MASK_APPLY          m8, m11, m6, m5     ; m8 = filter2(p0) & mask / we write it in filter4()
569 | ||
570 | ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) | |
571 | ; filter4() | |
572 | mova m4, m15 | |
573 | paddsb m15, m4 ; 2 * (q0 - p0) | |
574 | paddsb m15, m4 ; 3 * (q0 - p0) | |
575 | paddsb m6, m15, [pb_4] ; m6: f1 = clip(f + 4, 127) | |
576 | paddsb m15, [pb_3] ; m15: f2 = clip(f + 3, 127) | |
577 | SRSHIFT3B_2X m6, m15, m14, m9 ; f1 and f2 sign byte shift by 3 | |
578 | %if %2 != 44 | |
579 | %define p0tmp m7 | |
580 | %define q0tmp m9 | |
581 | pandn m5, m2, m3 ; ~mask(in) & mask(fm) | |
582 | pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) | |
583 | %else | |
584 | %define p0tmp m1 | |
585 | %define q0tmp m2 | |
586 | pandn m0, m3 | |
587 | %endif | |
588 | SIGN_SUB q0tmp, m12, m6, m4, m14 ; q0 - f1 | |
589 | MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask | |
590 | mova [Q0], q0tmp | |
591 | SIGN_ADD p0tmp, m11, m15, m4, m14 ; p0 + f2 | |
592 | MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask | |
593 | mova [P0], p0tmp | |
594 | paddb m6, [pb_80] ; | |
595 | pxor m8, m8 ; f=(f1+1)>>1 | |
596 | pavgb m6, m8 ; | |
597 | psubb m6, [pb_40] ; | |
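    ; the sequence above computes f = (f1 + 1) >> 1 on signed bytes:
    ; paddb pb_80 rebiases to unsigned, pavgb against zero averages with
    ; rounding ((x + 1) >> 1), and psubb pb_40 removes the halved bias.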
    SIGN_ADD            m7, m10, m6, m8, m9 ; p1 + f
    SIGN_SUB            m4, m13, m6, m8, m9 ; q1 - f
    MASK_APPLY          m7, m10, m0, m14    ; m7 = filter4(p1)
    MASK_APPLY          m4, m13, m0, m14    ; m4 = filter4(q1)
    mova              [P1], m7
    mova              [Q1], m4
604 | ||
605 | ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) | |
606 | ; filter6() | |
607 | %if %2 != 44 | |
608 | pxor m0, m0 | |
609 | %if %2 > 16 | |
610 | pand m3, m2 | |
611 | %else | |
612 | pand m2, m3 ; mask(fm) & mask(in) | |
613 | pandn m3, m1, m2 ; ~mask(out) & (mask(fm) & mask(in)) | |
614 | %endif | |
615 | mova m14, [P3] | |
616 | mova m15, [P2] | |
617 | mova m8, [Q2] | |
618 | mova m9, [Q3] | |
619 | FILTER_INIT m4, m5, m6, m7, [P2], 6, m3, m15 ; [p2] | |
620 | FILTER_UPDATE m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3 ; [p1] -p3 -p2 +p1 +q1 | |
621 | FILTER_UPDATE m4, m5, m6, m7, [P0], m14, m10, m11, m8, 3, m3 ; [p0] -p3 -p1 +p0 +q2 | |
622 | FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3 ; [q0] -p3 -p0 +q0 +q3 | |
623 | FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 | |
624 | FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 | |
625 | %endif | |
626 | ||
627 | ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) | |
628 | ; filter14() | |
629 | ; | |
630 | ; m2 m3 m8 m9 m14 m15 m10 m11 m12 m13 | |
631 | ; | |
632 | ; q2 q3 p3 p2 p1 p0 q0 q1 | |
633 | ; p6 -7 p7 p6 p5 p4 . . . . . | |
634 | ; p5 -6 -p7 -p6 +p5 +q1 . . . . | |
635 | ; p4 -5 -p7 -p5 +p4 +q2 . . . q2 | |
636 | ; p3 -4 -p7 -p4 +p3 +q3 . . . q3 | |
637 | ; p2 -3 -p7 -p3 +p2 +q4 . . . q4 | |
638 | ; p1 -2 -p7 -p2 +p1 +q5 . . . q5 | |
639 | ; p0 -1 -p7 -p1 +p0 +q6 . . . q6 | |
640 | ; q0 +0 -p7 -p0 +q0 +q7 . . . q7 | |
641 | ; q1 +1 -p6 -q0 +q1 +q7 q1 . . . | |
642 | ; q2 +2 -p5 -q1 +q2 +q7 . q2 . . | |
643 | ; q3 +3 -p4 -q2 +q3 +q7 . q3 . . | |
644 | ; q4 +4 -p3 -q3 +q4 +q7 . q4 . . | |
645 | ; q5 +5 -p2 -q4 +q5 +q7 . q5 . . | |
646 | ; q6 +6 -p1 -q5 +q6 +q7 . q6 . . | |
647 | ||
%if %2 == 16
    pand                m1, m2              ; mask(out) & (mask(fm) & mask(in))
    mova                m2, [P7]
    mova                m3, [P6]
    mova                m8, [P5]
    mova                m9, [P4]
    FILTER_INIT         m4, m5, m6, m7, [P6], 14, m1, m3
    FILTER_UPDATE       m6, m7, m4, m5, [P5], m2,  m3,  m8, m13, 4, m1,  m8             ; [p5] -p7 -p6 +p5 +q1
    FILTER_UPDATE       m4, m5, m6, m7, [P4], m2,  m8,  m9, m13, 4, m1,  m9, m13, [Q2]  ; [p4] -p7 -p5 +p4 +q2
    FILTER_UPDATE       m6, m7, m4, m5, [P3], m2,  m9, m14, m13, 4, m1, m14, m13, [Q3]  ; [p3] -p7 -p4 +p3 +q3
    FILTER_UPDATE       m4, m5, m6, m7, [P2], m2, m14, m15, m13, 4, m1,      m13, [Q4]  ; [p2] -p7 -p3 +p2 +q4
    FILTER_UPDATE       m6, m7, m4, m5, [P1], m2, m15, m10, m13, 4, m1,      m13, [Q5]  ; [p1] -p7 -p2 +p1 +q5
    FILTER_UPDATE       m4, m5, m6, m7, [P0], m2, m10, m11, m13, 4, m1,      m13, [Q6]  ; [p0] -p7 -p1 +p0 +q6
    FILTER_UPDATE       m6, m7, m4, m5, [Q0], m2, m11, m12, m13, 4, m1,      m13, [Q7]  ; [q0] -p7 -p0 +q0 +q7
    FILTER_UPDATE       m4, m5, m6, m7, [Q1], m3, m12,  m2, m13, 4, m1,  m2, [Q1]       ; [q1] -p6 -q0 +q1 +q7
    FILTER_UPDATE       m6, m7, m4, m5, [Q2], m8,  m2,  m3, m13, 4, m1,  m3, [Q2]       ; [q2] -p5 -q1 +q2 +q7
    FILTER_UPDATE       m4, m5, m6, m7, [Q3], m9,  m3,  m8, m13, 4, m1,  m8,  m8, [Q3]  ; [q3] -p4 -q2 +q3 +q7
    FILTER_UPDATE       m6, m7, m4, m5, [Q4], m14, m8,  m9, m13, 4, m1,  m9,  m9, [Q4]  ; [q4] -p3 -q3 +q4 +q7
    FILTER_UPDATE       m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5]  ; [q5] -p2 -q4 +q5 +q7
    FILTER_UPDATE       m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7
%endif
669 | ||
670 | %ifidn %1, h | |
671 | %if %2 == 16 | |
672 | mova m0, [P7] | |
673 | mova m1, [P6] | |
674 | mova m2, [P5] | |
675 | mova m3, [P4] | |
676 | mova m4, [P3] | |
677 | mova m5, [P2] | |
678 | mova m6, [P1] | |
679 | mova m7, [P0] | |
680 | mova m8, [Q0] | |
681 | mova m9, [Q1] | |
682 | mova m10, [Q2] | |
683 | mova m11, [Q3] | |
684 | mova m12, [Q4] | |
685 | mova m13, [Q5] | |
686 | mova m14, [Q6] | |
687 | mova m15, [Q7] | |
688 | TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp] | |
689 | DEFINE_REAL_P7_TO_Q7 | |
690 | movu [P7], m0 | |
691 | movu [P6], m1 | |
692 | movu [P5], m2 | |
693 | movu [P4], m3 | |
694 | movu [P3], m4 | |
695 | movu [P2], m5 | |
696 | movu [P1], m6 | |
697 | movu [P0], m7 | |
698 | movu [Q0], m8 | |
699 | movu [Q1], m9 | |
700 | movu [Q2], m10 | |
701 | movu [Q3], m11 | |
702 | movu [Q4], m12 | |
703 | movu [Q5], m13 | |
704 | movu [Q6], m14 | |
705 | movu [Q7], m15 | |
%elif %2 == 44
    SWAP 0, 7   ; m0 = p1
    SWAP 3, 4   ; m3 = q1
    DEFINE_REAL_P7_TO_Q7 2
    SBUTTERFLY bw, 0, 1, 8
    SBUTTERFLY bw, 2, 3, 8
    SBUTTERFLY wd, 0, 2, 8
    SBUTTERFLY wd, 1, 3, 8
    SBUTTERFLY dq, 0, 4, 8
    SBUTTERFLY dq, 1, 5, 8
    SBUTTERFLY dq, 2, 6, 8
    SBUTTERFLY dq, 3, 7, 8
    movd              [P7], m0
    punpckhqdq          m0, m8
    movd              [P6], m0
    movd              [Q0], m1
    punpckhqdq          m1, m9
    movd              [Q1], m1
    movd              [P3], m2
    punpckhqdq          m2, m10
    movd              [P2], m2
    movd              [Q4], m3
    punpckhqdq          m3, m11
    movd              [Q5], m3
    movd              [P5], m4
    punpckhqdq          m4, m12
    movd              [P4], m4
    movd              [Q2], m5
    punpckhqdq          m5, m13
    movd              [Q3], m5
    movd              [P1], m6
    punpckhqdq          m6, m14
    movd              [P0], m6
    movd              [Q6], m7
    punpckhqdq          m7, m8
    movd              [Q7], m7
%else
    ; The following code does a transpose of 8 full lines to 16 half
    ; lines (high part). It is inlined to avoid the need for a staging area.
    mova                m0, [P3]
    mova                m1, [P2]
    mova                m2, [P1]
    mova                m3, [P0]
    mova                m4, [Q0]
    mova                m5, [Q1]
    mova                m6, [Q2]
    mova                m7, [Q3]
    DEFINE_REAL_P7_TO_Q7
    SBUTTERFLY bw, 0, 1, 8
    SBUTTERFLY bw, 2, 3, 8
    SBUTTERFLY bw, 4, 5, 8
    SBUTTERFLY bw, 6, 7, 8
    SBUTTERFLY wd, 0, 2, 8
    SBUTTERFLY wd, 1, 3, 8
    SBUTTERFLY wd, 4, 6, 8
    SBUTTERFLY wd, 5, 7, 8
    SBUTTERFLY dq, 0, 4, 8
    SBUTTERFLY dq, 1, 5, 8
    SBUTTERFLY dq, 2, 6, 8
    SBUTTERFLY dq, 3, 7, 8
    movh              [P7], m0
    punpckhqdq          m0, m8
    movh              [P6], m0
    movh              [Q0], m1
    punpckhqdq          m1, m9
    movh              [Q1], m1
    movh              [P3], m2
    punpckhqdq          m2, m10
    movh              [P2], m2
    movh              [Q4], m3
    punpckhqdq          m3, m11
    movh              [Q5], m3
    movh              [P5], m4
    punpckhqdq          m4, m12
    movh              [P4], m4
    movh              [Q2], m5
    punpckhqdq          m5, m13
    movh              [Q3], m5
    movh              [P1], m6
    punpckhqdq          m6, m14
    movh              [P0], m6
    movh              [Q6], m7
    punpckhqdq          m7, m8
    movh              [Q7], m7
%endif
%endif
792 | ||
793 | RET | |
794 | %endmacro | |
795 | ||
%macro LPF_16_VH 2
INIT_XMM %2
cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
    LOOPFILTER v, %1
cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
    LOOPFILTER h, %1
%endmacro

%macro LPF_16_VH_ALL_OPTS 1
LPF_16_VH %1, sse2
LPF_16_VH %1, ssse3
LPF_16_VH %1, avx
%endmacro

LPF_16_VH_ALL_OPTS 16
LPF_16_VH_ALL_OPTS 44
LPF_16_VH_ALL_OPTS 48
LPF_16_VH_ALL_OPTS 84
LPF_16_VH_ALL_OPTS 88

%endif ; x86-64