;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

pw_11585x2: times 8 dw 23170
pw_m11585x2: times 8 dw -23170

%macro VP9_IDCT_COEFFS 2-3 0
pw_%1x2: times 8 dw %1*2
pw_m%1x2: times 8 dw -%1*2
pw_%2x2: times 8 dw %2*2
pw_m%2x2: times 8 dw -%2*2
pw_m%1_%2: times 4 dw -%1, %2
pw_%2_%1: times 4 dw %2, %1
pw_m%2_m%1: times 4 dw -%2, -%1
pw_m%2_%1: times 4 dw -%2, %1
pw_%1_%2: times 4 dw %1, %2

VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 2404, 16207

pw_5283_13377: times 4 dw 5283, 13377
pw_9929_13377: times 4 dw 9929, 13377
pw_15212_m13377: times 4 dw 15212, -13377
pw_15212_9929: times 4 dw 15212, 9929
pw_m5283_m15212: times 4 dw -5283, -15212
pw_13377x2: times 8 dw 13377*2

pd_8192: times 4 dd 8192
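
; Informational note (not from the original source): the pw_* values above are
; the VP9 cospi_*/sinpi_* trig constants in Q14 fixed point (e.g. 11585 is
; round(cos(pi/4) * 2^14), 15137/6270 are cospi_8_64/cospi_24_64). The "*2"
; variants exist because pmulhrsw computes (a * b + 0x4000) >> 15, so using
; coef*2 as the multiplier yields (a * coef + 0x2000) >> 14, i.e. exactly the
; spec's dct_const_round_shift(). pd_8192 (1 << 13) is the same rounding term,
; used when products are kept as 32-bit dwords and shifted down by 14
; explicitly.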
; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2

%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]

%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
punpckhwd m%6, m%2, m%1
VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
punpckhwd m%8, m%4, m%3
punpcklwd m%2, m%4, m%3
VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
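
; Informational sketch (not from the original source): the MULSUB_2W macros
; implement the standard VP9 butterfly on packed 16-bit inputs, roughly
; equivalent to the reference decoder's
;     dst1 = (a * coef2 - b * coef1 + 8192) >> 14;   /* dct_const_round_shift */
;     dst2 = (a * coef1 + b * coef2 + 8192) >> 14;
; The words are interleaved with punpckl/hwd so that pmaddwd against the
; pw_m<c1>_<c2> / pw_<c2>_<c1> constant pairs produces both dot products.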
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
punpckhwd m%4, m%2, m%1
pmaddwd m%3, m%4, [pw_m%5_%6]
pmaddwd m%4, [pw_%6_%5]
pmaddwd m%1, m%2, [pw_m%5_%6]
pmaddwd m%2, [pw_%6_%5]

%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
SUMSUB_BA d, %1, %2, %5
SUMSUB_BA d, %3, %4, %5
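
; Informational note (not from the original source): the 2D variant keeps the
; pmaddwd dot products as unrounded 32-bit dwords (split across two register
; pairs). VP9_RND_SH_SUMSUB_BA then takes two such dword results, forms their
; sum and difference, adds the rounding constant, shifts right by 14 and packs
; back down to 16-bit words. Splitting it this way lets the IADST code
; add/subtract intermediates at full 32-bit precision before the final
; rounding.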
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
movh m%4, [%6+strideq]
movh [%6+strideq], m%4
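
; Informational sketch (not from the original source): VP9_STORE_2X adds two
; rows of 16-bit residuals to the destination pixels with unsigned saturation,
; roughly:
;     for (i = 0; i < 8; i++) {
;         dst[i]          = av_clip_uint8(dst[i]          + res0[i]);
;         dst[stride + i] = av_clip_uint8(dst[stride + i] + res1[i]);
;     }
; using movh loads, punpcklbw against the zero register, paddw and packuswb
; before storing back.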
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
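
; Informational note (not from the original source): ZERO_BLOCK clears the
; coefficient buffer after the transform so it can be reused for the next
; block. It behaves roughly like
;     for (y = 0; y < nnzcpl; y++)
;         memset(mem + y * stride, 0, nnzcpl * sizeof(int16_t));
; i.e. it zeroes the top-left square of rows that could contain non-zero
; coefficients, unrolled as mova stores of the zero register (%4).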
;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IWHT4_1D 0

cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
mova m0, [blockq+0*8]
mova m1, [blockq+1*8]
mova m2, [blockq+2*8]
mova m3, [blockq+3*8]
TRANSPOSE4x4W 0, 1, 2, 3, 4
VP9_STORE_2X 0, 1, 5, 6, 4
lea dstq, [dstq+strideq*2]
VP9_STORE_2X 2, 3, 5, 6, 4
ZERO_BLOCK blockq, 8, 4, m4

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT4_1D_FINALIZE 0
SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
SWAP 0, 3, 2 ; 3102 -> 0123

%macro VP9_IDCT4_1D 0
SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
pmulhrsw m2, m6 ; m2=t0
pmulhrsw m0, m6 ; m0=t1
VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
VP9_IDCT4_1D_FINALIZE
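
; Informational sketch (not from the original source): one pass of the 4-point
; IDCT above corresponds to the reference code
;     t0 = ((in0 + in2) * 11585 + 8192) >> 14;
;     t1 = ((in0 - in2) * 11585 + 8192) >> 14;
;     t2 = (in1 * 6270 - in3 * 15137 + 8192) >> 14;
;     t3 = (in1 * 15137 + in3 * 6270 + 8192) >> 14;
;     out0 = t0 + t3; out1 = t1 + t2; out2 = t1 - t2; out3 = t0 - t3;
; with the t0/t1 multiplies done via pmulhrsw against pw_11585x2 (m6).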
; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
pmulhrsw m0, m5 ; m0=t1
pmulhrsw m1, m6 ; m1=t2
pmulhrsw m3, m7 ; m3=t3
VP9_IDCT4_1D_FINALIZE

%macro VP9_IDCT4_WRITEOUT 0
pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 2, 3, 6, 7, 4
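
; Informational note (not from the original source): the writeout implements
; the spec's final ROUND_POWER_OF_TWO(x, 4) for the 4x4 transform. Because
; pmulhrsw computes (x * c + 0x4000) >> 15, multiplying by c = 2048 (pw_2048)
; gives exactly (x + 8) >> 4, so no separate add/shift pair is needed.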
cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
cmp eobd, 4 ; 2x2 or smaller
cmp eobd, 1 ; faster path for when only DC is set
mova m5, [pw_11585x2]
pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
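
; Informational sketch (not from the original source): the DC-only path above
; matches the reference shortcut
;     dc = (in[0] * 11585 + 8192) >> 14;   /* row pass    */
;     dc = (dc    * 11585 + 8192) >> 14;   /* column pass */
;     dc = (dc + 8) >> 4;                  /* final rounding */
; after which the same value is added to all 16 destination pixels.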
; faster path for when only top left 2x2 block is set
mova m5, [pw_11585x2]
mova m7, [pw_15137x2]
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct
mova m6, [pw_11585x2]
mova m7, [pd_8192] ; rounding
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IADST4_1D 0
pmaddwd xmm1, xmm0, [pw_5283_13377]
pmaddwd xmm4, xmm0, [pw_9929_13377]
pmaddwd xmm0, [pw_15212_m13377]
pmaddwd xmm3, xmm2, [pw_15212_9929]
pmaddwd xmm2, [pw_m5283_m15212]
paddd xmm3, [pd_8192]
paddd xmm2, [pd_8192]
pmulhrsw m3, [pw_13377x2] ; out2
movdq2q m0, xmm0 ; out3
movdq2q m1, xmm1 ; out0
movdq2q m2, xmm4 ; out1
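
; Informational note (not from the original source): the constants used above
; are the VP9 4-point ADST factors sinpi_1_9..sinpi_4_9 in Q14 (5283, 9929,
; 13377, 15212). out2 in particular reduces to
;     out2 = (13377 * (in0 - in2 + in3) + 8192) >> 14;
; which is why it can be computed with a single pmulhrsw against pw_13377x2.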
cglobal vp9_%1_%3_4x4_add, 3, 3, 8, dst, stride, block, eob
mova m6, [pw_11585x2]
mova m7, [pd_8192] ; rounding
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3
%if ARCH_X86_64 ; TODO: 32-bit? (32-bit is limited to 8 xmm registers, we use more)
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
SUMSUB_BA w, 3, 10, 4 ; m3=t0+t7, m10=t0-t7
SUMSUB_BA w, 1, 2, 4 ; m1=t1+t6, m2=t1-t6
SUMSUB_BA w, 11, 0, 4 ; m11=t2+t5, m0=t2-t5
SUMSUB_BA w, 9, 8, 4 ; m9=t3+t4, m8=t3-t4

%macro VP9_IDCT8_1D 0
SUMSUB_BA w, 8, 0, 4 ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
pmulhrsw m8, m12 ; m8=t0a
pmulhrsw m0, m12 ; m0=t1a
VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137, 6270, m7, 4, 5 ; m2=t2a, m10=t3a
VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069, 3196, m7, 4, 5 ; m1=t4a, m11=t7a
VP9_UNPACK_MULSUB_2W_4X 9, 3, 9102, 13623, m7, 4, 5 ; m9=t5a, m3=t6a
SUMSUB_BA w, 10, 8, 4 ; m10=t0a+t3a (t0), m8=t0a-t3a (t3)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m9=t4a+t5a (t4), m1=t4a-t5a (t5a)
SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
pmulhrsw m1, m12 ; m1=t6
pmulhrsw m11, m12 ; m11=t5
VP9_IDCT8_1D_FINALIZE
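
; Informational note (not from the original source): the 4x4 and 2x2 variants
; below rely on the upper coefficients being zero (guaranteed by the eob
; checks in the callers). With one input of each initial butterfly known to be
; zero, a*coef1 +/- 0*coef2 collapses to a single multiply, so plain pmulhrsw
; against the pw_<coef>x2 constants replaces the full VP9_UNPACK_MULSUB_2W_4X.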
%macro VP9_IDCT8_4x4_1D 0
pmulhrsw m0, m12 ; m0=t1a/t0a
pmulhrsw m10, m2, [pw_15137x2] ; m10=t3a
pmulhrsw m2, [pw_6270x2] ; m2=t2a
pmulhrsw m11, m1, [pw_16069x2] ; m11=t7a
pmulhrsw m1, [pw_3196x2] ; m1=t4a
pmulhrsw m9, m3, [pw_9102x2] ; m9=-t5a
pmulhrsw m3, [pw_13623x2] ; m3=t6a
psubw m8, m0, m10 ; m8=t0a-t3a (t3)
paddw m10, m0 ; m10=t0a+t3a (t0)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m1=t4a+t5a (t4), m9=t4a-t5a (t5a)
SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
pmulhrsw m1, m12 ; m1=t6
pmulhrsw m11, m12 ; m11=t5
VP9_IDCT8_1D_FINALIZE
; TODO: a lot of the t* copies can probably be removed and merged with
; the following SUMSUBs from VP9_IDCT8_1D_FINALIZE when using AVX
%macro VP9_IDCT8_2x2_1D 0
pmulhrsw m0, m12 ; m0=t0
pmulhrsw m1, m6 ; m1=t4
pmulhrsw m3, m7 ; m3=t7
mova m10, m0 ; m10=t2
mova m11, m3 ; t5 = t7a ...
mova m9, m3 ; t6 = t7a ...
psubw m11, m1 ; t5 = t7a - t4a
paddw m9, m1 ; t6 = t7a + t4a
pmulhrsw m11, m12 ; m11=t5
pmulhrsw m9, m12 ; m9=t6
VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_WRITEOUT 0
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 2, 3, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 8, 9, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 10, 11, 6, 7, 4
%macro VP9_IDCT_IDCT_8x8_ADD_XMM 1

cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
mova m12, [pw_11585x2] ; often used
cmp eobd, 12 ; top left half or less
cmp eobd, 3 ; top left corner or less
cmp eobd, 1 ; faster path for when only DC is set
jne .idcttopleftcorner
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top-left corner is set (3 inputs: DC, the one
; right of DC, the one below DC). Note: this also works for a 2x2 block
mova m7, [pw_16069x2]
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
movh m0, [blockq + 0]
movh m1, [blockq +16]
movh m2, [blockq +32]
movh m3, [blockq +48]
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4

.idctfull: ; generic full 8x8 idct/idct
mova m0, [blockq+ 0] ; IN(0)
mova m1, [blockq+ 16] ; IN(1)
mova m2, [blockq+ 32] ; IN(2)
mova m3, [blockq+ 48] ; IN(3)
mova m8, [blockq+ 64] ; IN(4)
mova m9, [blockq+ 80] ; IN(5)
mova m10, [blockq+ 96] ; IN(6)
mova m11, [blockq+112] ; IN(7)
mova m7, [pd_8192] ; rounding
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
ZERO_BLOCK blockq, 16, 8, m4

VP9_IDCT_IDCT_8x8_ADD_XMM ssse3
VP9_IDCT_IDCT_8x8_ADD_XMM avx

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/8/9/10/11
VP9_UNPACK_MULSUB_2D_4X 11, 0, 4, 5, 16305, 1606 ; m11/4=t1[d], m0/5=t0[d]
VP9_UNPACK_MULSUB_2D_4X 3, 8, 6, 13, 10394, 12665 ; m3/6=t5[d], m8/13=t4[d]
VP9_RND_SH_SUMSUB_BA 8, 0, 13, 5, 14, m7 ; m8=t0[w], m0=t4[w]
VP9_RND_SH_SUMSUB_BA 3, 11, 6, 4, 14, m7 ; m3=t1[w], m11=t5[w]
VP9_UNPACK_MULSUB_2D_4X 9, 2, 4, 5, 14449, 7723 ; m9/4=t3[d], m2/5=t2[d]
VP9_UNPACK_MULSUB_2D_4X 1, 10, 6, 13, 4756, 15679 ; m1/6=t7[d], m10/13=t6[d]
VP9_RND_SH_SUMSUB_BA 10, 2, 13, 5, 14, m7 ; m10=t2[w], m2=t6[w]
VP9_RND_SH_SUMSUB_BA 1, 9, 6, 4, 14, m7 ; m1=t3[w], m9=t7[w]

; m8=t0, m3=t1, m10=t2, m1=t3, m0=t4, m11=t5, m2=t6, m9=t7

VP9_UNPACK_MULSUB_2D_4X 0, 11, 4, 5, 15137, 6270 ; m0/4=t5[d], m11/5=t4[d]
VP9_UNPACK_MULSUB_2D_4X 9, 2, 6, 13, 6270, 15137 ; m9/6=t6[d], m2/13=t7[d]
VP9_RND_SH_SUMSUB_BA 9, 11, 6, 5, 14, m7
psignw m9, [pw_m1] ; m9=out1[w], m11=t6[w]
VP9_RND_SH_SUMSUB_BA 2, 0, 13, 4, 14, m7 ; m2=out6[w], m0=t7[w]
SUMSUB_BA w, 10, 8, 14 ; m10=out0[w], m8=t2[w]
SUMSUB_BA w, 1, 3, 14
psignw m1, [pw_m1] ; m1=out7[w], m3=t3[w]

; m10=out0, m9=out1, m8=t2, m3=t3, m11=t6, m0=t7, m2=out6, m1=out7

SUMSUB_BA w, 0, 11, 5
pmulhrsw m8, m12 ; out4
pmulhrsw m0, m12 ; out2
psignw m3, [pw_m1] ; out3
psignw m11, [pw_m1] ; out5

; m10=out0, m9=out1, m0=out2, m3=out3, m8=out4, m11=out5, m2=out6, m1=out7
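
; Informational note (not from the original source): the VP9 8-point ADST
; defines its odd-numbered outputs with a sign flip (out1 = -x4, out3 = -x2,
; out5 = -x7, out7 = -x1 in the reference code), which is what the psignw
; against pw_m1 (a vector of -1 words) implements above.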
cglobal vp9_%1_%3_8x8_add, 3, 3, 15, dst, stride, block, eob
mova m0, [blockq+ 0] ; IN(0)
mova m1, [blockq+ 16] ; IN(1)
mova m2, [blockq+ 32] ; IN(2)
mova m3, [blockq+ 48] ; IN(3)
mova m8, [blockq+ 64] ; IN(4)
mova m9, [blockq+ 80] ; IN(5)
mova m10, [blockq+ 96] ; IN(6)
mova m11, [blockq+112] ; IN(7)
mova m12, [pw_11585x2] ; often used
mova m7, [pd_8192] ; rounding
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
ZERO_BLOCK blockq, 16, 8, m4

IADST8_FN idct, IDCT8, iadst, IADST8, ssse3
IADST8_FN idct, IDCT8, iadst, IADST8, avx
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3
IADST8_FN iadst, IADST8, idct, IDCT8, avx
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3
IADST8_FN iadst, IADST8, iadst, IADST8, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; at the end of this macro, m7 is stored in stack_scratch
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
%macro VP9_IDCT16_1D_START 4 ; src, nnzc, stride, stack_scratch
mova m3, [%1+ 1*%3] ; IN(1)
mova m12, [%1+ 2*%3] ; IN(2)
mova m0, [%1+ 3*%3] ; IN(3)
pmulhrsw m15, m12, [pw_16069x2] ; t6-7
pmulhrsw m12, [pw_3196x2] ; t4-5
pmulhrsw m4, m3, [pw_16305x2] ; t14-15
pmulhrsw m3, [pw_1606x2] ; t8-9
pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
pmulhrsw m0, [pw_15679x2] ; t12-13

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

pmulhrsw m13, [pw_11585x2] ; t5
pmulhrsw m14, [pw_11585x2] ; t6
VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 10, 11 ; t10, t13

; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

mova m5, [%1+ 1*%3] ; IN(1)
mova m14, [%1+ 2*%3] ; IN(2)
mova m6, [%1+ 3*%3] ; IN(3)
mova m9, [%1+ 4*%3] ; IN(4)
mova m7, [%1+ 5*%3] ; IN(5)
mova m15, [%1+ 6*%3] ; IN(6)
mova m4, [%1+ 7*%3] ; IN(7)
pmulhrsw m8, m9, [pw_15137x2] ; t3
pmulhrsw m9, [pw_6270x2] ; t2
pmulhrsw m13, m14, [pw_16069x2] ; t7
pmulhrsw m14, [pw_3196x2] ; t4
pmulhrsw m12, m15, [pw_m9102x2] ; t5
pmulhrsw m15, [pw_13623x2] ; t6
pmulhrsw m2, m5, [pw_16305x2] ; t15
pmulhrsw m5, [pw_1606x2] ; t8
pmulhrsw m3, m4, [pw_m10394x2] ; t9
pmulhrsw m4, [pw_12665x2] ; t14
pmulhrsw m0, m7, [pw_14449x2] ; t13
pmulhrsw m7, [pw_7723x2] ; t10
pmulhrsw m1, m6, [pw_m4756x2] ; t11
pmulhrsw m6, [pw_15679x2] ; t12

mova m3, [%1+ 9*%3] ; IN(9)
mova m12, [%1+10*%3] ; IN(10)
mova m0, [%1+11*%3] ; IN(11)
mova m8, [%1+12*%3] ; IN(12)
mova m1, [%1+13*%3] ; IN(13)
mova m13, [%1+14*%3] ; IN(14)
mova m2, [%1+15*%3] ; IN(15)

; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
; m11=in8, m3=in9, m12=in10, m0=in11, m8=in12, m1=in13, m13=in14, m2=in15

VP9_UNPACK_MULSUB_2W_4X 9, 8, 15137, 6270, [pd_8192], 10, 11 ; t2, t3
VP9_UNPACK_MULSUB_2W_4X 14, 13, 16069, 3196, [pd_8192], 10, 11 ; t4, t7
VP9_UNPACK_MULSUB_2W_4X 12, 15, 9102, 13623, [pd_8192], 10, 11 ; t5, t6
VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 10, 11 ; t8, t15
VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 10, 11 ; t10, t13
VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 10, 11 ; t11, t12

; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15

SUMSUB_BA w, 12, 14, 10 ; t4, t5
SUMSUB_BA w, 15, 13, 10 ; t7, t6
SUMSUB_BA w, 3, 5, 10 ; t8, t9
SUMSUB_BA w, 7, 1, 10 ; t11, t10
SUMSUB_BA w, 0, 6, 10 ; t12, t13
SUMSUB_BA w, 4, 2, 10 ; t15, t14

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

SUMSUB_BA w, 14, 13, 10
pmulhrsw m13, [pw_11585x2] ; t5
pmulhrsw m14, [pw_11585x2] ; t6
VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 10, 11 ; t10, t13

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15

SUMSUB_BA w, 7, 3, 10 ; t8, t11
SUMSUB_BA w, 6, 2, 10 ; t9, t10
SUMSUB_BA w, 0, 4, 10 ; t15, t12
SUMSUB_BA w, 1, 5, 10 ; t14, t13

; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

SUMSUB_BA w, 2, 5, 10
SUMSUB_BA w, 3, 4, 10
pmulhrsw m5, [pw_11585x2] ; t10
pmulhrsw m4, [pw_11585x2] ; t11
pmulhrsw m3, [pw_11585x2] ; t12
pmulhrsw m2, [pw_11585x2] ; t13

; backup first register

; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15

mova m11, [%1+ 0*%3] ; IN(0)
pmulhrsw m11, [pw_11585x2] ; t0-t3
mova m10, [%1+ 0*%3] ; IN(0)
pmulhrsw m10, [pw_11585x2] ; t0 and t1
mova m11, [%1+ 8*%3] ; IN(8)
SUMSUB_BA w, 11, 10, 7
pmulhrsw m11, [pw_11585x2] ; t0
pmulhrsw m10, [pw_11585x2] ; t1
SUMSUB_BA w, 8, 11, 7 ; t0, t3
SUMSUB_BA w, 9, 10, 7 ; t1, t2
SUMSUB_BA w, 15, 8, 7 ; t0, t7
SUMSUB_BA w, 14, 9, 7 ; t1, t6
SUMSUB_BA w, 13, 10, 7 ; t2, t5
SUMSUB_BA w, 12, 11, 7 ; t3, t4
SUMSUB_BA w, 0, 15, 7 ; t0, t15
SUMSUB_BA w, 1, 14, 7 ; t1, t14
SUMSUB_BA w, 2, 13, 7 ; t2, t13
SUMSUB_BA w, 3, 12, 7 ; t3, t12
SUMSUB_BA w, 4, 11, 7 ; t4, t11
SUMSUB_BA w, 5, 10, 7 ; t5, t10

%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
VP9_IDCT16_1D_START %1, %3, 32, tmpq+32

; backup a different register
SUMSUB_BA w, 6, 9, 15 ; t6, t9
SUMSUB_BA w, 7, 8, 15 ; t7, t8
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0

; backup more registers
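
; Informational note (not from the original source): pw_512 below plays the
; same role as pw_2048/pw_1024 in the 4x4/8x8 writeouts: pmulhrsw by 512 is
; (x * 512 + 0x4000) >> 15 == (x + 32) >> 6, the final rounding shift of the
; 16x16 (and 32x32) inverse transform.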
pmulhrsw m0, [pw_512]
pmulhrsw m1, [pw_512]
VP9_STORE_2X 0, 1, 8, 9, 7
lea dstq, [dstq+strideq*2]
pmulhrsw m2, [pw_512]
pmulhrsw m3, [pw_512]
VP9_STORE_2X 2, 3, 8, 9, 7
lea dstq, [dstq+strideq*2]
pmulhrsw m4, [pw_512]
pmulhrsw m5, [pw_512]
VP9_STORE_2X 4, 5, 8, 9, 7
lea dstq, [dstq+strideq*2]

SWAP 0, 7 ; move zero from m7 to m0

SUMSUB_BA w, 6, 9, 1 ; t6, t9
SUMSUB_BA w, 7, 8, 1 ; t7, t8
pmulhrsw m6, [pw_512]
pmulhrsw m7, [pw_512]
VP9_STORE_2X 6, 7, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m8, [pw_512]
pmulhrsw m9, [pw_512]
VP9_STORE_2X 8, 9, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m10, [pw_512]
pmulhrsw m11, [pw_512]
VP9_STORE_2X 10, 11, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m12, [pw_512]
pmulhrsw m13, [pw_512]
VP9_STORE_2X 12, 13, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m14, [pw_512]
pmulhrsw m15, [pw_512]
VP9_STORE_2X 14, 15, 1, 2, 0
%macro VP9_STORE_2XFULL 6-7 strideq ; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
punpcklbw m%2, m%3, m%6
punpcklbw m%4, m%5, m%6

%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1

cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
; 2x2=eob=3, 4x4=eob=10
cmp eobd, 1 ; faster path for when only DC is set
mova m1, [pw_11585x2]
pmulhrsw m0, [pw_512]
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
lea dstq, [dstq+2*strideq]
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5

DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
VP9_IDCT16_1D blockq, 1, 8
VP9_IDCT16_1D tmpq, 2, 8
lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 8, m0

VP9_IDCT16_1D blockq, 1
VP9_IDCT16_1D tmpq, 2
lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 16, m0

VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass
mova m0, [%1+ 0*32] ; in0
mova m1, [%1+15*32] ; in15
mova m8, [%1+ 7*32] ; in7
mova m9, [%1+ 8*32] ; in8

VP9_UNPACK_MULSUB_2D_4X 1, 0, 2, 3, 16364, 804 ; m1/2=t1[d], m0/3=t0[d]
VP9_UNPACK_MULSUB_2D_4X 8, 9, 11, 10, 11003, 12140 ; m8/11=t9[d], m9/10=t8[d]
VP9_RND_SH_SUMSUB_BA 9, 0, 10, 3, 4, [pd_8192] ; m9=t0[w], m0=t8[w]
VP9_RND_SH_SUMSUB_BA 8, 1, 11, 2, 4, [pd_8192] ; m8=t1[w], m1=t9[w]

mova m11, [%1+ 2*32] ; in2
mova m10, [%1+13*32] ; in13
mova m3, [%1+ 5*32] ; in5
mova m2, [%1+10*32] ; in10

VP9_UNPACK_MULSUB_2D_4X 10, 11, 6, 7, 15893, 3981 ; m10/6=t3[d], m11/7=t2[d]
VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
VP9_RND_SH_SUMSUB_BA 2, 11, 5, 7, 12, [pd_8192] ; m2=t2[w], m11=t10[w]
VP9_RND_SH_SUMSUB_BA 3, 10, 4, 6, 12, [pd_8192] ; m3=t3[w], m10=t11[w]

mova [tmpq+ 0*%%str], m9 ; make some scratch space (t0:m9->r0)
mova m4, [%1+ 4*32] ; in4
mova m5, [%1+11*32] ; in11
mova m12, [%1+ 3*32] ; in3
mova m13, [%1+12*32] ; in12

VP9_UNPACK_MULSUB_2D_4X 5, 4, 7, 6, 14811, 7005 ; m5/7=t5[d], m4/6=t4[d]
VP9_UNPACK_MULSUB_2D_4X 12, 13, 14, 15, 5520, 15426 ; m12/14=t13[d], m13/15=t12[d]
VP9_RND_SH_SUMSUB_BA 13, 4, 15, 6, 9, [pd_8192] ; m13=t4[w], m4=t12[w]
VP9_RND_SH_SUMSUB_BA 12, 5, 14, 7, 9, [pd_8192] ; m12=t5[w], m5=t13[w]

mova [tmpq+ 2*%%str], m8 ; t1:m8->r2
mova [tmpq+ 3*%%str], m2 ; t2:m2->r3
mova [tmpq+ 4*%%str], m3 ; t3:m3->r4
mova [tmpq+ 5*%%str], m13 ; t4:m13->r5
mova m2, [%1+ 6*32] ; in6
mova m3, [%1+ 9*32] ; in9
mova m8, [%1+ 1*32] ; in1
mova m9, [%1+14*32] ; in14

VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
VP9_UNPACK_MULSUB_2D_4X 8, 9, 13, 14, 2404, 16207 ; m8/13=t15[d], m9/14=t14[d]
VP9_RND_SH_SUMSUB_BA 9, 2, 14, 6, 15, [pd_8192] ; m9=t6[w], m2=t14[w]
VP9_RND_SH_SUMSUB_BA 8, 3, 13, 7, 15, [pd_8192] ; m8=t7[w], m3=t15[w]

; r0=t0, r2=t1, r3=t2, r4=t3, r5=t4, m12=t5, m9=t6, m8=t7
; m0=t8, m1=t9, m11=t10, m10=t11, m4=t12, m5=t13, m2=t14, m3=t15

; handle t8-15 first
VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
VP9_UNPACK_MULSUB_2D_4X 5, 4, 13, 14, 3196, 16069 ; m5/13=t12[d], m4/14=t13[d]
VP9_RND_SH_SUMSUB_BA 5, 1, 13, 7, 15, [pd_8192] ; m5=t8[w], m1=t12[w]
VP9_RND_SH_SUMSUB_BA 4, 0, 14, 6, 15, [pd_8192] ; m4=t9[w], m0=t13[w]

VP9_UNPACK_MULSUB_2D_4X 11, 10, 6, 7, 9102, 13623 ; m11/6=t11[d], m10/7=t10[d]
VP9_UNPACK_MULSUB_2D_4X 3, 2, 13, 14, 13623, 9102 ; m3/13=t14[d], m2/14=t15[d]
VP9_RND_SH_SUMSUB_BA 3, 10, 13, 7, 15, [pd_8192] ; m3=t10[w], m10=t14[w]
VP9_RND_SH_SUMSUB_BA 2, 11, 14, 6, 15, [pd_8192] ; m2=t11[w], m11=t15[w]

; m5=t8, m4=t9, m3=t10, m2=t11, m1=t12, m0=t13, m10=t14, m11=t15

VP9_UNPACK_MULSUB_2D_4X 1, 0, 6, 7, 15137, 6270 ; m1/6=t13[d], m0/7=t12[d]
VP9_UNPACK_MULSUB_2D_4X 11, 10, 13, 14, 6270, 15137 ; m11/13=t14[d], m10/14=t15[d]
VP9_RND_SH_SUMSUB_BA 11, 0, 13, 7, 15, [pd_8192] ; m11=out2[w], m0=t14[w]
VP9_RND_SH_SUMSUB_BA 10, 1, 14, 6, 15, [pd_8192]
psignw m10, [pw_m1] ; m10=out13[w], m1=t15[w]

SUMSUB_BA w, 3, 5, 15
psignw m3, [pw_m1] ; m3=out1[w], m5=t10[w]
SUMSUB_BA w, 2, 4, 15 ; m2=out14[w], m4=t11[w]

SUMSUB_BA w, 5, 4, 15
pmulhrsw m5, [pw_11585x2] ; m5=out6[w]
pmulhrsw m4, [pw_11585x2] ; m4=out9[w]
SUMSUB_BA w, 1, 0, 15
pmulhrsw m1, [pw_m11585x2] ; m1=out5[w]
pmulhrsw m0, [pw_11585x2] ; m0=out10[w]

; m3=out1, m11=out2, m1=out5, m5=out6, m4=out9, m0=out10, m10=out13, m2=out14

mova m6, [tmpq+ 0*%%str]
mova m7, [tmpq+ 2*%%str]
mova m13, [tmpq+ 3*%%str]
mova m14, [tmpq+ 4*%%str]
mova m15, [tmpq+ 5*%%str]
mova [tmpq+ 8*%%str], m5
mova [tmpq+ 9*%%str], m4
mova [tmpq+10*%%str], m0
mova [tmpq+11*%%str], m10
mova [tmpq+12*%%str], m2

; m6=t0, m7=t1, m13=t2, m14=t3, m15=t4, m12=t5, m9=t6, m8=t7
; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14

SUMSUB_BA w, 15, 6, 0 ; m15=t0[w], m6=t4[w]
SUMSUB_BA w, 12, 7, 0 ; m12=t1[w], m7=t5[w]
SUMSUB_BA w, 9, 13, 0 ; m9=t2[w], m13=t6[w]
SUMSUB_BA w, 8, 14, 0 ; m8=t3[w], m14=t7[w]

VP9_UNPACK_MULSUB_2D_4X 6, 7, 0, 2, 15137, 6270 ; m6/0=t5[d], m7/2=t4[d]
VP9_UNPACK_MULSUB_2D_4X 14, 13, 4, 5, 6270, 15137 ; m14/4=t6[d], m13/5=t7[d]
VP9_RND_SH_SUMSUB_BA 14, 7, 4, 2, 10, [pd_8192]
psignw m14, [pw_m1] ; m14=out3[w], m7=t6[w]
VP9_RND_SH_SUMSUB_BA 13, 6, 5, 0, 10, [pd_8192] ; m13=out12[w], m6=t7[w]
SUMSUB_BA w, 9, 15, 10 ; m9=out0[w], m15=t2[w]
SUMSUB_BA w, 8, 12, 10
psignw m8, [pw_m1] ; m8=out15[w], m12=t3[w]

SUMSUB_BA w, 12, 15, 10
pmulhrsw m12, [pw_m11585x2] ; m12=out7[w]
pmulhrsw m15, [pw_11585x2] ; m15=out8[w]
SUMSUB_BA w, 7, 6, 10
pmulhrsw m7, [pw_11585x2] ; m7=out4[w]
pmulhrsw m6, [pw_11585x2] ; m6=out11[w]

; m9=out0, m14=out3, m7=out4, m12=out7, m15=out8, m6=out11, m13=out12, m8=out15
; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14

mova m0, [tmpq+ 8*%%str]
TRANSPOSE8x8W 9, 3, 11, 14, 7, 1, 0, 12, 2
mova [tmpq+ 0*16], m9
mova [tmpq+ 2*16], m3
mova [tmpq+ 4*16], m11
mova [tmpq+ 6*16], m14
mova m9, [tmpq+ 9*%%str]
mova m3, [tmpq+10*%%str]
mova m11, [tmpq+11*%%str]
mova m14, [tmpq+12*%%str]
mova [tmpq+ 8*16], m7
mova [tmpq+10*16], m1
mova [tmpq+12*16], m0
mova [tmpq+14*16], m12

TRANSPOSE8x8W 15, 9, 3, 6, 13, 11, 14, 8, 2
mova [tmpq+ 1*16], m15
mova [tmpq+ 3*16], m9
mova [tmpq+ 5*16], m3
mova [tmpq+ 7*16], m6
mova [tmpq+ 9*16], m13
mova [tmpq+11*16], m11
mova [tmpq+13*16], m14
mova [tmpq+15*16], m8

mova m5, [tmpq+ 8*%%str]
pmulhrsw m9, [pw_512]
pmulhrsw m3, [pw_512]
VP9_STORE_2X 9, 3, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m11, [pw_512]
pmulhrsw m14, [pw_512]
VP9_STORE_2X 11, 14, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m7, [pw_512]
pmulhrsw m1, [pw_512]
VP9_STORE_2X 7, 1, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m5, [pw_512]
pmulhrsw m12, [pw_512]
VP9_STORE_2X 5, 12, 2, 4, 0
lea dstq, [dstq+strideq*2]

mova m9, [tmpq+ 9*%%str]
mova m3, [tmpq+10*%%str]
mova m11, [tmpq+11*%%str]
mova m14, [tmpq+12*%%str]

pmulhrsw m15, [pw_512]
pmulhrsw m9, [pw_512]
VP9_STORE_2X 15, 9, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m3, [pw_512]
pmulhrsw m6, [pw_512]
VP9_STORE_2X 3, 6, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m13, [pw_512]
pmulhrsw m11, [pw_512]
VP9_STORE_2X 13, 11, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m14, [pw_512]
pmulhrsw m8, [pw_512]
VP9_STORE_2X 14, 8, 2, 4, 0

cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 16, m0

IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
; first do t0-15, this can be done identical to idct16x16
VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq+ 4*%%str

; backup a different register
mova [tmpq+30*%%str], m15 ; t15
mova m7, [tmpq+ 4*%%str]
SUMSUB_BA w, 6, 9, 15 ; t6, t9
SUMSUB_BA w, 7, 8, 15 ; t7, t8

; store everything on stack to make space available for t16-31
; we store interleaved with the output of the second half (t16-31)
; so we don't need to allocate extra stack space
mova [tmpq+ 0*%%str], m0 ; t0
mova [tmpq+ 4*%%str], m1 ; t1
mova [tmpq+ 8*%%str], m2 ; t2
mova [tmpq+12*%%str], m3 ; t3
mova [tmpq+16*%%str], m4 ; t4
mova [tmpq+20*%%str], m5 ; t5
mova [tmpq+24*%%str], m6 ; t6
mova [tmpq+28*%%str], m7 ; t7
mova [tmpq+ 2*%%str], m8 ; t8
mova [tmpq+ 6*%%str], m9 ; t9
mova [tmpq+10*%%str], m10 ; t10
mova [tmpq+14*%%str], m11 ; t11
mova [tmpq+18*%%str], m12 ; t12
mova [tmpq+22*%%str], m13 ; t13
mova [tmpq+26*%%str], m14 ; t14

; then, secondly, do t16-31
pmulhrsw m11, m4, [pw_16364x2] ;t31
pmulhrsw m4, [pw_804x2] ;t16
pmulhrsw m8, m7, [pw_m5520x2] ;t19
pmulhrsw m7, [pw_15426x2] ;t28
pmulhrsw m15, m0, [pw_15893x2] ;t27
pmulhrsw m0, [pw_3981x2] ;t20
pmulhrsw m12, m3, [pw_m2404x2] ;t23
pmulhrsw m3, [pw_16207x2] ;t24

; m4=t16/17, m8=t18/19, m0=t20/21, m12=t22/23,
; m3=t24/25, m15=t26/27, m7=t28/29, m11=t30/31

VP9_UNPACK_MULSUB_2W_4X 5, 10, 11, 4, 16069, 3196, [pd_8192], 6, 9 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 9, 6, 7, 8, 3196, m16069, [pd_8192], 1, 14 ; t18, t29
; from 1 stage forward
SUMSUB_BA w, 8, 4, 1
mova [tmpq+17*%%str], m8 ; t16
mova [tmpq+21*%%str], m4 ; t19
VP9_UNPACK_MULSUB_2W_4X 1, 14, 15, 0, 9102, 13623, [pd_8192], 4, 8 ; t21, t26
VP9_UNPACK_MULSUB_2W_4X 13, 2, 3, 12, 13623, m9102, [pd_8192], 4, 8 ; t22, t25

; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31

mova m10, [%1+ 1*64]
mova m13, [%1+ 3*64]
mova m14, [%1+ 5*64]
mova m15, [%1+11*64]
mova m12, [%1+13*64]
mova m11, [%1+15*64]
pmulhrsw m5, m10, [pw_16364x2]
pmulhrsw m10, [pw_804x2]
pmulhrsw m4, m11, [pw_m11003x2]
pmulhrsw m11, [pw_12140x2]
pmulhrsw m7, m8, [pw_14811x2]
pmulhrsw m8, [pw_7005x2]
pmulhrsw m6, m9, [pw_m5520x2]
pmulhrsw m9, [pw_15426x2]
pmulhrsw m1, m14, [pw_15893x2]
pmulhrsw m14, [pw_3981x2]
pmulhrsw m0, m15, [pw_m8423x2]
pmulhrsw m15, [pw_14053x2]

; m10=in1, m4=in17, m8=in9, m6=in25, m14=in5, m0=in21, m12=in13, m2=in29,
; m13=in3, m3=in19, m15=in11, m1=in27, m9=in7, m7=in23, m11=in15, m5=in31

VP9_UNPACK_MULSUB_2W_4X 10, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
VP9_UNPACK_MULSUB_2W_4X 4, 11, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 8, 7, 14811, 7005, [pd_8192], 2, 3 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 6, 9, 5520, 15426, [pd_8192], 2, 3 ; t19, t28
VP9_UNPACK_MULSUB_2W_4X 14, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
VP9_UNPACK_MULSUB_2W_4X 0, 15, 8423, 14053, [pd_8192], 2, 3 ; t21, t26

; from 1 stage forward
SUMSUB_BA w, 4, 10, 2
SUMSUB_BA w, 8, 6, 2
; from 2 stages forward
SUMSUB_BA w, 8, 4, 2
mova [tmpq+17*%%str], m8 ; t16
mova [tmpq+21*%%str], m4 ; t19
pmulhrsw m3, m12, [pw_13160x2]
pmulhrsw m12, [pw_9760x2]
pmulhrsw m2, m13, [pw_m2404x2]
pmulhrsw m13, [pw_16207x2]
VP9_UNPACK_MULSUB_2W_4X 12, 3, 13160, 9760, [pd_8192], 4, 8 ; t22, t25
VP9_UNPACK_MULSUB_2W_4X 2, 13, 2404, 16207, [pd_8192], 4, 8 ; t23, t24

; m10=t16, m4=t17, m8=t18, m6=t19, m14=t20, m0=t21, m12=t22, m2=t23,
; m13=t24, m3=t25, m15=t26, m1=t27, m9=t28, m7=t29, m11=t30, m5=t31

SUMSUB_BA w, 0, 14, 4
SUMSUB_BA w, 12, 2, 4
SUMSUB_BA w, 3, 13, 4
SUMSUB_BA w, 15, 1, 4
SUMSUB_BA w, 7, 9, 4
SUMSUB_BA w, 11, 5, 4

; m4=t16, m10=t17, m6=t18, m8=t19, m0=t20, m14=t21, m2=t22, m12=t23,
; m3=t24, m13=t25, m1=t26, m15=t27, m7=t28, m9=t29, m5=t30, m11=t31

VP9_UNPACK_MULSUB_2W_4X 5, 10, 16069, 3196, [pd_8192], 4, 8 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 9, 6, 3196, m16069, [pd_8192], 4, 8 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 1, 14, 9102, 13623, [pd_8192], 4, 8 ; t21, t26
VP9_UNPACK_MULSUB_2W_4X 13, 2, 13623, m9102, [pd_8192], 4, 8 ; t22, t25

; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31

SUMSUB_BA w, 9, 5, 4
SUMSUB_BA w, 1, 13, 4
SUMSUB_BA w, 0, 12, 4
SUMSUB_BA w, 15, 3, 4
SUMSUB_BA w, 14, 2, 4
SUMSUB_BA w, 6, 10, 4
SUMSUB_BA w, 7, 11, 4

; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31

mova m8, [tmpq+17*%%str] ; t16
; from 2 stages forward
SUMSUB_BA w, 0, 8, 4
SUMSUB_BA w, 15, 7, 4
; from 3 stages forward
SUMSUB_BA w, 8, 7, 4
pmulhrsw m7, [pw_11585x2]
pmulhrsw m8, [pw_11585x2]
mova [tmpq+ 1*%%str], m0 ; t16
mova [tmpq+29*%%str], m7 ; t23

mova m4, [tmpq+21*%%str] ; t19
VP9_UNPACK_MULSUB_2W_4X 10, 5, 15137, 6270, [pd_8192], 0, 7 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 11, 4, 15137, 6270, [pd_8192], 0, 7 ; t19, t28
VP9_UNPACK_MULSUB_2W_4X 3, 12, 6270, m15137, [pd_8192], 0, 7 ; t20, t27
VP9_UNPACK_MULSUB_2W_4X 2, 13, 6270, m15137, [pd_8192], 0, 7 ; t21, t26

; m8=t16, m9=t17, m10=t18, m11=t19, m3=t20, m2=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m13=t26, m12=t27, m4=t28, m5=t29, m6=t30, m7=t31

SUMSUB_BA w, 1, 9, 0
SUMSUB_BA w, 2, 10, 0
SUMSUB_BA w, 3, 11, 0
SUMSUB_BA w, 12, 4, 0
SUMSUB_BA w, 13, 5, 0
SUMSUB_BA w, 14, 6, 0

; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31

SUMSUB_BA w, 9, 6, 0
SUMSUB_BA w, 10, 5, 0
SUMSUB_BA w, 11, 4, 0
pmulhrsw m6, [pw_11585x2]
pmulhrsw m9, [pw_11585x2]
pmulhrsw m5, [pw_11585x2]
pmulhrsw m10, [pw_11585x2]
pmulhrsw m4, [pw_11585x2]
pmulhrsw m11, [pw_11585x2]

; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31

; store t17-19 (and t20-22 for pass 1) - keep t24-31 in registers for
; final sumsub in pass 1, or keep t20-22 and t24-31 in registers for
; final sumsub of pass 2
mova [tmpq+ 5*%%str], m1 ; t17
mova [tmpq+ 9*%%str], m2 ; t18
mova [tmpq+13*%%str], m3 ; t19

; then do final pass to sumsub+store the two halves
mova [tmpq+17*%%str], m4 ; t20
mova [tmpq+21*%%str], m5 ; t21
mova [tmpq+25*%%str], m6 ; t22

mova m0, [tmpq+ 0*%%str] ; t0
mova m1, [tmpq+ 4*%%str] ; t1
mova m2, [tmpq+ 8*%%str] ; t2
mova m3, [tmpq+12*%%str] ; t3
mova m4, [tmpq+16*%%str] ; t4
mova m5, [tmpq+20*%%str] ; t5
mova m6, [tmpq+24*%%str] ; t6

SUMSUB_BA w, 15, 0, 7
mova [tmpq+ 3*%%str], m0 ; t15
mova m7, [tmpq+28*%%str] ; t7
SUMSUB_BA w, 14, 1, 0
SUMSUB_BA w, 13, 2, 0
SUMSUB_BA w, 12, 3, 0
SUMSUB_BA w, 11, 4, 0
SUMSUB_BA w, 10, 5, 0
SUMSUB_BA w, 9, 6, 0
SUMSUB_BA w, 8, 7, 0

TRANSPOSE8x8W 15, 14, 13, 12, 11, 10, 9, 8, 0
mova [tmpq+ 0*%%str], m15
mova [tmpq+ 4*%%str], m14
mova [tmpq+ 8*%%str], m13
mova [tmpq+12*%%str], m12
mova [tmpq+16*%%str], m11
mova [tmpq+20*%%str], m10
mova [tmpq+24*%%str], m9
mova [tmpq+28*%%str], m8

mova m0, [tmpq+ 3*%%str] ; t15
TRANSPOSE8x8W 7, 6, 5, 4, 3, 2, 1, 0, 8
mova [tmpq+ 3*%%str], m7
mova [tmpq+ 7*%%str], m6
mova [tmpq+11*%%str], m5
mova [tmpq+15*%%str], m4
mova [tmpq+19*%%str], m3
mova [tmpq+23*%%str], m2
mova [tmpq+27*%%str], m1
mova [tmpq+31*%%str], m0

mova m15, [tmpq+ 2*%%str] ; t8
mova m14, [tmpq+ 6*%%str] ; t9
mova m13, [tmpq+10*%%str] ; t10
mova m12, [tmpq+14*%%str] ; t11
mova m11, [tmpq+18*%%str] ; t12
mova m10, [tmpq+22*%%str] ; t13
mova m9, [tmpq+26*%%str] ; t14
mova m8, [tmpq+30*%%str] ; t15
mova m7, [tmpq+ 1*%%str] ; t16
mova m6, [tmpq+ 5*%%str] ; t17
mova m5, [tmpq+ 9*%%str] ; t18
mova m4, [tmpq+13*%%str] ; t19
mova m3, [tmpq+17*%%str] ; t20
mova m2, [tmpq+21*%%str] ; t21
mova m1, [tmpq+25*%%str] ; t22

SUMSUB_BA w, 7, 8, 0
mova [tmpq+ 2*%%str], m8
mova m0, [tmpq+29*%%str] ; t23
SUMSUB_BA w, 6, 9, 8
SUMSUB_BA w, 5, 10, 8
SUMSUB_BA w, 4, 11, 8
SUMSUB_BA w, 3, 12, 8
SUMSUB_BA w, 2, 13, 8
SUMSUB_BA w, 1, 14, 8
SUMSUB_BA w, 0, 15, 8

TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
mova [tmpq+ 1*%%str], m0
mova [tmpq+ 5*%%str], m1
mova [tmpq+ 9*%%str], m2
mova [tmpq+13*%%str], m3
mova [tmpq+17*%%str], m4
mova [tmpq+21*%%str], m5
mova [tmpq+25*%%str], m6
mova [tmpq+29*%%str], m7

mova m8, [tmpq+ 2*%%str]
TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
mova [tmpq+ 2*%%str], m8
mova [tmpq+ 6*%%str], m9
mova [tmpq+10*%%str], m10
mova [tmpq+14*%%str], m11
mova [tmpq+18*%%str], m12
mova [tmpq+22*%%str], m13
mova [tmpq+26*%%str], m14
mova [tmpq+30*%%str], m15

; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
; t16-19 and t23 is in [tmpq+{1,5,9,13,29}*%%str]
; t24-31 is in m8-15

%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
SUMSUB_BA w, %4, %1, %5
SUMSUB_BA w, %3, %2, %5
pmulhrsw m%4, [pw_512]
pmulhrsw m%3, [pw_512]
VP9_STORE_2X %4, %3, %5, %6, %7
pmulhrsw m%2, [pw_512]
pmulhrsw m%1, [pw_512]
VP9_STORE_2X %2, %1, %5, %6, %7, dst_endq
sub dst_endq, stride2q
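
; Informational note (not from the original source): %%STORE_2X2 performs the
; final t[i] +/- t[31-i] stage of the 32-point IDCT while writing out: the
; sums go to two rows near the top of the block (through dstq, which advances
; by stride*2), the differences to the mirrored rows near the bottom (through
; dst_endq, which walks backwards by stride*2), each scaled by pw_512, i.e.
; (x + 32) >> 6.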

; store t0-1 and t30-31
mova m0, [tmpq+ 0*%%str]
mova m1, [tmpq+ 4*%%str]
%%STORE_2X2 0, 1, 14, 15, 2, 3, 7

; store t2-3 and t28-29
mova m0, [tmpq+ 8*%%str]
mova m1, [tmpq+12*%%str]
%%STORE_2X2 0, 1, 12, 13, 2, 3, 7

; store t4-5 and t26-27
mova m0, [tmpq+16*%%str]
mova m1, [tmpq+20*%%str]
%%STORE_2X2 0, 1, 10, 11, 2, 3, 7

; store t6-7 and t24-25
mova m0, [tmpq+24*%%str]
mova m1, [tmpq+28*%%str]
%%STORE_2X2 0, 1, 8, 9, 2, 3, 7

; store t8-9 and t22-23
mova m0, [tmpq+ 2*%%str]
mova m1, [tmpq+ 6*%%str]
mova m8, [tmpq+29*%%str]
%%STORE_2X2 0, 1, 6, 8, 2, 3, 7

; store t10-11 and t20-21
mova m0, [tmpq+10*%%str]
mova m1, [tmpq+14*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7

; store t12-13 and t18-19
mova m0, [tmpq+18*%%str]
mova m1, [tmpq+22*%%str]
mova m5, [tmpq+13*%%str]
mova m4, [tmpq+ 9*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7

mova m0, [tmpq+26*%%str]
mova m1, [tmpq+30*%%str]
mova m5, [tmpq+ 5*%%str]
mova m4, [tmpq+ 1*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7, 0

%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1

cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
mova m1, [pw_11585x2]
SPLATW m0, m0, q0000
pmulhrsw m0, [pw_512]

DEFINE_ARGS dst, stride, block, cnt
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize

DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
VP9_IDCT32_1D blockq, 1, 8

mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2, 8

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 8, m7

VP9_IDCT32_1D blockq, 1, 16

mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2, 16

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 16, m7

VP9_IDCT32_1D blockq, 1

mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 32, m7

VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx