;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 chroma MC code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pw_4
cextern pw_8
cextern pw_32
cextern pw_64

SECTION .text

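; Copy (for put_*) or average (for avg_*) four rows of 8 pixels per
; iteration when mx == my == 0, i.e. the motion vector has no fractional
; part and no filtering is needed. r4 and r5 cache 3*stride and 4*stride,
; so both pointers advance four rows with a single add per iteration.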
%macro MV0_PIXELS_MC8 0
    lea           r4, [r2*3   ]
    lea           r5, [r2*4   ]
.next4rows:
    movu          m0, [r1     ]
    movu          m1, [r1+r2  ]
    CHROMAMC_AVG  m0, [r0     ]
    CHROMAMC_AVG  m1, [r0+r2  ]
    mova          [r0     ], m0
    mova          [r0+r2  ], m1
    movu          m0, [r1+r2*2]
    movu          m1, [r1+r4  ]
    CHROMAMC_AVG  m0, [r0+r2*2]
    CHROMAMC_AVG  m1, [r0+r4  ]
    mova          [r0+r2*2], m0
    mova          [r0+r4  ], m1
    add           r1, r5
    add           r0, r5
    sub          r3d, 4
    jne .next4rows
%endmacro

;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc8(pixel *dst, pixel *src, int stride, int h,
;                                 int mx, int my)
;-----------------------------------------------------------------------------
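; Standard H.264 bilinear chroma interpolation: with the fractional MV
; components mx, my in [0,7], the weights are
;   A = (8-mx)*(8-my)   B = mx*(8-my)   C = (8-mx)*my   D = mx*my
; and each output pixel is
;   dst[x] = (A*src[x] + B*src[x+1] + C*src[x+stride] + D*src[x+stride+1] + 32) >> 6
; The code below special-cases mx == my == 0 (plain copy) and the two 1-D
; cases (mx == 0 or my == 0), which need only two taps and a +4 >> 3.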
%macro CHROMA_MC8 1
cglobal %1_h264_chroma_mc8_10, 6,7,8
    movsxdifnidn r2, r2d
    mov          r6d, r5d
    or           r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    MV0_PIXELS_MC8
    REP_RET

.at_least_one_non_zero:
    mov          r6d, 2
    test         r5d, r5d
    je .x_interpolation
    mov          r6, r2           ; dxy = x ? 2 : stride (byte offset of the second tap; one 10-bit pixel = 2 bytes)
    test         r4d, r4d
    jne .xy_interpolation
.x_interpolation:
    ; mx == 0 XOR my == 0 - 1 dimensional filter only
    or           r4d, r5d         ; x + y
    movd         m5, r4d
    mova         m4, [pw_8]
    mova         m6, [pw_4]       ; mm6 = rnd >> 3
    SPLATW       m5, m5           ; mm5 = B = x
    psubw        m4, m5           ; mm4 = A = 8-x

.next1drow:
    movu         m0, [r1   ]      ; mm0 = src[0..7]
    movu         m2, [r1+r6]      ; mm2 = src[1..8]

    pmullw       m0, m4           ; mm0 = A * src[0..7]
    pmullw       m2, m5           ; mm2 = B * src[1..8]

    paddw        m0, m6
    paddw        m0, m2
    psrlw        m0, 3
    CHROMAMC_AVG m0, [r0]
    mova         [r0], m0         ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3

    add          r0, r2
    add          r1, r2
    dec          r3d
    jne .next1drow
    REP_RET

.xy_interpolation: ; general case, bilinear
    movd         m4, r4m          ; x
    movd         m6, r5m          ; y

    SPLATW       m4, m4           ; mm4 = x words
    SPLATW       m6, m6           ; mm6 = y words
    psllw        m5, m4, 3        ; mm5 = 8x
    pmullw       m4, m6           ; mm4 = x * y
    psllw        m6, 3            ; mm6 = 8y
    paddw        m1, m5, m6       ; mm1 = 8x+8y
    mova         m7, m4           ; mm7 = D = x * y
    psubw        m5, m4           ; mm5 = B = 8x - xy
    psubw        m6, m4           ; mm6 = C = 8y - xy
    paddw        m4, [pw_64]
    psubw        m4, m1           ; mm4 = A = xy - (8x+8y) + 64
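    ; Note A+B+C+D = 64, so the +32 and >> 6 below give a correctly rounded
    ; weighted average. E.g. mx=2, my=3: A=30, B=10, C=18, D=6.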

    movu         m0, [r1  ]       ; mm0 = src[0..7]
    movu         m1, [r1+2]       ; mm1 = src[1..8]
.next2drow:
    add          r1, r2

    pmullw       m2, m0, m4
    pmullw       m1, m5
    paddw        m2, m1           ; mm2 = A * src[0..7] + B * src[1..8]

    movu         m0, [r1]
    movu         m1, [r1+2]
    pmullw       m3, m0, m6
    paddw        m2, m3           ; mm2 += C * src[0..7+stride]
    pmullw       m3, m1, m7
    paddw        m2, m3           ; mm2 += D * src[1..8+stride]

    paddw        m2, [pw_32]
    psrlw        m2, 6
    CHROMAMC_AVG m2, [r0]
    mova         [r0], m2         ; dst[0..7] = (mm2 + 32) >> 6

    add          r0, r2
    dec          r3d
    jne .next2drow
    REP_RET
%endmacro

;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc4(pixel *dst, pixel *src, int stride, int h,
;                                 int mx, int my)
;-----------------------------------------------------------------------------
;TODO: xmm mc4
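; MC4_OP computes one output row: it loads a new source row, filters it
; horizontally into %1, then combines it vertically with %2, the previous
; row's horizontally filtered result. The two arguments swap between calls,
; so each source row is loaded and filtered only once.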
%macro MC4_OP 2
    movq          %1, [r1  ]
    movq          m1, [r1+2]
    add           r1, r2
    pmullw        %1, m4
    pmullw        m1, m2
    paddw         m1, %1
    mova          %1, m1

    pmullw        %2, m5
    pmullw        m1, m3
    paddw         %2, [pw_32]
    paddw         m1, %2
    psrlw         m1, 6
    CHROMAMC_AVG  m1, %2, [r0]
    movq          [r0], m1
    add           r0, r2
%endmacro

%macro CHROMA_MC4 1
cglobal %1_h264_chroma_mc4_10, 6,6,7
    movsxdifnidn r2, r2d
    movd          m2, r4m         ; x
    movd          m3, r5m         ; y
    mova          m4, [pw_8]
    mova          m5, m4
    SPLATW        m2, m2
    SPLATW        m3, m3
    psubw         m4, m2          ; m4 = 8-x
    psubw         m5, m3          ; m5 = 8-y

    movq          m0, [r1  ]
    movq          m6, [r1+2]
    add           r1, r2
    pmullw        m0, m4
    pmullw        m6, m2
    paddw         m6, m0          ; m6 = horizontally filtered row 0

.next2rows:
    MC4_OP m0, m6
    MC4_OP m6, m0
    sub           r3d, 2
    jnz .next2rows
    REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc2(pixel *dst, pixel *src, int stride, int h,
;                                 int mx, int my)
;-----------------------------------------------------------------------------
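; For 2-pixel-wide blocks the four weights are packed into GPRs instead of
; being splatted across a vector: r4d ends up as {B,A} = x*(8-y)<<16 | (8-x)*(8-y)
; and r5d as {D,C} = x*y<<16 | y*(8-x), so after punpckldq a single pmaddwd
; applies two taps to both pixels at once.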
%macro CHROMA_MC2 1
cglobal %1_h264_chroma_mc2_10, 6,7
    movsxdifnidn r2, r2d
    mov          r6d, r4d
    shl          r4d, 16
    sub          r4d, r6d
    add          r4d, 8
    imul         r5d, r4d         ; x*y<<16 | y*(8-x)
    shl          r4d, 3
    sub          r4d, r5d         ; x*(8-y)<<16 | (8-x)*(8-y)

    movd         m5, r4d
    movd         m6, r5d
    punpckldq    m5, m5           ; mm5 = {A,B,A,B}
    punpckldq    m6, m6           ; mm6 = {C,D,C,D}
    pxor         m7, m7
    pshufw       m2, [r1], 0x94   ; mm2 = src[0,1,1,2]

.nextrow:
    add          r1, r2
    movq         m1, m2
    pmaddwd      m1, m5           ; mm1 = A * src[0,1] + B * src[1,2]
    pshufw       m0, [r1], 0x94   ; mm0 = src[0,1,1,2]
    movq         m2, m0           ; save the new row for the next iteration
    pmaddwd      m0, m6
    paddw        m1, [pw_32]
    paddw        m1, m0           ; mm1 += C * src[0,1] + D * src[1,2]
    psrlw        m1, 6
    packssdw     m1, m7
    CHROMAMC_AVG m1, m3, [r0]
    movd         [r0], m1
    add          r0, r2
    dec          r3d
    jnz .nextrow
    REP_RET
%endmacro

%macro NOTHING 2-3
%endmacro
%macro AVG 2-3
%if %0==3
    movq          %2, %3
%endif
    pavgw         %1, %2
%endmacro
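; pavgw computes a rounding average, (a + b + 1) >> 1 per word, which is the
; required semantics for the avg_* variants; NOTHING makes CHROMAMC_AVG a
; no-op for the put_* variants.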

%define CHROMAMC_AVG NOTHING
INIT_XMM sse2
CHROMA_MC8 put
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 put
%endif
INIT_MMX mmxext
CHROMA_MC4 put
CHROMA_MC2 put

%define CHROMAMC_AVG AVG
INIT_XMM sse2
CHROMA_MC8 avg
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 avg
%endif
INIT_MMX mmxext
CHROMA_MC4 avg
CHROMA_MC2 avg