/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "libavutil/common.h"
25 #include "libavutil/intreadwrite.h"
29 // FIXME see whether we can merge parts of this (perhaps at least 4x4 and 8x8)
30 // back with h264pred.[ch]
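/* Convention used by all intra predictors below: dst/stride address the block
 * being predicted, top[] holds the row of reconstructed pixels above it
 * (top[-1] being the top-left corner sample) and left[] the column to its
 * left. As the hor_* and tm_* functions show, left[] is indexed bottom-up:
 * left[0] is the bottom-most neighbour, left[size - 1] the top-most. */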
static void vert_4x4_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    unsigned p4 = AV_RN32A(top);

    AV_WN32A(dst + stride * 0, p4);
    AV_WN32A(dst + stride * 1, p4);
    AV_WN32A(dst + stride * 2, p4);
    AV_WN32A(dst + stride * 3, p4);
}
static void vert_8x8_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    uint64_t p8 = AV_RN64A(top);
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, p8);
        dst += stride;
    }
}

static void vert_16x16_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    uint64_t p8a = AV_RN64A(top + 0), p8b = AV_RN64A(top + 8);
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, p8a);
        AV_WN64A(dst + 8, p8b);
        dst += stride;
    }
}

static void vert_32x32_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    uint64_t p8a = AV_RN64A(top +  0), p8b = AV_RN64A(top +  8),
             p8c = AV_RN64A(top + 16), p8d = AV_RN64A(top + 24);
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, p8a);
        AV_WN64A(dst +  8, p8b);
        AV_WN64A(dst + 16, p8c);
        AV_WN64A(dst + 24, p8d);
        dst += stride;
    }
}
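/* The vertical predictors simply replicate the top row into every output row;
 * the aligned AV_RN*A/AV_WN*A macros let each row be moved with one or a few
 * word-sized loads and stores instead of per-pixel copies. */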
static void hor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top)
{
    AV_WN32A(dst + stride * 0, left[3] * 0x01010101U);
    AV_WN32A(dst + stride * 1, left[2] * 0x01010101U);
    AV_WN32A(dst + stride * 2, left[1] * 0x01010101U);
    AV_WN32A(dst + stride * 3, left[0] * 0x01010101U);
}
static void hor_8x8_c(uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, left[7 - y] * 0x0101010101010101ULL);
        dst += stride;
    }
}

static void hor_16x16_c(uint8_t *dst, ptrdiff_t stride,
                        const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 16; y++) {
        uint64_t p8 = left[15 - y] * 0x0101010101010101ULL;

        AV_WN64A(dst + 0, p8);
        AV_WN64A(dst + 8, p8);
        dst += stride;
    }
}

static void hor_32x32_c(uint8_t *dst, ptrdiff_t stride,
                        const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 32; y++) {
        uint64_t p8 = left[31 - y] * 0x0101010101010101ULL;

        AV_WN64A(dst +  0, p8);
        AV_WN64A(dst +  8, p8);
        AV_WN64A(dst + 16, p8);
        AV_WN64A(dst + 24, p8);
        dst += stride;
    }
}
static void tm_4x4_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    int y, tl = top[-1];

    for (y = 0; y < 4; y++) {
        int l_m_tl = left[3 - y] - tl;

        dst[0] = av_clip_uint8(top[0] + l_m_tl);
        dst[1] = av_clip_uint8(top[1] + l_m_tl);
        dst[2] = av_clip_uint8(top[2] + l_m_tl);
        dst[3] = av_clip_uint8(top[3] + l_m_tl);
        dst += stride;
    }
}

static void tm_8x8_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    int y, tl = top[-1];

    for (y = 0; y < 8; y++) {
        int l_m_tl = left[7 - y] - tl;

        dst[0] = av_clip_uint8(top[0] + l_m_tl);
        dst[1] = av_clip_uint8(top[1] + l_m_tl);
        dst[2] = av_clip_uint8(top[2] + l_m_tl);
        dst[3] = av_clip_uint8(top[3] + l_m_tl);
        dst[4] = av_clip_uint8(top[4] + l_m_tl);
        dst[5] = av_clip_uint8(top[5] + l_m_tl);
        dst[6] = av_clip_uint8(top[6] + l_m_tl);
        dst[7] = av_clip_uint8(top[7] + l_m_tl);
        dst += stride;
    }
}

static void tm_16x16_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    int y, tl = top[-1];

    for (y = 0; y < 16; y++) {
        int l_m_tl = left[15 - y] - tl;

        dst[ 0] = av_clip_uint8(top[ 0] + l_m_tl);
        dst[ 1] = av_clip_uint8(top[ 1] + l_m_tl);
        dst[ 2] = av_clip_uint8(top[ 2] + l_m_tl);
        dst[ 3] = av_clip_uint8(top[ 3] + l_m_tl);
        dst[ 4] = av_clip_uint8(top[ 4] + l_m_tl);
        dst[ 5] = av_clip_uint8(top[ 5] + l_m_tl);
        dst[ 6] = av_clip_uint8(top[ 6] + l_m_tl);
        dst[ 7] = av_clip_uint8(top[ 7] + l_m_tl);
        dst[ 8] = av_clip_uint8(top[ 8] + l_m_tl);
        dst[ 9] = av_clip_uint8(top[ 9] + l_m_tl);
        dst[10] = av_clip_uint8(top[10] + l_m_tl);
        dst[11] = av_clip_uint8(top[11] + l_m_tl);
        dst[12] = av_clip_uint8(top[12] + l_m_tl);
        dst[13] = av_clip_uint8(top[13] + l_m_tl);
        dst[14] = av_clip_uint8(top[14] + l_m_tl);
        dst[15] = av_clip_uint8(top[15] + l_m_tl);
        dst += stride;
    }
}

static void tm_32x32_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    int y, tl = top[-1];

    for (y = 0; y < 32; y++) {
        int l_m_tl = left[31 - y] - tl;

        dst[ 0] = av_clip_uint8(top[ 0] + l_m_tl);
        dst[ 1] = av_clip_uint8(top[ 1] + l_m_tl);
        dst[ 2] = av_clip_uint8(top[ 2] + l_m_tl);
        dst[ 3] = av_clip_uint8(top[ 3] + l_m_tl);
        dst[ 4] = av_clip_uint8(top[ 4] + l_m_tl);
        dst[ 5] = av_clip_uint8(top[ 5] + l_m_tl);
        dst[ 6] = av_clip_uint8(top[ 6] + l_m_tl);
        dst[ 7] = av_clip_uint8(top[ 7] + l_m_tl);
        dst[ 8] = av_clip_uint8(top[ 8] + l_m_tl);
        dst[ 9] = av_clip_uint8(top[ 9] + l_m_tl);
        dst[10] = av_clip_uint8(top[10] + l_m_tl);
        dst[11] = av_clip_uint8(top[11] + l_m_tl);
        dst[12] = av_clip_uint8(top[12] + l_m_tl);
        dst[13] = av_clip_uint8(top[13] + l_m_tl);
        dst[14] = av_clip_uint8(top[14] + l_m_tl);
        dst[15] = av_clip_uint8(top[15] + l_m_tl);
        dst[16] = av_clip_uint8(top[16] + l_m_tl);
        dst[17] = av_clip_uint8(top[17] + l_m_tl);
        dst[18] = av_clip_uint8(top[18] + l_m_tl);
        dst[19] = av_clip_uint8(top[19] + l_m_tl);
        dst[20] = av_clip_uint8(top[20] + l_m_tl);
        dst[21] = av_clip_uint8(top[21] + l_m_tl);
        dst[22] = av_clip_uint8(top[22] + l_m_tl);
        dst[23] = av_clip_uint8(top[23] + l_m_tl);
        dst[24] = av_clip_uint8(top[24] + l_m_tl);
        dst[25] = av_clip_uint8(top[25] + l_m_tl);
        dst[26] = av_clip_uint8(top[26] + l_m_tl);
        dst[27] = av_clip_uint8(top[27] + l_m_tl);
        dst[28] = av_clip_uint8(top[28] + l_m_tl);
        dst[29] = av_clip_uint8(top[29] + l_m_tl);
        dst[30] = av_clip_uint8(top[30] + l_m_tl);
        dst[31] = av_clip_uint8(top[31] + l_m_tl);
        dst += stride;
    }
}
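/* TM ("TrueMotion") prediction: every output pixel is
 * clip(top[x] + left[y] - topleft), i.e. the top row shifted by the vertical
 * offset of the corresponding left-column pixel. */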
static void dc_4x4_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    unsigned dc = 0x01010101U * ((left[0] + left[1] + left[2] + left[3] +
                                  top[0] + top[1] + top[2] + top[3] + 4) >> 3);

    AV_WN32A(dst + stride * 0, dc);
    AV_WN32A(dst + stride * 1, dc);
    AV_WN32A(dst + stride * 2, dc);
    AV_WN32A(dst + stride * 3, dc);
}
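/* DC prediction averages the available left-column and top-row pixels with
 * rounding and then splats the 8-bit result: multiplying by 0x01010101U (or
 * its 64-bit equivalent) replicates the byte across the whole word so each
 * output row can be written with a few aligned stores. */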
static void dc_8x8_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
          left[6] + left[7] + top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] + 8) >> 4);
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}

static void dc_16x16_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
          left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
          left[13] + left[14] + left[15] + top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] + top[8] + top[9] + top[10] +
          top[11] + top[12] + top[13] + top[14] + top[15] + 16) >> 5);
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}

static void dc_32x32_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
          left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
          left[13] + left[14] + left[15] + left[16] + left[17] + left[18] +
          left[19] + left[20] + left[21] + left[22] + left[23] + left[24] +
          left[25] + left[26] + left[27] + left[28] + left[29] + left[30] +
          left[31] + top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
          top[6] + top[7] + top[8] + top[9] + top[10] + top[11] + top[12] +
          top[13] + top[14] + top[15] + top[16] + top[17] + top[18] + top[19] +
          top[20] + top[21] + top[22] + top[23] + top[24] + top[25] + top[26] +
          top[27] + top[28] + top[29] + top[30] + top[31] + 32) >> 6);
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, dc);
        AV_WN64A(dst +  8, dc);
        AV_WN64A(dst + 16, dc);
        AV_WN64A(dst + 24, dc);
        dst += stride;
    }
}
static void dc_left_4x4_c(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top)
{
    unsigned dc = 0x01010101U * ((left[0] + left[1] + left[2] + left[3] + 2) >> 2);

    AV_WN32A(dst + stride * 0, dc);
    AV_WN32A(dst + stride * 1, dc);
    AV_WN32A(dst + stride * 2, dc);
    AV_WN32A(dst + stride * 3, dc);
}
static void dc_left_8x8_c(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] + 4) >> 3);
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}

static void dc_left_16x16_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
          left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] + left[15] + 8) >> 4);
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}

static void dc_left_32x32_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
          left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] + left[15] + left[16] + left[17] +
          left[18] + left[19] + left[20] + left[21] + left[22] + left[23] +
          left[24] + left[25] + left[26] + left[27] + left[28] + left[29] +
          left[30] + left[31] + 16) >> 5);
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, dc);
        AV_WN64A(dst +  8, dc);
        AV_WN64A(dst + 16, dc);
        AV_WN64A(dst + 24, dc);
        dst += stride;
    }
}
static void dc_top_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    unsigned dc = 0x01010101U * ((top[0] + top[1] + top[2] + top[3] + 2) >> 2);

    AV_WN32A(dst + stride * 0, dc);
    AV_WN32A(dst + stride * 1, dc);
    AV_WN32A(dst + stride * 2, dc);
    AV_WN32A(dst + stride * 3, dc);
}
static void dc_top_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] + 4) >> 3);
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}

static void dc_top_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
          top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] + 8) >> 4);
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}

static void dc_top_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    uint64_t dc = 0x0101010101010101ULL *
        ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
          top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] + top[16] + top[17] +
          top[18] + top[19] + top[20] + top[21] + top[22] + top[23] +
          top[24] + top[25] + top[26] + top[27] + top[28] + top[29] +
          top[30] + top[31] + 16) >> 5);
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, dc);
        AV_WN64A(dst +  8, dc);
        AV_WN64A(dst + 16, dc);
        AV_WN64A(dst + 24, dc);
        dst += stride;
    }
}
static void dc_128_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    AV_WN32A(dst + stride * 0, 0x80808080U);
    AV_WN32A(dst + stride * 1, 0x80808080U);
    AV_WN32A(dst + stride * 2, 0x80808080U);
    AV_WN32A(dst + stride * 3, 0x80808080U);
}
static void dc_128_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, 0x8080808080808080ULL);
        dst += stride;
    }
}

static void dc_128_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, 0x8080808080808080ULL);
        AV_WN64A(dst + 8, 0x8080808080808080ULL);
        dst += stride;
    }
}

static void dc_128_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, 0x8080808080808080ULL);
        AV_WN64A(dst +  8, 0x8080808080808080ULL);
        AV_WN64A(dst + 16, 0x8080808080808080ULL);
        AV_WN64A(dst + 24, 0x8080808080808080ULL);
        dst += stride;
    }
}
static void dc_127_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    AV_WN32A(dst + stride * 0, 0x7F7F7F7FU);
    AV_WN32A(dst + stride * 1, 0x7F7F7F7FU);
    AV_WN32A(dst + stride * 2, 0x7F7F7F7FU);
    AV_WN32A(dst + stride * 3, 0x7F7F7F7FU);
}
static void dc_127_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    }
}

static void dc_127_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, 0x7F7F7F7F7F7F7F7FULL);
        AV_WN64A(dst + 8, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    }
}

static void dc_127_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, 0x7F7F7F7F7F7F7F7FULL);
        AV_WN64A(dst +  8, 0x7F7F7F7F7F7F7F7FULL);
        AV_WN64A(dst + 16, 0x7F7F7F7F7F7F7F7FULL);
        AV_WN64A(dst + 24, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    }
}
static void dc_129_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    AV_WN32A(dst + stride * 0, 0x81818181U);
    AV_WN32A(dst + stride * 1, 0x81818181U);
    AV_WN32A(dst + stride * 2, 0x81818181U);
    AV_WN32A(dst + stride * 3, 0x81818181U);
}
static void dc_129_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 8; y++) {
        AV_WN64A(dst, 0x8181818181818181ULL);
        dst += stride;
    }
}

static void dc_129_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst + 0, 0x8181818181818181ULL);
        AV_WN64A(dst + 8, 0x8181818181818181ULL);
        dst += stride;
    }
}

static void dc_129_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst +  0, 0x8181818181818181ULL);
        AV_WN64A(dst +  8, 0x8181818181818181ULL);
        AV_WN64A(dst + 16, 0x8181818181818181ULL);
        AV_WN64A(dst + 24, 0x8181818181818181ULL);
        dst += stride;
    }
}
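/* dc_127/dc_128/dc_129 are the fixed-value fallbacks for DC-style prediction
 * when the top and/or left neighbours are unavailable; VP9 substitutes the
 * constants 127, 128 and 129 for missing edge pixels, which collapses the
 * average to those same values. */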
#define DST(x, y) dst[(x) + (y) * stride]

static void diag_downleft_4x4_c(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *left, const uint8_t *top)
{
    int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        a4 = top[4], a5 = top[5], a6 = top[6], a7 = top[7];

    DST(0,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(1,0) = DST(0,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
    DST(2,0) = DST(1,1) = DST(0,2) = (a2 + a3 * 2 + a4 + 2) >> 2;
    DST(3,0) = DST(2,1) = DST(1,2) = DST(0,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
    DST(3,1) = DST(2,2) = DST(1,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
    DST(3,2) = DST(2,3) = (a5 + a6 * 2 + a7 + 2) >> 2;
    DST(3,3) = a7;  // note: this is different from vp8 and such
}
#define def_diag_downleft(size) \
static void diag_downleft_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                              const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t v[size - 1]; \
\
    for (i = 0; i < size - 2; i++) \
        v[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
    v[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
\
    for (j = 0; j < size; j++) { \
        memcpy(dst + j*stride, v + j, size - 1 - j); \
        memset(dst + j*stride + size - 1 - j, top[size - 1], j + 1); \
    } \
}

def_diag_downleft(8)
def_diag_downleft(16)
def_diag_downleft(32)
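/* For the larger sizes the diagonal predictors are built from a small line
 * buffer: the (1,2,1)/4-filtered diagonal is computed once into v[], and each
 * output row is then a memcpy of a shifted window of v[] plus a memset of the
 * replicated last top pixel for the part that runs off the edge. */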
static void diag_downright_4x4_c(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *left, const uint8_t *top)
{
    int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];

    DST(0,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
    DST(0,2) = DST(1,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,1) = DST(1,2) = DST(2,3) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,0) = DST(1,1) = DST(2,2) = DST(3,3) = (l0 + tl * 2 + a0 + 2) >> 2;
    DST(1,0) = DST(2,1) = DST(3,2) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(2,0) = DST(3,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(3,0) = (a1 + a2 * 2 + a3 + 2) >> 2;
}
#define def_diag_downright(size) \
static void diag_downright_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                               const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t v[size + size - 1]; \
\
    for (i = 0; i < size - 2; i++) { \
        v[i           ] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
        v[size + 1 + i] = (top[i]  + top[i + 1]  * 2 + top[i + 2]  + 2) >> 2; \
    } \
    v[size - 2] = (left[size - 2] + left[size - 1] * 2 + top[-1] + 2) >> 2; \
    v[size - 1] = (left[size - 1] + top[-1]        * 2 + top[ 0] + 2) >> 2; \
    v[size    ] = (top[-1]        + top[0]         * 2 + top[ 1] + 2) >> 2; \
\
    for (j = 0; j < size; j++) \
        memcpy(dst + j*stride, v + size - 1 - j, size); \
}

def_diag_downright(8)
def_diag_downright(16)
def_diag_downright(32)
static void vert_right_4x4_c(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *left, const uint8_t *top)
{
    int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        l0 = left[3], l1 = left[2], l2 = left[1];

    DST(0,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,2) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,0) = DST(1,2) = (tl + a0 + 1) >> 1;
    DST(0,1) = DST(1,3) = (l0 + tl * 2 + a0 + 2) >> 2;
    DST(1,0) = DST(2,2) = (a0 + a1 + 1) >> 1;
    DST(1,1) = DST(2,3) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(2,0) = DST(3,2) = (a1 + a2 + 1) >> 1;
    DST(2,1) = DST(3,3) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(3,0) = (a2 + a3 + 1) >> 1;
    DST(3,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
}
#define def_vert_right(size) \
static void vert_right_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                           const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t ve[size + size/2 - 1], vo[size + size/2 - 1]; \
\
    for (i = 0; i < size/2 - 2; i++) { \
        vo[i] = (left[i*2 + 3] + left[i*2 + 2] * 2 + left[i*2 + 1] + 2) >> 2; \
        ve[i] = (left[i*2 + 4] + left[i*2 + 3] * 2 + left[i*2 + 2] + 2) >> 2; \
    } \
    vo[size/2 - 2] = (left[size - 1] + left[size - 2] * 2 + left[size - 3] + 2) >> 2; \
    ve[size/2 - 2] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
\
    ve[size/2 - 1] = (top[-1] + top[0] + 1) >> 1; \
    vo[size/2 - 1] = (left[size - 1] + top[-1] * 2 + top[0] + 2) >> 2; \
    for (i = 0; i < size - 1; i++) { \
        ve[size/2 + i] = (top[i] + top[i + 1] + 1) >> 1; \
        vo[size/2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
    } \
\
    for (j = 0; j < size / 2; j++) { \
        memcpy(dst +  j*2      * stride, ve + size/2 - 1 - j, size); \
        memcpy(dst + (j*2 + 1) * stride, vo + size/2 - 1 - j, size); \
    } \
}

def_vert_right(8)
def_vert_right(16)
def_vert_right(32)
static void hor_down_4x4_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0],
        tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2];

    DST(2,0) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(3,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(0,0) = DST(2,1) = (tl + l0 + 1) >> 1;
    DST(1,0) = DST(3,1) = (a0 + tl * 2 + l0 + 2) >> 2;
    DST(0,1) = DST(2,2) = (l0 + l1 + 1) >> 1;
    DST(1,1) = DST(3,2) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,2) = DST(2,3) = (l1 + l2 + 1) >> 1;
    DST(1,2) = DST(3,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,3) = (l2 + l3 + 1) >> 1;
    DST(1,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
}
#define def_hor_down(size) \
static void hor_down_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                         const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t v[size * 3 - 2]; \
\
    for (i = 0; i < size - 2; i++) { \
        v[i*2       ] = (left[i + 1] + left[i + 0] + 1) >> 1; \
        v[i*2    + 1] = (left[i + 2] + left[i + 1] * 2 + left[i + 0] + 2) >> 2; \
        v[size*2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
    } \
    v[size*2 - 2] = (top[-1] + left[size - 1] + 1) >> 1; \
    v[size*2 - 4] = (left[size - 1] + left[size - 2] + 1) >> 1; \
    v[size*2 - 1] = (top[0]  + top[-1]        * 2 + left[size - 1] + 2) >> 2; \
    v[size*2 - 3] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
\
    for (j = 0; j < size; j++) \
        memcpy(dst + j*stride, v + size*2 - 2 - j*2, size); \
}

def_hor_down(8)
def_hor_down(16)
def_hor_down(32)
static void vert_left_4x4_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *top)
{
    int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        a4 = top[4], a5 = top[5], a6 = top[6];

    DST(0,0) = (a0 + a1 + 1) >> 1;
    DST(0,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(1,0) = DST(0,2) = (a1 + a2 + 1) >> 1;
    DST(1,1) = DST(0,3) = (a1 + a2 * 2 + a3 + 2) >> 2;
    DST(2,0) = DST(1,2) = (a2 + a3 + 1) >> 1;
    DST(2,1) = DST(1,3) = (a2 + a3 * 2 + a4 + 2) >> 2;
    DST(3,0) = DST(2,2) = (a3 + a4 + 1) >> 1;
    DST(3,1) = DST(2,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
    DST(3,2) = (a4 + a5 + 1) >> 1;
    DST(3,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
}
#define def_vert_left(size) \
static void vert_left_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                          const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t ve[size - 1], vo[size - 1]; \
\
    for (i = 0; i < size - 2; i++) { \
        ve[i] = (top[i] + top[i + 1] + 1) >> 1; \
        vo[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
    } \
    ve[size - 2] = (top[size - 2] + top[size - 1] + 1) >> 1; \
    vo[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
\
    for (j = 0; j < size / 2; j++) { \
        memcpy(dst +  j*2      * stride, ve + j, size - j - 1); \
        memset(dst +  j*2      * stride + size - j - 1, top[size - 1], j + 1); \
        memcpy(dst + (j*2 + 1) * stride, vo + j, size - j - 1); \
        memset(dst + (j*2 + 1) * stride + size - j - 1, top[size - 1], j + 1); \
    } \
}

def_vert_left(8)
def_vert_left(16)
def_vert_left(32)
static void hor_up_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];

    DST(0,0) = (l0 + l1 + 1) >> 1;
    DST(1,0) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,1) = DST(2,0) = (l1 + l2 + 1) >> 1;
    DST(1,1) = DST(3,0) = (l1 + l2 * 2 + l3 + 2) >> 2;
    DST(0,2) = DST(2,1) = (l2 + l3 + 1) >> 1;
    DST(1,2) = DST(3,1) = (l2 + l3 * 3 + 2) >> 2;
    DST(0,3) = DST(1,3) = DST(2,2) = DST(2,3) = DST(3,2) = DST(3,3) = l3;
}
#define def_hor_up(size) \
static void hor_up_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                       const uint8_t *left, const uint8_t *top) \
{ \
    int i, j; \
    uint8_t v[size*2 - 2]; \
\
    for (i = 0; i < size - 2; i++) { \
        v[i*2    ] = (left[size - i - 1] + left[size - i - 2] + 1) >> 1; \
        v[i*2 + 1] = (left[size - i - 1] + left[size - i - 2] * 2 + left[size - i - 3] + 2) >> 2; \
    } \
    v[size*2 - 4] = (left[1] + left[0] + 1) >> 1; \
    v[size*2 - 3] = (left[1] + left[0] * 3 + 2) >> 2; \
\
    for (j = 0; j < size / 2; j++) \
        memcpy(dst + j*stride, v + j*2, size); \
    for (j = size / 2; j < size; j++) { \
        memcpy(dst + j*stride, v + j*2, size*2 - 2 - j*2); \
        memset(dst + j*stride + size*2 - 2 - j*2, left[0], \
               2 + j*2 - size); \
    } \
}

def_hor_up(8)
def_hor_up(16)
def_hor_up(32)
static av_cold void vp9dsp_intrapred_init(VP9DSPContext *dsp)
{
#define init_intra_pred(tx, sz) \
    dsp->intra_pred[tx][VERT_PRED]            = vert_##sz##_c; \
    dsp->intra_pred[tx][HOR_PRED]             = hor_##sz##_c; \
    dsp->intra_pred[tx][DC_PRED]              = dc_##sz##_c; \
    dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED]  = diag_downleft_##sz##_c; \
    dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = diag_downright_##sz##_c; \
    dsp->intra_pred[tx][VERT_RIGHT_PRED]      = vert_right_##sz##_c; \
    dsp->intra_pred[tx][HOR_DOWN_PRED]        = hor_down_##sz##_c; \
    dsp->intra_pred[tx][VERT_LEFT_PRED]       = vert_left_##sz##_c; \
    dsp->intra_pred[tx][HOR_UP_PRED]          = hor_up_##sz##_c; \
    dsp->intra_pred[tx][TM_VP8_PRED]          = tm_##sz##_c; \
    dsp->intra_pred[tx][LEFT_DC_PRED]         = dc_left_##sz##_c; \
    dsp->intra_pred[tx][TOP_DC_PRED]          = dc_top_##sz##_c; \
    dsp->intra_pred[tx][DC_128_PRED]          = dc_128_##sz##_c; \
    dsp->intra_pred[tx][DC_127_PRED]          = dc_127_##sz##_c; \
    dsp->intra_pred[tx][DC_129_PRED]          = dc_129_##sz##_c

    init_intra_pred(TX_4X4,   4x4);
    init_intra_pred(TX_8X8,   8x8);
    init_intra_pred(TX_16X16, 16x16);
    init_intra_pred(TX_32X32, 32x32);

#undef init_intra_pred
}
#define itxfm_wrapper(type_a, type_b, sz, bits, has_dconly) \
static void type_a##_##type_b##_##sz##x##sz##_add_c(uint8_t *dst, \
                                                    ptrdiff_t stride, \
                                                    int16_t *block, int eob) \
{ \
    int i, j; \
    int16_t tmp[sz * sz], out[sz]; \
\
    if (has_dconly && eob == 1) { \
        const int t = (((block[0] * 11585 + (1 << 13)) >> 14) \
                                  * 11585 + (1 << 13)) >> 14; \
        block[0] = 0; \
        for (i = 0; i < sz; i++) { \
            for (j = 0; j < sz; j++) \
                dst[j * stride] = av_clip_uint8(dst[j * stride] + \
                                                (bits ? \
                                                 (t + (1 << (bits - 1))) >> bits : \
                                                 t)); \
            dst++; \
        } \
        return; \
    } \
\
    for (i = 0; i < sz; i++) \
        type_a##sz##_1d(block + i, sz, tmp + i * sz, 0); \
    memset(block, 0, sz * sz * sizeof(*block)); \
    for (i = 0; i < sz; i++) { \
        type_b##sz##_1d(tmp + i, sz, out, 1); \
        for (j = 0; j < sz; j++) \
            dst[j * stride] = av_clip_uint8(dst[j * stride] + \
                                            (bits ? \
                                             (out[j] + (1 << (bits - 1))) >> bits : \
                                             out[j])); \
        dst++; \
    } \
}

#define itxfm_wrap(sz, bits) \
itxfm_wrapper(idct,  idct,  sz, bits, 1) \
itxfm_wrapper(iadst, idct,  sz, bits, 0) \
itxfm_wrapper(idct,  iadst, sz, bits, 0) \
itxfm_wrapper(iadst, iadst, sz, bits, 0)
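/* The wrapper implements the usual separable 2-D inverse transform: one 1-D
 * pass over the columns of block[] into tmp[], then a second pass over tmp[],
 * adding the clipped result into dst. "bits" is the final rounding shift
 * (size dependent, 0 for the lossless WHT). When only the DC coefficient is
 * coded (eob == 1), the idct/idct variants take a shortcut: 11585/16384 is
 * cos(pi/4) in Q14, so applying it twice scales block[0] by ~1/2, matching
 * what the two full 1-D passes would produce for a DC-only block. */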
#define IN(x) in[(x) * stride]

static av_always_inline void idct4_1d(const int16_t *in, ptrdiff_t stride,
                                      int16_t *out, int pass)
{
    int t0, t1, t2, t3;

    t0 = ((IN(0) + IN(2)) * 11585 + (1 << 13)) >> 14;
    t1 = ((IN(0) - IN(2)) * 11585 + (1 << 13)) >> 14;
    t2 = (IN(1) *  6270 - IN(3) * 15137 + (1 << 13)) >> 14;
    t3 = (IN(1) * 15137 + IN(3) *  6270 + (1 << 13)) >> 14;

    out[0] = t0 + t3;
    out[1] = t1 + t2;
    out[2] = t1 - t2;
    out[3] = t0 - t3;
}
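/* The transform constants are Q14 fixed point: 11585 ~= 16384/sqrt(2),
 * 6270 ~= 16384*sin(pi/8), 15137 ~= 16384*cos(pi/8); every multiply is
 * rounded with +(1 << 13) and shifted back down by 14 bits. */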
static av_always_inline void iadst4_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0, t1, t2, t3;

    t0 = 5283 * IN(0) + 15212 * IN(2) +  9929 * IN(3);
    t1 = 9929 * IN(0) -  5283 * IN(2) - 15212 * IN(3);
    t2 = 13377 * (IN(0) - IN(2) + IN(3));
    t3 = 13377 * IN(1);

    out[0] = (t0 + t3      + (1 << 13)) >> 14;
    out[1] = (t1 + t3      + (1 << 13)) >> 14;
    out[2] = (t2           + (1 << 13)) >> 14;
    out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14;
}

itxfm_wrap(4, 4)
static av_always_inline void idct8_1d(const int16_t *in, ptrdiff_t stride,
                                      int16_t *out, int pass)
{
    int t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;

    t0a = ((IN(0) + IN(4)) * 11585 + (1 << 13)) >> 14;
    t1a = ((IN(0) - IN(4)) * 11585 + (1 << 13)) >> 14;
    t2a =  (IN(2) *  6270 - IN(6) * 15137 + (1 << 13)) >> 14;
    t3a =  (IN(2) * 15137 + IN(6) *  6270 + (1 << 13)) >> 14;
    t4a =  (IN(1) *  3196 - IN(7) * 16069 + (1 << 13)) >> 14;
    t5a =  (IN(5) * 13623 - IN(3) *  9102 + (1 << 13)) >> 14;
    t6a =  (IN(5) *  9102 + IN(3) * 13623 + (1 << 13)) >> 14;
    t7a =  (IN(1) * 16069 + IN(7) *  3196 + (1 << 13)) >> 14;

    t0  = t0a + t3a;
    t1  = t1a + t2a;
    t2  = t1a - t2a;
    t3  = t0a - t3a;
    t4  = t4a + t5a;
    t5a = t4a - t5a;
    t7  = t7a + t6a;
    t6a = t7a - t6a;

    t5  = ((t6a - t5a) * 11585 + (1 << 13)) >> 14;
    t6  = ((t6a + t5a) * 11585 + (1 << 13)) >> 14;

    out[0] = t0 + t7;
    out[1] = t1 + t6;
    out[2] = t2 + t5;
    out[3] = t3 + t4;
    out[4] = t3 - t4;
    out[5] = t2 - t5;
    out[6] = t1 - t6;
    out[7] = t0 - t7;
}
static av_always_inline void iadst8_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;

    t0a = 16305 * IN(7) +  1606 * IN(0);
    t1a =  1606 * IN(7) - 16305 * IN(0);
    t2a = 14449 * IN(5) +  7723 * IN(2);
    t3a =  7723 * IN(5) - 14449 * IN(2);
    t4a = 10394 * IN(3) + 12665 * IN(4);
    t5a = 12665 * IN(3) - 10394 * IN(4);
    t6a =  4756 * IN(1) + 15679 * IN(6);
    t7a = 15679 * IN(1) -  4756 * IN(6);

    t0 = (t0a + t4a + (1 << 13)) >> 14;
    t1 = (t1a + t5a + (1 << 13)) >> 14;
    t2 = (t2a + t6a + (1 << 13)) >> 14;
    t3 = (t3a + t7a + (1 << 13)) >> 14;
    t4 = (t0a - t4a + (1 << 13)) >> 14;
    t5 = (t1a - t5a + (1 << 13)) >> 14;
    t6 = (t2a - t6a + (1 << 13)) >> 14;
    t7 = (t3a - t7a + (1 << 13)) >> 14;

    t4a = 15137 * t4 +  6270 * t5;
    t5a =  6270 * t4 - 15137 * t5;
    t6a = 15137 * t7 -  6270 * t6;
    t7a =  6270 * t7 + 15137 * t6;

    out[0] =   t0 + t2;
    out[7] = -(t1 + t3);
    t2     =   t0 - t2;
    t3     =   t1 - t3;

    out[1] = -((t4a + t6a + (1 << 13)) >> 14);
    out[6] =   (t5a + t7a + (1 << 13)) >> 14;
    t6     =   (t4a - t6a + (1 << 13)) >> 14;
    t7     =   (t5a - t7a + (1 << 13)) >> 14;

    out[3] = -(((t2 + t3) * 11585 + (1 << 13)) >> 14);
    out[4] =   ((t2 - t3) * 11585 + (1 << 13)) >> 14;
    out[2] =   ((t6 + t7) * 11585 + (1 << 13)) >> 14;
    out[5] = -(((t6 - t7) * 11585 + (1 << 13)) >> 14);
}

itxfm_wrap(8, 5)
static av_always_inline void idct16_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    int t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    int t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    t0a  = ((IN(0) + IN(8)) * 11585 + (1 << 13)) >> 14;
    t1a  = ((IN(0) - IN(8)) * 11585 + (1 << 13)) >> 14;
    t2a  = (IN(4)  *  6270 - IN(12) * 15137 + (1 << 13)) >> 14;
    t3a  = (IN(4)  * 15137 + IN(12) *  6270 + (1 << 13)) >> 14;
    t4a  = (IN(2)  *  3196 - IN(14) * 16069 + (1 << 13)) >> 14;
    t7a  = (IN(2)  * 16069 + IN(14) *  3196 + (1 << 13)) >> 14;
    t5a  = (IN(10) * 13623 - IN(6)  *  9102 + (1 << 13)) >> 14;
    t6a  = (IN(10) *  9102 + IN(6)  * 13623 + (1 << 13)) >> 14;
    t8a  = (IN(1)  *  1606 - IN(15) * 16305 + (1 << 13)) >> 14;
    t15a = (IN(1)  * 16305 + IN(15) *  1606 + (1 << 13)) >> 14;
    t9a  = (IN(9)  * 12665 - IN(7)  * 10394 + (1 << 13)) >> 14;
    t14a = (IN(9)  * 10394 + IN(7)  * 12665 + (1 << 13)) >> 14;
    t10a = (IN(5)  *  7723 - IN(11) * 14449 + (1 << 13)) >> 14;
    t13a = (IN(5)  * 14449 + IN(11) *  7723 + (1 << 13)) >> 14;
    t11a = (IN(13) * 15679 - IN(3)  *  4756 + (1 << 13)) >> 14;
    t12a = (IN(13) *  4756 + IN(3)  * 15679 + (1 << 13)) >> 14;

    t0  = t0a  + t3a;
    t1  = t1a  + t2a;
    t2  = t1a  - t2a;
    t3  = t0a  - t3a;
    t4  = t4a  + t5a;
    t5  = t4a  - t5a;
    t6  = t7a  - t6a;
    t7  = t7a  + t6a;
    t8  = t8a  + t9a;
    t9  = t8a  - t9a;
    t10 = t11a - t10a;
    t11 = t11a + t10a;
    t12 = t12a + t13a;
    t13 = t12a - t13a;
    t14 = t15a - t14a;
    t15 = t15a + t14a;

    t5a  = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
    t6a  = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
    t9a  = (  t14 *  6270 - t9  * 15137  + (1 << 13)) >> 14;
    t14a = (  t14 * 15137 + t9  *  6270  + (1 << 13)) >> 14;
    t10a = (-(t13 * 15137 + t10 *  6270) + (1 << 13)) >> 14;
    t13a = (  t13 *  6270 - t10 * 15137  + (1 << 13)) >> 14;

    t10a = ((t13  - t10 ) * 11585 + (1 << 13)) >> 14;
    t13a = ((t13  + t10 ) * 11585 + (1 << 13)) >> 14;
    t11  = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
    t12  = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;

    out[ 0] = t0a + t15a;
    out[ 1] = t1a + t14;
    out[ 2] = t2a + t13a;
    out[ 3] = t3a + t12;
    out[ 5] = t5  + t10a;
    out[10] = t5  - t10a;
    out[12] = t3a - t12;
    out[13] = t2a - t13a;
    out[14] = t1a - t14;
    out[15] = t0a - t15a;
}
static av_always_inline void iadst16_1d(const int16_t *in, ptrdiff_t stride,
                                        int16_t *out, int pass)
{
    int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    int t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    int t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    t0  = IN(15) * 16364 + IN(0)  *   804;
    t1  = IN(15) *   804 - IN(0)  * 16364;
    t2  = IN(13) * 15893 + IN(2)  *  3981;
    t3  = IN(13) *  3981 - IN(2)  * 15893;
    t4  = IN(11) * 14811 + IN(4)  *  7005;
    t5  = IN(11) *  7005 - IN(4)  * 14811;
    t6  = IN(9)  * 13160 + IN(6)  *  9760;
    t7  = IN(9)  *  9760 - IN(6)  * 13160;
    t8  = IN(7)  * 11003 + IN(8)  * 12140;
    t9  = IN(7)  * 12140 - IN(8)  * 11003;
    t10 = IN(5)  *  8423 + IN(10) * 14053;
    t11 = IN(5)  * 14053 - IN(10) *  8423;
    t12 = IN(3)  *  5520 + IN(12) * 15426;
    t13 = IN(3)  * 15426 - IN(12) *  5520;
    t14 = IN(1)  *  2404 + IN(14) * 16207;
    t15 = IN(1)  * 16207 - IN(14) *  2404;

    t0a  = (t0 + t8  + (1 << 13)) >> 14;
    t1a  = (t1 + t9  + (1 << 13)) >> 14;
    t2a  = (t2 + t10 + (1 << 13)) >> 14;
    t3a  = (t3 + t11 + (1 << 13)) >> 14;
    t4a  = (t4 + t12 + (1 << 13)) >> 14;
    t5a  = (t5 + t13 + (1 << 13)) >> 14;
    t6a  = (t6 + t14 + (1 << 13)) >> 14;
    t7a  = (t7 + t15 + (1 << 13)) >> 14;
    t8a  = (t0 - t8  + (1 << 13)) >> 14;
    t9a  = (t1 - t9  + (1 << 13)) >> 14;
    t10a = (t2 - t10 + (1 << 13)) >> 14;
    t11a = (t3 - t11 + (1 << 13)) >> 14;
    t12a = (t4 - t12 + (1 << 13)) >> 14;
    t13a = (t5 - t13 + (1 << 13)) >> 14;
    t14a = (t6 - t14 + (1 << 13)) >> 14;
    t15a = (t7 - t15 + (1 << 13)) >> 14;

    t8  = t8a  * 16069 + t9a  *  3196;
    t9  = t8a  *  3196 - t9a  * 16069;
    t10 = t10a *  9102 + t11a * 13623;
    t11 = t10a * 13623 - t11a *  9102;
    t12 = t13a * 16069 - t12a *  3196;
    t13 = t13a *  3196 + t12a * 16069;
    t14 = t15a *  9102 - t14a * 13623;
    t15 = t15a * 13623 + t14a *  9102;

    t0 = t0a + t4a;
    t1 = t1a + t5a;
    t2 = t2a + t6a;
    t3 = t3a + t7a;
    t4 = t0a - t4a;
    t5 = t1a - t5a;
    t6 = t2a - t6a;
    t7 = t3a - t7a;

    t8a  = (t8  + t12 + (1 << 13)) >> 14;
    t9a  = (t9  + t13 + (1 << 13)) >> 14;
    t10a = (t10 + t14 + (1 << 13)) >> 14;
    t11a = (t11 + t15 + (1 << 13)) >> 14;
    t12a = (t8  - t12 + (1 << 13)) >> 14;
    t13a = (t9  - t13 + (1 << 13)) >> 14;
    t14a = (t10 - t14 + (1 << 13)) >> 14;
    t15a = (t11 - t15 + (1 << 13)) >> 14;

    t4a = t4   * 15137 + t5   *  6270;
    t5a = t4   *  6270 - t5   * 15137;
    t6a = t7   * 15137 - t6   *  6270;
    t7a = t7   *  6270 + t6   * 15137;
    t12 = t12a * 15137 + t13a *  6270;
    t13 = t12a *  6270 - t13a * 15137;
    t14 = t15a * 15137 - t14a *  6270;
    t15 = t15a *  6270 + t14a * 15137;

    out[ 0] =   t0 + t2;
    out[15] = -(t1 + t3);
    t2a     =   t0 - t2;
    t3a     =   t1 - t3;

    out[ 3] = -((t4a + t6a + (1 << 13)) >> 14);
    out[12] =   (t5a + t7a + (1 << 13)) >> 14;
    t6      =   (t4a - t6a + (1 << 13)) >> 14;
    t7      =   (t5a - t7a + (1 << 13)) >> 14;
    out[ 1] = -(t8a + t10a);
    out[14] =   t9a + t11a;
    t10     =   t8a - t10a;
    t11     =   t9a - t11a;
    out[ 2] =   (t12 + t14 + (1 << 13)) >> 14;
    out[13] = -((t13 + t15 + (1 << 13)) >> 14);
    t14a    =   (t12 - t14 + (1 << 13)) >> 14;
    t15a    =   (t13 - t15 + (1 << 13)) >> 14;

    out[ 7] = ((t2a  + t3a)  * -11585 + (1 << 13)) >> 14;
    out[ 8] = ((t2a  - t3a)  *  11585 + (1 << 13)) >> 14;
    out[ 4] = ((t7   + t6)   *  11585 + (1 << 13)) >> 14;
    out[11] = ((t7   - t6)   *  11585 + (1 << 13)) >> 14;
    out[ 6] = ((t11  + t10)  *  11585 + (1 << 13)) >> 14;
    out[ 9] = ((t11  - t10)  *  11585 + (1 << 13)) >> 14;
    out[ 5] = ((t14a + t15a) * -11585 + (1 << 13)) >> 14;
    out[10] = ((t14a - t15a) *  11585 + (1 << 13)) >> 14;
}

itxfm_wrap(16, 6)
static av_always_inline void idct32_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0a  = ((IN(0) + IN(16)) * 11585 + (1 << 13)) >> 14;
    int t1a  = ((IN(0) - IN(16)) * 11585 + (1 << 13)) >> 14;
    int t2a  = (IN( 8) *  6270 - IN(24) * 15137 + (1 << 13)) >> 14;
    int t3a  = (IN( 8) * 15137 + IN(24) *  6270 + (1 << 13)) >> 14;
    int t4a  = (IN( 4) *  3196 - IN(28) * 16069 + (1 << 13)) >> 14;
    int t7a  = (IN( 4) * 16069 + IN(28) *  3196 + (1 << 13)) >> 14;
    int t5a  = (IN(20) * 13623 - IN(12) *  9102 + (1 << 13)) >> 14;
    int t6a  = (IN(20) *  9102 + IN(12) * 13623 + (1 << 13)) >> 14;
    int t8a  = (IN( 2) *  1606 - IN(30) * 16305 + (1 << 13)) >> 14;
    int t15a = (IN( 2) * 16305 + IN(30) *  1606 + (1 << 13)) >> 14;
    int t9a  = (IN(18) * 12665 - IN(14) * 10394 + (1 << 13)) >> 14;
    int t14a = (IN(18) * 10394 + IN(14) * 12665 + (1 << 13)) >> 14;
    int t10a = (IN(10) *  7723 - IN(22) * 14449 + (1 << 13)) >> 14;
    int t13a = (IN(10) * 14449 + IN(22) *  7723 + (1 << 13)) >> 14;
    int t11a = (IN(26) * 15679 - IN( 6) *  4756 + (1 << 13)) >> 14;
    int t12a = (IN(26) *  4756 + IN( 6) * 15679 + (1 << 13)) >> 14;
    int t16a = (IN( 1) *   804 - IN(31) * 16364 + (1 << 13)) >> 14;
    int t31a = (IN( 1) * 16364 + IN(31) *   804 + (1 << 13)) >> 14;
    int t17a = (IN(17) * 12140 - IN(15) * 11003 + (1 << 13)) >> 14;
    int t30a = (IN(17) * 11003 + IN(15) * 12140 + (1 << 13)) >> 14;
    int t18a = (IN( 9) *  7005 - IN(23) * 14811 + (1 << 13)) >> 14;
    int t29a = (IN( 9) * 14811 + IN(23) *  7005 + (1 << 13)) >> 14;
    int t19a = (IN(25) * 15426 - IN( 7) *  5520 + (1 << 13)) >> 14;
    int t28a = (IN(25) *  5520 + IN( 7) * 15426 + (1 << 13)) >> 14;
    int t20a = (IN( 5) *  3981 - IN(27) * 15893 + (1 << 13)) >> 14;
    int t27a = (IN( 5) * 15893 + IN(27) *  3981 + (1 << 13)) >> 14;
    int t21a = (IN(21) * 14053 - IN(11) *  8423 + (1 << 13)) >> 14;
    int t26a = (IN(21) *  8423 + IN(11) * 14053 + (1 << 13)) >> 14;
    int t22a = (IN(13) *  9760 - IN(19) * 13160 + (1 << 13)) >> 14;
    int t25a = (IN(13) * 13160 + IN(19) *  9760 + (1 << 13)) >> 14;
    int t23a = (IN(29) * 16207 - IN( 3) *  2404 + (1 << 13)) >> 14;
    int t24a = (IN(29) *  2404 + IN( 3) * 16207 + (1 << 13)) >> 14;

    int t0  = t0a  + t3a;
    int t1  = t1a  + t2a;
    int t2  = t1a  - t2a;
    int t3  = t0a  - t3a;
    int t4  = t4a  + t5a;
    int t5  = t4a  - t5a;
    int t6  = t7a  - t6a;
    int t7  = t7a  + t6a;
    int t8  = t8a  + t9a;
    int t9  = t8a  - t9a;
    int t10 = t11a - t10a;
    int t11 = t11a + t10a;
    int t12 = t12a + t13a;
    int t13 = t12a - t13a;
    int t14 = t15a - t14a;
    int t15 = t15a + t14a;
    int t16 = t16a + t17a;
    int t17 = t16a - t17a;
    int t18 = t19a - t18a;
    int t19 = t19a + t18a;
    int t20 = t20a + t21a;
    int t21 = t20a - t21a;
    int t22 = t23a - t22a;
    int t23 = t23a + t22a;
    int t24 = t24a + t25a;
    int t25 = t24a - t25a;
    int t26 = t27a - t26a;
    int t27 = t27a + t26a;
    int t28 = t28a + t29a;
    int t29 = t28a - t29a;
    int t30 = t31a - t30a;
    int t31 = t31a + t30a;

    t5a  = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
    t6a  = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
    t9a  = (  t14 *  6270 - t9  * 15137  + (1 << 13)) >> 14;
    t14a = (  t14 * 15137 + t9  *  6270  + (1 << 13)) >> 14;
    t10a = (-(t13 * 15137 + t10 *  6270) + (1 << 13)) >> 14;
    t13a = (  t13 *  6270 - t10 * 15137  + (1 << 13)) >> 14;
    t17a = (  t30 *  3196 - t17 * 16069  + (1 << 13)) >> 14;
    t30a = (  t30 * 16069 + t17 *  3196  + (1 << 13)) >> 14;
    t18a = (-(t29 * 16069 + t18 *  3196) + (1 << 13)) >> 14;
    t29a = (  t29 *  3196 - t18 * 16069  + (1 << 13)) >> 14;
    t21a = (  t26 * 13623 - t21 *  9102  + (1 << 13)) >> 14;
    t26a = (  t26 *  9102 + t21 * 13623  + (1 << 13)) >> 14;
    t22a = (-(t25 *  9102 + t22 * 13623) + (1 << 13)) >> 14;
    t25a = (  t25 * 13623 - t22 *  9102  + (1 << 13)) >> 14;

    t10a = ((t13  - t10 ) * 11585 + (1 << 13)) >> 14;
    t13a = ((t13  + t10 ) * 11585 + (1 << 13)) >> 14;
    t11  = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
    t12  = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;
    t18a = (  t29  *  6270 - t18  * 15137  + (1 << 13)) >> 14;
    t29a = (  t29  * 15137 + t18  *  6270  + (1 << 13)) >> 14;
    t19  = (  t28a *  6270 - t19a * 15137  + (1 << 13)) >> 14;
    t28  = (  t28a * 15137 + t19a *  6270  + (1 << 13)) >> 14;
    t20  = (-(t27a * 15137 + t20a *  6270) + (1 << 13)) >> 14;
    t27  = (  t27a *  6270 - t20a * 15137  + (1 << 13)) >> 14;
    t21a = (-(t26  * 15137 + t21  *  6270) + (1 << 13)) >> 14;
    t26a = (  t26  *  6270 - t21  * 15137  + (1 << 13)) >> 14;

    t20  = ((t27a - t20a) * 11585 + (1 << 13)) >> 14;
    t27  = ((t27a + t20a) * 11585 + (1 << 13)) >> 14;
    t21a = ((t26  - t21 ) * 11585 + (1 << 13)) >> 14;
    t26a = ((t26  + t21 ) * 11585 + (1 << 13)) >> 14;
    t22  = ((t25a - t22a) * 11585 + (1 << 13)) >> 14;
    t25  = ((t25a + t22a) * 11585 + (1 << 13)) >> 14;
    t23a = ((t24  - t23 ) * 11585 + (1 << 13)) >> 14;
    t24a = ((t24  + t23 ) * 11585 + (1 << 13)) >> 14;

    out[ 1] = t1   + t30a;
    out[ 3] = t3   + t28a;
    out[ 5] = t5a  + t26a;
    out[ 6] = t6a  + t25;
    out[ 7] = t7   + t24a;
    out[ 8] = t8   + t23a;
    out[ 9] = t9a  + t22;
    out[10] = t10  + t21a;
    out[11] = t11a + t20;
    out[12] = t12a + t19a;
    out[13] = t13  + t18;
    out[14] = t14a + t17a;
    out[15] = t15  + t16;
    out[16] = t15  - t16;
    out[17] = t14a - t17a;
    out[18] = t13  - t18;
    out[19] = t12a - t19a;
    out[20] = t11a - t20;
    out[21] = t10  - t21a;
    out[22] = t9a  - t22;
    out[23] = t8   - t23a;
    out[24] = t7   - t24a;
    out[25] = t6a  - t25;
    out[26] = t5a  - t26a;
    out[28] = t3   - t28a;
    out[30] = t1   - t30a;
}
itxfm_wrapper(idct, idct, 32, 6, 1)

static av_always_inline void iwht4_1d(const int16_t *in, ptrdiff_t stride,
                                      int16_t *out, int pass)
{
    int t0, t1, t2, t3, t4;

    if (pass == 0) {
        t0 = IN(0) >> 2;
        t1 = IN(3) >> 2;
        t2 = IN(1) >> 2;
        t3 = IN(2) >> 2;
    } else {
        t0 = IN(0);
        t1 = IN(3);
        t2 = IN(1);
        t3 = IN(2);
    }

    t0 += t2;
    t3 -= t1;
    t4  = (t0 - t3) >> 1;
    t1  = t4 - t1;
    t2  = t4 - t2;
    t0 -= t1;
    t3 += t2;

    out[0] = t0;
    out[1] = t1;
    out[2] = t2;
    out[3] = t3;
}

itxfm_wrapper(iwht, iwht, 4, 0, 0)
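/* The Walsh-Hadamard pair is only used for lossless coding: it is
 * instantiated with bits = 0 (no final rounding shift) and without the
 * DC-only shortcut. */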
#undef itxfm_wrapper
#undef itxfm_wrap

static av_cold void vp9dsp_itxfm_init(VP9DSPContext *dsp)
{
#define init_itxfm(tx, sz) \
    dsp->itxfm_add[tx][DCT_DCT]   = idct_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][DCT_ADST]  = iadst_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_DCT]  = idct_iadst_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_ADST] = iadst_iadst_##sz##_add_c

#define init_idct(tx, nm) \
    dsp->itxfm_add[tx][DCT_DCT]   = \
    dsp->itxfm_add[tx][ADST_DCT]  = \
    dsp->itxfm_add[tx][DCT_ADST]  = \
    dsp->itxfm_add[tx][ADST_ADST] = nm##_add_c

    init_itxfm(TX_4X4,   4x4);
    init_itxfm(TX_8X8,   8x8);
    init_itxfm(TX_16X16, 16x16);
    init_idct(TX_32X32, idct_idct_32x32);
    init_idct(4 /* lossless */, iwht_iwht_4x4);

#undef init_itxfm
#undef init_idct
}
static av_always_inline void loop_filter(uint8_t *dst, int E, int I, int H,
                                         ptrdiff_t stridea, ptrdiff_t strideb,
                                         int wd)
{
    int i;

    for (i = 0; i < 8; i++, dst += stridea) {
        int p7, p6, p5, p4;
        int p3 = dst[strideb * -4], p2 = dst[strideb * -3];
        int p1 = dst[strideb * -2], p0 = dst[strideb * -1];
        int q0 = dst[strideb * +0], q1 = dst[strideb * +1];
        int q2 = dst[strideb * +2], q3 = dst[strideb * +3];
        int q4, q5, q6, q7;
        int fm = FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
                 FFABS(p1 - p0) <= I && FFABS(q1 - q0) <= I &&
                 FFABS(q2 - q1) <= I && FFABS(q3 - q2) <= I &&
                 FFABS(p0 - q0) * 2 + (FFABS(p1 - q1) >> 1) <= E;
        int flat8out, flat8in;

        if (!fm)
            continue;

        if (wd >= 16) {
            p7 = dst[strideb * -8];
            p6 = dst[strideb * -7];
            p5 = dst[strideb * -6];
            p4 = dst[strideb * -5];
            q4 = dst[strideb * +4];
            q5 = dst[strideb * +5];
            q6 = dst[strideb * +6];
            q7 = dst[strideb * +7];

            flat8out = FFABS(p7 - p0) <= 1 && FFABS(p6 - p0) <= 1 &&
                       FFABS(p5 - p0) <= 1 && FFABS(p4 - p0) <= 1 &&
                       FFABS(q4 - q0) <= 1 && FFABS(q5 - q0) <= 1 &&
                       FFABS(q6 - q0) <= 1 && FFABS(q7 - q0) <= 1;
        }

        if (wd >= 8)
            flat8in = FFABS(p3 - p0) <= 1 && FFABS(p2 - p0) <= 1 &&
                      FFABS(p1 - p0) <= 1 && FFABS(q1 - q0) <= 1 &&
                      FFABS(q2 - q0) <= 1 && FFABS(q3 - q0) <= 1;

        if (wd >= 16 && flat8out && flat8in) {
            dst[strideb * -7] = (p7 + p7 + p7 + p7 + p7 + p7 + p7 + p6 * 2 +
                                 p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
            dst[strideb * -6] = (p7 + p7 + p7 + p7 + p7 + p7 + p6 + p5 * 2 +
                                 p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4;
            dst[strideb * -5] = (p7 + p7 + p7 + p7 + p7 + p6 + p5 + p4 * 2 +
                                 p3 + p2 + p1 + p0 + q0 + q1 + q2 + 8) >> 4;
            dst[strideb * -4] = (p7 + p7 + p7 + p7 + p6 + p5 + p4 + p3 * 2 +
                                 p2 + p1 + p0 + q0 + q1 + q2 + q3 + 8) >> 4;
            dst[strideb * -3] = (p7 + p7 + p7 + p6 + p5 + p4 + p3 + p2 * 2 +
                                 p1 + p0 + q0 + q1 + q2 + q3 + q4 + 8) >> 4;
            dst[strideb * -2] = (p7 + p7 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
                                 p0 + q0 + q1 + q2 + q3 + q4 + q5 + 8) >> 4;
            dst[strideb * -1] = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
                                 q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
            dst[strideb * +0] = (p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
                                 q1 + q2 + q3 + q4 + q5 + q6 + q7 + 8) >> 4;
            dst[strideb * +1] = (p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
                                 q2 + q3 + q4 + q5 + q6 + q7 + q7 + 8) >> 4;
            dst[strideb * +2] = (p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
                                 q3 + q4 + q5 + q6 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +3] = (p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 +
                                 q4 + q5 + q6 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +4] = (p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 +
                                 q5 + q6 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +5] = (p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 +
                                 q6 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +6] = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 +
                                 q7 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
        } else if (wd >= 8 && flat8in) {
            dst[strideb * -3] = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
            dst[strideb * -2] = (p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
            dst[strideb * -1] = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;
            dst[strideb * +0] = (p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3 + 4) >> 3;
            dst[strideb * +1] = (p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3 + 4) >> 3;
            dst[strideb * +2] = (p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3 + 4) >> 3;
        } else {
            int hev = FFABS(p1 - p0) > H || FFABS(q1 - q0) > H;

            if (hev) {
                int f = av_clip_int8(3 * (q0 - p0) + av_clip_int8(p1 - q1)), f1, f2;

                f1 = FFMIN(f + 4, 127) >> 3;
                f2 = FFMIN(f + 3, 127) >> 3;

                dst[strideb * -1] = av_clip_uint8(p0 + f2);
                dst[strideb * +0] = av_clip_uint8(q0 - f1);
            } else {
                int f = av_clip_int8(3 * (q0 - p0)), f1, f2;

                f1 = FFMIN(f + 4, 127) >> 3;
                f2 = FFMIN(f + 3, 127) >> 3;

                dst[strideb * -1] = av_clip_uint8(p0 + f2);
                dst[strideb * +0] = av_clip_uint8(q0 - f1);

                f = (f1 + 1) >> 1;
                dst[strideb * -2] = av_clip_uint8(p1 + f);
                dst[strideb * +1] = av_clip_uint8(q1 - f);
            }
        }
    }
}
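/* E, I and H are the VP9 loop-filter thresholds: E limits the edge difference
 * around p0/q0, I the differences between neighbouring pixels on each side,
 * and H is the high-edge-variance threshold that decides whether the outer
 * pixels p1/q1 are adjusted as well. wd is the filter width (4, 8 or 16
 * pixels). stridea/strideb let the same loop walk either along rows or along
 * columns, so one implementation serves both horizontal and vertical edges. */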
#define lf_8_fn(dir, wd, stridea, strideb) \
static void loop_filter_##dir##_##wd##_8_c(uint8_t *dst, \
                                           ptrdiff_t stride, \
                                           int E, int I, int H) \
{ \
    loop_filter(dst, E, I, H, stridea, strideb, wd); \
}

#define lf_8_fns(wd) \
lf_8_fn(h, wd, stride, 1) \
lf_8_fn(v, wd, 1, stride)

lf_8_fns(4)
lf_8_fns(8)
lf_8_fns(16)

#undef lf_8_fn
#undef lf_8_fns

#define lf_16_fn(dir, stridea) \
static void loop_filter_##dir##_16_16_c(uint8_t *dst, \
                                        ptrdiff_t stride, \
                                        int E, int I, int H) \
{ \
    loop_filter_##dir##_16_8_c(dst,                stride, E, I, H); \
    loop_filter_##dir##_16_8_c(dst + 8 * stridea,  stride, E, I, H); \
}

lf_16_fn(h, stride)
lf_16_fn(v, 1)

#undef lf_16_fn

#define lf_mix_fn(dir, wd1, wd2, stridea) \
static void loop_filter_##dir##_##wd1##wd2##_16_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  int E, int I, int H) \
{ \
    loop_filter_##dir##_##wd1##_8_c(dst,               stride, E & 0xff, I & 0xff, H & 0xff); \
    loop_filter_##dir##_##wd2##_8_c(dst + 8 * stridea, stride, E >> 8,   I >> 8,   H >> 8); \
}

#define lf_mix_fns(wd1, wd2) \
lf_mix_fn(h, wd1, wd2, stride) \
lf_mix_fn(v, wd1, wd2, 1)

lf_mix_fns(4, 4)
lf_mix_fns(4, 8)
lf_mix_fns(8, 4)
lf_mix_fns(8, 8)

#undef lf_mix_fn
#undef lf_mix_fns
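/* The _16 variants filter a 16-pixel edge as two 8-pixel halves, and the mix2
 * variants filter two adjacent 8-pixel edges with independent strengths: the
 * thresholds for the second half are packed into the upper byte of E, I and
 * H, hence the "& 0xff" / ">> 8" split above. */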
static av_cold void vp9dsp_loopfilter_init(VP9DSPContext *dsp)
{
    dsp->loop_filter_8[0][0] = loop_filter_h_4_8_c;
    dsp->loop_filter_8[0][1] = loop_filter_v_4_8_c;
    dsp->loop_filter_8[1][0] = loop_filter_h_8_8_c;
    dsp->loop_filter_8[1][1] = loop_filter_v_8_8_c;
    dsp->loop_filter_8[2][0] = loop_filter_h_16_8_c;
    dsp->loop_filter_8[2][1] = loop_filter_v_16_8_c;

    dsp->loop_filter_16[0] = loop_filter_h_16_16_c;
    dsp->loop_filter_16[1] = loop_filter_v_16_16_c;

    dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_c;
    dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_c;
    dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_c;
    dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_c;
    dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_c;
    dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_c;
    dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_c;
    dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_c;
}
static av_always_inline void copy_c(uint8_t *dst, ptrdiff_t dst_stride,
                                    const uint8_t *src, ptrdiff_t src_stride,
                                    int w, int h)
{
    do {
        memcpy(dst, src, w);

        dst += dst_stride;
        src += src_stride;
    } while (--h);
}

static av_always_inline void avg_c(uint8_t *dst, ptrdiff_t dst_stride,
                                   const uint8_t *src, ptrdiff_t src_stride,
                                   int w, int h)
{
    do {
        int x;

        for (x = 0; x < w; x += 4)
            AV_WN32A(&dst[x], rnd_avg32(AV_RN32A(&dst[x]), AV_RN32(&src[x])));

        dst += dst_stride;
        src += src_stride;
    } while (--h);
}

#define fpel_fn(type, sz) \
static void type##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                         const uint8_t *src, ptrdiff_t src_stride, \
                         int h, int mx, int my) \
{ \
    type##_c(dst, dst_stride, src, src_stride, sz, h); \
}

#define copy_avg_fn(sz) \
fpel_fn(copy, sz) \
fpel_fn(avg,  sz)

copy_avg_fn(64)
copy_avg_fn(32)
copy_avg_fn(16)
copy_avg_fn(8)
copy_avg_fn(4)

#undef fpel_fn
#undef copy_avg_fn
static const int8_t vp9_subpel_filters[3][15][8] = {
    [FILTER_8TAP_REGULAR] = {
        {  0,  1,  -5, 126,   8,  -3,  1,  0 },
        { -1,  3, -10, 122,  18,  -6,  2,  0 },
        { -1,  4, -13, 118,  27,  -9,  3, -1 },
        { -1,  4, -16, 112,  37, -11,  4, -1 },
        { -1,  5, -18, 105,  48, -14,  4, -1 },
        { -1,  5, -19,  97,  58, -16,  5, -1 },
        { -1,  6, -19,  88,  68, -18,  5, -1 },
        { -1,  6, -19,  78,  78, -19,  6, -1 },
        { -1,  5, -18,  68,  88, -19,  6, -1 },
        { -1,  5, -16,  58,  97, -19,  5, -1 },
        { -1,  4, -14,  48, 105, -18,  5, -1 },
        { -1,  4, -11,  37, 112, -16,  4, -1 },
        { -1,  3,  -9,  27, 118, -13,  4, -1 },
        {  0,  2,  -6,  18, 122, -10,  3, -1 },
        {  0,  1,  -3,   8, 126,  -5,  1,  0 },
    }, [FILTER_8TAP_SHARP] = {
        { -1,  3,  -7, 127,   8,  -3,  1,  0 },
        { -2,  5, -13, 125,  17,  -6,  3, -1 },
        { -3,  7, -17, 121,  27, -10,  5, -2 },
        { -4,  9, -20, 115,  37, -13,  6, -2 },
        { -4, 10, -23, 108,  48, -16,  8, -3 },
        { -4, 10, -24, 100,  59, -19,  9, -3 },
        { -4, 11, -24,  90,  70, -21, 10, -4 },
        { -4, 11, -23,  80,  80, -23, 11, -4 },
        { -4, 10, -21,  70,  90, -24, 11, -4 },
        { -3,  9, -19,  59, 100, -24, 10, -4 },
        { -3,  8, -16,  48, 108, -23, 10, -4 },
        { -2,  6, -13,  37, 115, -20,  9, -4 },
        { -2,  5, -10,  27, 121, -17,  7, -3 },
        { -1,  3,  -6,  17, 125, -13,  5, -2 },
        {  0,  1,  -3,   8, 127,  -7,  3, -1 },
    }, [FILTER_8TAP_SMOOTH] = {
        { -3, -1, 32, 64, 38,  1, -3,  0 },
        { -2, -2, 29, 63, 41,  2, -3,  0 },
        { -2, -2, 26, 63, 43,  4, -4,  0 },
        { -2, -3, 24, 62, 46,  5, -4,  0 },
        { -2, -3, 21, 60, 49,  7, -4,  0 },
        { -1, -4, 18, 59, 51,  9, -4,  0 },
        { -1, -4, 16, 57, 53, 12, -4, -1 },
        { -1, -4, 14, 55, 55, 14, -4, -1 },
        { -1, -4, 12, 53, 57, 16, -4, -1 },
        {  0, -4,  9, 51, 59, 18, -4, -1 },
        {  0, -4,  7, 49, 60, 21, -3, -2 },
        {  0, -4,  5, 46, 62, 24, -3, -2 },
        {  0, -4,  4, 43, 63, 26, -2, -2 },
        {  0, -3,  2, 41, 63, 29, -2, -2 },
        {  0, -3,  1, 38, 64, 32, -1, -3 },
    }
};
#define FILTER_8TAP(src, x, F, stride) \
    av_clip_uint8((F[0] * src[x + -3 * stride] + \
                   F[1] * src[x + -2 * stride] + \
                   F[2] * src[x + -1 * stride] + \
                   F[3] * src[x + +0 * stride] + \
                   F[4] * src[x + +1 * stride] + \
                   F[5] * src[x + +2 * stride] + \
                   F[6] * src[x + +3 * stride] + \
                   F[7] * src[x + +4 * stride] + 64) >> 7)
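/* Each 8-tap filter row above sums to 128, so FILTER_8TAP renormalizes with
 * (+64) >> 7. The 15 rows of each bank correspond to the 1/16-pel phases
 * 1..15; phase 0 (full-pel) is handled by the copy/avg functions instead,
 * which is why the filter lookups below index with [mx - 1] / [my - 1]. */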
static av_always_inline void do_8tap_1d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                          const uint8_t *src, ptrdiff_t src_stride,
                                          int w, int h, ptrdiff_t ds,
                                          const int8_t *filter, int avg)
{
    int x;

    do {
        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_8TAP(src, x, filter, ds) + 1) >> 1;
            } else {
                dst[x] = FILTER_8TAP(src, x, filter, ds);
            }

        dst += dst_stride;
        src += src_stride;
    } while (--h);
}

#define filter_8tap_1d_fn(opn, opa, dir, ds) \
static av_noinline void opn##_8tap_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                                const uint8_t *src, ptrdiff_t src_stride, \
                                                int w, int h, const int8_t *filter) \
{ \
    do_8tap_1d_c(dst, dst_stride, src, src_stride, w, h, ds, filter, opa); \
}

filter_8tap_1d_fn(put, 0, v, src_stride)
filter_8tap_1d_fn(put, 0, h, 1)
filter_8tap_1d_fn(avg, 1, v, src_stride)
filter_8tap_1d_fn(avg, 1, h, 1)

#undef filter_8tap_1d_fn
static av_always_inline void do_8tap_2d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                          const uint8_t *src, ptrdiff_t src_stride,
                                          int w, int h, const int8_t *filterx,
                                          const int8_t *filtery, int avg)
{
    int tmp_h = h + 7;
    uint8_t tmp[64 * 71], *tmp_ptr = tmp;

    src -= src_stride * 3;

    do {
        int x;

        for (x = 0; x < w; x++)
            tmp_ptr[x] = FILTER_8TAP(src, x, filterx, 1);

        tmp_ptr += 64;
        src     += src_stride;
    } while (--tmp_h);

    tmp_ptr = tmp + 64 * 3;
    do {
        int x;

        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filtery, 64) + 1) >> 1;
            } else {
                dst[x] = FILTER_8TAP(tmp_ptr, x, filtery, 64);
            }

        tmp_ptr += 64;
        dst     += dst_stride;
    } while (--h);
}

#define filter_8tap_2d_fn(opn, opa) \
static av_noinline void opn##_8tap_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                           const uint8_t *src, ptrdiff_t src_stride, \
                                           int w, int h, const int8_t *filterx, \
                                           const int8_t *filtery) \
{ \
    do_8tap_2d_c(dst, dst_stride, src, src_stride, w, h, filterx, filtery, opa); \
}

filter_8tap_2d_fn(put, 0)
filter_8tap_2d_fn(avg, 1)

#undef filter_8tap_2d_fn
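/* The 2-D (hv) case filters horizontally into an intermediate buffer first:
 * tmp is 64 (the widest block) by h + 7 rows, because the vertical 8-tap
 * needs 3 rows above and 4 rows below each output row; the second pass then
 * filters tmp vertically with a fixed stride of 64. */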
#define filter_fn_1d(sz, dir, dir_m, type, type_idx, avg) \
static void avg##_8tap_##type##_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                              const uint8_t *src, ptrdiff_t src_stride, \
                                              int h, int mx, int my) \
{ \
    avg##_8tap_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, \
                            vp9_subpel_filters[type_idx][dir_m - 1]); \
}

#define filter_fn_2d(sz, type, type_idx, avg) \
static void avg##_8tap_##type##_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                           const uint8_t *src, ptrdiff_t src_stride, \
                                           int h, int mx, int my) \
{ \
    avg##_8tap_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, \
                       vp9_subpel_filters[type_idx][mx - 1], \
                       vp9_subpel_filters[type_idx][my - 1]); \
}

#define FILTER_BILIN(src, x, mxy, stride) \
    (src[x] + ((mxy * (src[x + stride] - src[x]) + 8) >> 4))
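/* FILTER_BILIN is plain linear interpolation with a 4-bit fraction:
 * a + ((mxy * (b - a) + 8) >> 4), where mxy is the 1..15 sub-pel phase and b
 * is the next sample along the filtering direction. */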
static av_always_inline void do_bilin_1d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                           const uint8_t *src, ptrdiff_t src_stride,
                                           int w, int h, ptrdiff_t ds, int mxy, int avg)
{
    int x;

    do {
        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_BILIN(src, x, mxy, ds) + 1) >> 1;
            } else {
                dst[x] = FILTER_BILIN(src, x, mxy, ds);
            }

        dst += dst_stride;
        src += src_stride;
    } while (--h);
}

#define bilin_1d_fn(opn, opa, dir, ds) \
static av_noinline void opn##_bilin_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                                 const uint8_t *src, ptrdiff_t src_stride, \
                                                 int w, int h, int mxy) \
{ \
    do_bilin_1d_c(dst, dst_stride, src, src_stride, w, h, ds, mxy, opa); \
}

bilin_1d_fn(put, 0, v, src_stride)
bilin_1d_fn(put, 0, h, 1)
bilin_1d_fn(avg, 1, v, src_stride)
bilin_1d_fn(avg, 1, h, 1)

#undef bilin_1d_fn
static av_always_inline void do_bilin_2d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                           const uint8_t *src, ptrdiff_t src_stride,
                                           int w, int h, int mx, int my, int avg)
{
    uint8_t tmp[64 * 65], *tmp_ptr = tmp;
    int tmp_h = h + 1;

    do {
        int x;

        for (x = 0; x < w; x++)
            tmp_ptr[x] = FILTER_BILIN(src, x, mx, 1);

        tmp_ptr += 64;
        src     += src_stride;
    } while (--tmp_h);

    tmp_ptr = tmp;
    do {
        int x;

        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
            } else {
                dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
            }

        tmp_ptr += 64;
        dst     += dst_stride;
    } while (--h);
}
#define bilin_2d_fn(opn, opa) \
static av_noinline void opn##_bilin_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                            const uint8_t *src, ptrdiff_t src_stride, \
                                            int w, int h, int mx, int my) \
{ \
    do_bilin_2d_c(dst, dst_stride, src, src_stride, w, h, mx, my, opa); \
}

bilin_2d_fn(put, 0)
bilin_2d_fn(avg, 1)

#undef bilin_2d_fn

#define bilinf_fn_1d(sz, dir, dir_m, avg) \
static void avg##_bilin_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                      const uint8_t *src, ptrdiff_t src_stride, \
                                      int h, int mx, int my) \
{ \
    avg##_bilin_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, dir_m); \
}

#define bilinf_fn_2d(sz, avg) \
static void avg##_bilin_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                   const uint8_t *src, ptrdiff_t src_stride, \
                                   int h, int mx, int my) \
{ \
    avg##_bilin_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, mx, my); \
}

#define filter_fn(sz, avg) \
filter_fn_1d(sz, h, mx, regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_1d(sz, v, my, regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_2d(sz,        regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_1d(sz, h, mx, smooth,  FILTER_8TAP_SMOOTH,  avg) \
filter_fn_1d(sz, v, my, smooth,  FILTER_8TAP_SMOOTH,  avg) \
filter_fn_2d(sz,        smooth,  FILTER_8TAP_SMOOTH,  avg) \
filter_fn_1d(sz, h, mx, sharp,   FILTER_8TAP_SHARP,   avg) \
filter_fn_1d(sz, v, my, sharp,   FILTER_8TAP_SHARP,   avg) \
filter_fn_2d(sz,        sharp,   FILTER_8TAP_SHARP,   avg) \
bilinf_fn_1d(sz, h, mx, avg) \
bilinf_fn_1d(sz, v, my, avg) \
bilinf_fn_2d(sz, avg)

#define filter_fn_set(avg) \
filter_fn(64, avg) \
filter_fn(32, avg) \
filter_fn(16, avg) \
filter_fn(8,  avg) \
filter_fn(4,  avg)

filter_fn_set(put)
filter_fn_set(avg)

#undef filter_fn
#undef filter_fn_set
#undef filter_fn_1d
#undef filter_fn_2d
#undef bilinf_fn_1d
#undef bilinf_fn_2d
static av_cold void vp9dsp_mc_init(VP9DSPContext *dsp)
{
#define init_fpel(idx1, idx2, sz, type) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = type##sz##_c

#define init_copy_avg(idx, sz) \
    init_fpel(idx, 0, sz, copy); \
    init_fpel(idx, 1, sz, avg)

    init_copy_avg(0, 64);
    init_copy_avg(1, 32);
    init_copy_avg(2, 16);
    init_copy_avg(3,  8);
    init_copy_avg(4,  4);

#undef init_copy_avg
#undef init_fpel

#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][idxh][idxv] = type##_bilin_##sz##dir##_c

#define init_subpel2(idx, idxh, idxv, dir, type) \
    init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
    init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
    init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
    init_subpel1(3, idx, idxh, idxv,  8, dir, type); \
    init_subpel1(4, idx, idxh, idxv,  4, dir, type)

#define init_subpel3(idx, type) \
    init_subpel2(idx, 1, 1, hv, type); \
    init_subpel2(idx, 0, 1, v,  type); \
    init_subpel2(idx, 1, 0, h,  type)

    init_subpel3(0, put);
    init_subpel3(1, avg);

#undef init_subpel1
#undef init_subpel2
#undef init_subpel3
}
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp)
{
    vp9dsp_intrapred_init(dsp);
    vp9dsp_itxfm_init(dsp);
    vp9dsp_loopfilter_init(dsp);
    vp9dsp_mc_init(dsp);

    if (ARCH_X86) ff_vp9dsp_init_x86(dsp);
}