Imported Debian version 2.4.3~trusty1
[deb_ffmpeg.git] / ffmpeg / libavcodec / vp9dsp.c
CommitLineData
2ba45a60
DM
1/*
2 * VP9 compatible video decoder
3 *
4 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5 * Copyright (C) 2013 Clément Bœsch <u pkh me>
6 *
7 * This file is part of FFmpeg.
8 *
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include "libavutil/common.h"
25#include "libavutil/intreadwrite.h"
26#include "vp9dsp.h"
27#include "rnd_avg.h"
28
29// FIXME see whether we can merge parts of this (perhaps at least 4x4 and 8x8)
30// back with h264pred.[ch]
31
/* Vertical prediction 4x4: replicate the 4-pixel top edge into every row. */
static void vert_4x4_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    unsigned row = AV_RN32A(top);
    int y;

    for (y = 0; y < 4; y++)
        AV_WN32A(dst + y * stride, row);
}
42
/* Vertical prediction 8x8: replicate the 8-pixel top edge into every row. */
static void vert_8x8_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    uint64_t row = AV_RN64A(top);
    int y = 8;

    do {
        AV_WN64A(dst, row);
        dst += stride;
    } while (--y);
}
54
/* Vertical prediction 16x16: replicate the 16-pixel top edge into every row. */
static void vert_16x16_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    uint64_t lo = AV_RN64A(top), hi = AV_RN64A(top + 8);
    int y;

    for (y = 0; y < 16; y++) {
        AV_WN64A(dst,     lo);
        AV_WN64A(dst + 8, hi);
        dst += stride;
    }
}
67
/* Vertical prediction 32x32: replicate the 32-pixel top edge into every row. */
static void vert_32x32_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    uint64_t q0 = AV_RN64A(top),      q1 = AV_RN64A(top + 8),
             q2 = AV_RN64A(top + 16), q3 = AV_RN64A(top + 24);
    int y;

    for (y = 0; y < 32; y++) {
        AV_WN64A(dst,      q0);
        AV_WN64A(dst +  8, q1);
        AV_WN64A(dst + 16, q2);
        AV_WN64A(dst + 24, q3);
        dst += stride;
    }
}
83
/* Horizontal prediction 4x4: each row is a splat of one left-edge pixel.
 * Note: left[] is stored bottom-to-top, so row y uses left[3 - y]. */
static void hor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 4; y++)
        AV_WN32A(dst + y * stride, left[3 - y] * 0x01010101U);
}
92
/* Horizontal prediction 8x8 (left[] is bottom-to-top). */
static void hor_8x8_c(uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 8; y++)
        AV_WN64A(dst + y * stride, left[7 - y] * 0x0101010101010101ULL);
}
103
/* Horizontal prediction 16x16 (left[] is bottom-to-top). */
static void hor_16x16_c(uint8_t *dst, ptrdiff_t stride,
                        const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 16; y++) {
        uint64_t row = left[15 - y] * 0x0101010101010101ULL;

        for (x = 0; x < 16; x += 8)
            AV_WN64A(dst + x, row);
        dst += stride;
    }
}
117
/* Horizontal prediction 32x32 (left[] is bottom-to-top). */
static void hor_32x32_c(uint8_t *dst, ptrdiff_t stride,
                        const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 32; y++) {
        uint64_t row = left[31 - y] * 0x0101010101010101ULL;

        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, row);
        dst += stride;
    }
}
133
/* TrueMotion prediction 4x4: pixel = clip(top[x] + left[y] - topleft). */
static void tm_4x4_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    const int tl = top[-1];
    int x, y;

    for (y = 0; y < 4; y++) {
        int base = left[3 - y] - tl; /* left[] is stored bottom-to-top */

        for (x = 0; x < 4; x++)
            dst[x] = av_clip_uint8(top[x] + base);
        dst += stride;
    }
}
149
/* TrueMotion prediction 8x8: pixel = clip(top[x] + left[y] - topleft). */
static void tm_8x8_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    const int tl = top[-1];
    int x, y;

    for (y = 0; y < 8; y++) {
        int base = left[7 - y] - tl;

        for (x = 0; x < 8; x++)
            dst[x] = av_clip_uint8(top[x] + base);
        dst += stride;
    }
}
169
/* TrueMotion prediction 16x16: pixel = clip(top[x] + left[y] - topleft). */
static void tm_16x16_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    const int tl = top[-1];
    int x, y;

    for (y = 0; y < 16; y++) {
        int base = left[15 - y] - tl;

        for (x = 0; x < 16; x++)
            dst[x] = av_clip_uint8(top[x] + base);
        dst += stride;
    }
}
197
/* TrueMotion prediction 32x32: pixel = clip(top[x] + left[y] - topleft). */
static void tm_32x32_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    const int tl = top[-1];
    int x, y;

    for (y = 0; y < 32; y++) {
        int base = left[31 - y] - tl;

        for (x = 0; x < 32; x++)
            dst[x] = av_clip_uint8(top[x] + base);
        dst += stride;
    }
}
241
/* DC prediction 4x4: fill with the rounded mean of left + top edges. */
static void dc_4x4_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 4, dc; /* 4 = rounding bias for the >> 3 */
    int i;

    for (i = 0; i < 4; i++)
        sum += left[i] + top[i];
    dc = 0x01010101U * (sum >> 3);

    for (i = 0; i < 4; i++)
        AV_WN32A(dst + i * stride, dc);
}
253
/* DC prediction 8x8: fill with the rounded mean of left + top edges. */
static void dc_8x8_c(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 8; /* rounding bias for the >> 4 */
    uint64_t dc;
    int i;

    for (i = 0; i < 8; i++)
        sum += left[i] + top[i];
    dc = 0x0101010101010101ULL * (sum >> 4);

    for (i = 0; i < 8; i++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}
268
/* DC prediction 16x16: fill with the rounded mean of left + top edges. */
static void dc_16x16_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 16; /* rounding bias for the >> 5 */
    uint64_t dc;
    int i;

    for (i = 0; i < 16; i++)
        sum += left[i] + top[i];
    dc = 0x0101010101010101ULL * (sum >> 5);

    for (i = 0; i < 16; i++) {
        AV_WN64A(dst,     dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}
286
/* DC prediction 32x32: fill with the rounded mean of left + top edges. */
static void dc_32x32_c(uint8_t *dst, ptrdiff_t stride,
                       const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 32; /* rounding bias for the >> 6 */
    uint64_t dc;
    int i, x;

    for (i = 0; i < 32; i++)
        sum += left[i] + top[i];
    dc = 0x0101010101010101ULL * (sum >> 6);

    for (i = 0; i < 32; i++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, dc);
        dst += stride;
    }
}
311
/* DC prediction 4x4 from the left edge only (top edge unavailable). */
static void dc_left_4x4_c(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 2, dc;
    int i;

    for (i = 0; i < 4; i++)
        sum += left[i];
    dc = 0x01010101U * (sum >> 2);

    for (i = 0; i < 4; i++)
        AV_WN32A(dst + i * stride, dc);
}
322
/* DC prediction 8x8 from the left edge only. */
static void dc_left_8x8_c(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 4;
    uint64_t dc;
    int i;

    for (i = 0; i < 8; i++)
        sum += left[i];
    dc = 0x0101010101010101ULL * (sum >> 3);

    for (i = 0; i < 8; i++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}
336
/* DC prediction 16x16 from the left edge only. */
static void dc_left_16x16_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 8;
    uint64_t dc;
    int i;

    for (i = 0; i < 16; i++)
        sum += left[i];
    dc = 0x0101010101010101ULL * (sum >> 4);

    for (i = 0; i < 16; i++) {
        AV_WN64A(dst,     dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}
352
/* DC prediction 32x32 from the left edge only. */
static void dc_left_32x32_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 16;
    uint64_t dc;
    int i, x;

    for (i = 0; i < 32; i++)
        sum += left[i];
    dc = 0x0101010101010101ULL * (sum >> 5);

    for (i = 0; i < 32; i++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, dc);
        dst += stride;
    }
}
373
/* DC prediction 4x4 from the top edge only (left edge unavailable). */
static void dc_top_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 2, dc;
    int i;

    for (i = 0; i < 4; i++)
        sum += top[i];
    dc = 0x01010101U * (sum >> 2);

    for (i = 0; i < 4; i++)
        AV_WN32A(dst + i * stride, dc);
}
384
/* DC prediction 8x8 from the top edge only. */
static void dc_top_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 4;
    uint64_t dc;
    int i;

    for (i = 0; i < 8; i++)
        sum += top[i];
    dc = 0x0101010101010101ULL * (sum >> 3);

    for (i = 0; i < 8; i++) {
        AV_WN64A(dst, dc);
        dst += stride;
    }
}
398
/* DC prediction 16x16 from the top edge only. */
static void dc_top_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 8;
    uint64_t dc;
    int i;

    for (i = 0; i < 16; i++)
        sum += top[i];
    dc = 0x0101010101010101ULL * (sum >> 4);

    for (i = 0; i < 16; i++) {
        AV_WN64A(dst,     dc);
        AV_WN64A(dst + 8, dc);
        dst += stride;
    }
}
414
/* DC prediction 32x32 from the top edge only. */
static void dc_top_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    unsigned sum = 16;
    uint64_t dc;
    int i, x;

    for (i = 0; i < 32; i++)
        sum += top[i];
    dc = 0x0101010101010101ULL * (sum >> 5);

    for (i = 0; i < 32; i++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, dc);
        dst += stride;
    }
}
435
/* DC prediction 4x4 with no edges available: flat mid-grey (128) fill. */
static void dc_128_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 4; y++)
        AV_WN32A(dst + y * stride, 0x80808080U);
}
444
/* DC prediction 8x8 with no edges available: flat 128 fill. */
static void dc_128_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y = 8;

    do {
        AV_WN64A(dst, 0x8080808080808080ULL);
        dst += stride;
    } while (--y);
}
455
/* DC prediction 16x16 with no edges available: flat 128 fill. */
static void dc_128_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x += 8)
            AV_WN64A(dst + x, 0x8080808080808080ULL);
        dst += stride;
    }
}
467
/* DC prediction 32x32 with no edges available: flat 128 fill. */
static void dc_128_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 32; y++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, 0x8080808080808080ULL);
        dst += stride;
    }
}
481
/* DC prediction 4x4, value 127 (VP9 edge-unavailable variant). */
static void dc_127_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 4; y++)
        AV_WN32A(dst + y * stride, 0x7F7F7F7FU);
}
490
/* DC prediction 8x8, value 127. */
static void dc_127_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y = 8;

    do {
        AV_WN64A(dst, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    } while (--y);
}
501
/* DC prediction 16x16, value 127. */
static void dc_127_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x += 8)
            AV_WN64A(dst + x, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    }
}
513
/* DC prediction 32x32, value 127. */
static void dc_127_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 32; y++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, 0x7F7F7F7F7F7F7F7FULL);
        dst += stride;
    }
}
527
/* DC prediction 4x4, value 129 (VP9 edge-unavailable variant). */
static void dc_129_4x4_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y;

    for (y = 0; y < 4; y++)
        AV_WN32A(dst + y * stride, 0x81818181U);
}
536
/* DC prediction 8x8, value 129. */
static void dc_129_8x8_c(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    int y = 8;

    do {
        AV_WN64A(dst, 0x8181818181818181ULL);
        dst += stride;
    } while (--y);
}
547
/* DC prediction 16x16, value 129. */
static void dc_129_16x16_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x += 8)
            AV_WN64A(dst + x, 0x8181818181818181ULL);
        dst += stride;
    }
}
559
/* DC prediction 32x32, value 129. */
static void dc_129_32x32_c(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int x, y;

    for (y = 0; y < 32; y++) {
        for (x = 0; x < 32; x += 8)
            AV_WN64A(dst + x, 0x8181818181818181ULL);
        dst += stride;
    }
}
573
#define DST(x, y) dst[(x) + (y) * stride]

/* Diagonal down-left prediction 4x4: every anti-diagonal takes one
 * 1-2-1 filtered value from the (extended, 8-pixel) top edge. */
static void diag_downleft_4x4_c(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *left, const uint8_t *top)
{
    int t0 = top[0], t1 = top[1], t2 = top[2], t3 = top[3],
        t4 = top[4], t5 = top[5], t6 = top[6], t7 = top[7];

    DST(0,0) = (t0 + 2 * t1 + t2 + 2) >> 2;
    DST(1,0) = DST(0,1) = (t1 + 2 * t2 + t3 + 2) >> 2;
    DST(2,0) = DST(1,1) = DST(0,2) = (t2 + 2 * t3 + t4 + 2) >> 2;
    DST(3,0) = DST(2,1) = DST(1,2) = DST(0,3) = (t3 + 2 * t4 + t5 + 2) >> 2;
    DST(3,1) = DST(2,2) = DST(1,3) = (t4 + 2 * t5 + t6 + 2) >> 2;
    DST(3,2) = DST(2,3) = (t5 + 2 * t6 + t7 + 2) >> 2;
    DST(3,3) = t7; // note: this is different from vp8 and such
}
590
/* Diagonal down-left for size >= 8: filter the top edge once, then each
 * row reads the filtered buffer shifted by one, padded with the corner. */
#define def_diag_downleft(size) \
static void diag_downleft_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                              const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t flt[size - 1]; \
\
    for (k = 0; k < size - 2; k++) \
        flt[k] = (top[k] + 2 * top[k + 1] + top[k + 2] + 2) >> 2; \
    flt[size - 2] = (top[size - 2] + 3 * top[size - 1] + 2) >> 2; \
\
    for (row = 0; row < size; row++) { \
        memcpy(dst + row * stride, flt + row, size - 1 - row); \
        memset(dst + row * stride + size - 1 - row, top[size - 1], row + 1); \
    } \
}

def_diag_downleft(8)
def_diag_downleft(16)
def_diag_downleft(32)
611
612static void diag_downright_4x4_c(uint8_t *dst, ptrdiff_t stride,
613 const uint8_t *left, const uint8_t *top)
614{
615 int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
616 l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];
617
618 DST(0,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
619 DST(0,2) = DST(1,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
620 DST(0,1) = DST(1,2) = DST(2,3) = (tl + l0 * 2 + l1 + 2) >> 2;
621 DST(0,0) = DST(1,1) = DST(2,2) = DST(3,3) = (l0 + tl * 2 + a0 + 2) >> 2;
622 DST(1,0) = DST(2,1) = DST(3,2) = (tl + a0 * 2 + a1 + 2) >> 2;
623 DST(2,0) = DST(3,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
624 DST(3,0) = (a1 + a2 * 2 + a3 + 2) >> 2;
625}
626
/* Diagonal down-right for size >= 8: build one filtered buffer holding
 * left edge, corner and top edge; each row is the previous shifted by one. */
#define def_diag_downright(size) \
static void diag_downright_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                               const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t buf[size + size - 1]; \
\
    for (k = 0; k < size - 2; k++) { \
        buf[k]            = (left[k] + 2 * left[k + 1] + left[k + 2] + 2) >> 2; \
        buf[size + 1 + k] = (top[k] + 2 * top[k + 1] + top[k + 2] + 2) >> 2; \
    } \
    buf[size - 2] = (left[size - 2] + 2 * left[size - 1] + top[-1] + 2) >> 2; \
    buf[size - 1] = (left[size - 1] + 2 * top[-1] + top[0] + 2) >> 2; \
    buf[size]     = (top[-1] + 2 * top[0] + top[1] + 2) >> 2; \
\
    for (row = 0; row < size; row++) \
        memcpy(dst + row * stride, buf + size - 1 - row, size); \
}

def_diag_downright(8)
def_diag_downright(16)
def_diag_downright(32)
649
650static void vert_right_4x4_c(uint8_t *dst, ptrdiff_t stride,
651 const uint8_t *left, const uint8_t *top)
652{
653 int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
654 l0 = left[3], l1 = left[2], l2 = left[1];
655
656 DST(0,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
657 DST(0,2) = (tl + l0 * 2 + l1 + 2) >> 2;
658 DST(0,0) = DST(1,2) = (tl + a0 + 1) >> 1;
659 DST(0,1) = DST(1,3) = (l0 + tl * 2 + a0 + 2) >> 2;
660 DST(1,0) = DST(2,2) = (a0 + a1 + 1) >> 1;
661 DST(1,1) = DST(2,3) = (tl + a0 * 2 + a1 + 2) >> 2;
662 DST(2,0) = DST(3,2) = (a1 + a2 + 1) >> 1;
663 DST(2,1) = DST(3,3) = (a0 + a1 * 2 + a2 + 2) >> 2;
664 DST(3,0) = (a2 + a3 + 1) >> 1;
665 DST(3,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
666}
667
/* Vertical-right for size >= 8: even rows use 2-tap averages, odd rows
 * 1-2-1 filtered values; each row pair shifts one step into the left edge. */
#define def_vert_right(size) \
static void vert_right_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                           const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t even[size + size/2 - 1], odd[size + size/2 - 1]; \
\
    for (k = 0; k < size/2 - 2; k++) { \
        odd[k]  = (left[k*2 + 3] + 2 * left[k*2 + 2] + left[k*2 + 1] + 2) >> 2; \
        even[k] = (left[k*2 + 4] + 2 * left[k*2 + 3] + left[k*2 + 2] + 2) >> 2; \
    } \
    odd[size/2 - 2]  = (left[size - 1] + 2 * left[size - 2] + left[size - 3] + 2) >> 2; \
    even[size/2 - 2] = (top[-1] + 2 * left[size - 1] + left[size - 2] + 2) >> 2; \
\
    even[size/2 - 1] = (top[-1] + top[0] + 1) >> 1; \
    odd[size/2 - 1]  = (left[size - 1] + 2 * top[-1] + top[0] + 2) >> 2; \
    for (k = 0; k < size - 1; k++) { \
        even[size/2 + k] = (top[k] + top[k + 1] + 1) >> 1; \
        odd[size/2 + k]  = (top[k - 1] + 2 * top[k] + top[k + 1] + 2) >> 2; \
    } \
\
    for (row = 0; row < size / 2; row++) { \
        memcpy(dst +  row*2      * stride, even + size/2 - 1 - row, size); \
        memcpy(dst + (row*2 + 1) * stride, odd  + size/2 - 1 - row, size); \
    } \
}

def_vert_right(8)
def_vert_right(16)
def_vert_right(32)
698
699static void hor_down_4x4_c(uint8_t *dst, ptrdiff_t stride,
700 const uint8_t *left, const uint8_t *top)
701{
702 int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0],
703 tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2];
704
705 DST(2,0) = (tl + a0 * 2 + a1 + 2) >> 2;
706 DST(3,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
707 DST(0,0) = DST(2,1) = (tl + l0 + 1) >> 1;
708 DST(1,0) = DST(3,1) = (a0 + tl * 2 + l0 + 2) >> 2;
709 DST(0,1) = DST(2,2) = (l0 + l1 + 1) >> 1;
710 DST(1,1) = DST(3,2) = (tl + l0 * 2 + l1 + 2) >> 2;
711 DST(0,2) = DST(2,3) = (l1 + l2 + 1) >> 1;
712 DST(1,2) = DST(3,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
713 DST(0,3) = (l2 + l3 + 1) >> 1;
714 DST(1,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
715}
716
/* Horizontal-down for size >= 8: interleave 2-tap and 1-2-1 filtered
 * values along the left edge, append the filtered top edge; each row
 * reads the buffer two entries earlier than the row above. */
#define def_hor_down(size) \
static void hor_down_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                         const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t buf[size * 3 - 2]; \
\
    for (k = 0; k < size - 2; k++) { \
        buf[k*2    ]    = (left[k + 1] + left[k + 0] + 1) >> 1; \
        buf[k*2 + 1]    = (left[k + 2] + 2 * left[k + 1] + left[k + 0] + 2) >> 2; \
        buf[size*2 + k] = (top[k - 1] + 2 * top[k] + top[k + 1] + 2) >> 2; \
    } \
    buf[size*2 - 2] = (top[-1] + left[size - 1] + 1) >> 1; \
    buf[size*2 - 4] = (left[size - 1] + left[size - 2] + 1) >> 1; \
    buf[size*2 - 1] = (top[0] + 2 * top[-1] + left[size - 1] + 2) >> 2; \
    buf[size*2 - 3] = (top[-1] + 2 * left[size - 1] + left[size - 2] + 2) >> 2; \
\
    for (row = 0; row < size; row++) \
        memcpy(dst + row * stride, buf + size*2 - 2 - row*2, size); \
}

def_hor_down(8)
def_hor_down(16)
def_hor_down(32)
741
742static void vert_left_4x4_c(uint8_t *dst, ptrdiff_t stride,
743 const uint8_t *left, const uint8_t *top)
744{
745 int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
746 a4 = top[4], a5 = top[5], a6 = top[6];
747
748 DST(0,0) = (a0 + a1 + 1) >> 1;
749 DST(0,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
750 DST(1,0) = DST(0,2) = (a1 + a2 + 1) >> 1;
751 DST(1,1) = DST(0,3) = (a1 + a2 * 2 + a3 + 2) >> 2;
752 DST(2,0) = DST(1,2) = (a2 + a3 + 1) >> 1;
753 DST(2,1) = DST(1,3) = (a2 + a3 * 2 + a4 + 2) >> 2;
754 DST(3,0) = DST(2,2) = (a3 + a4 + 1) >> 1;
755 DST(3,1) = DST(2,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
756 DST(3,2) = (a4 + a5 + 1) >> 1;
757 DST(3,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
758}
759
/* Vertical-left for size >= 8: even rows read 2-tap averages of the top
 * edge, odd rows 1-2-1 filtered values; rows shift left and pad with the
 * last top pixel. */
#define def_vert_left(size) \
static void vert_left_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                          const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t avg[size - 1], flt[size - 1]; \
\
    for (k = 0; k < size - 2; k++) { \
        avg[k] = (top[k] + top[k + 1] + 1) >> 1; \
        flt[k] = (top[k] + 2 * top[k + 1] + top[k + 2] + 2) >> 2; \
    } \
    avg[size - 2] = (top[size - 2] + top[size - 1] + 1) >> 1; \
    flt[size - 2] = (top[size - 2] + 3 * top[size - 1] + 2) >> 2; \
\
    for (row = 0; row < size / 2; row++) { \
        memcpy(dst +  row*2      * stride, avg + row, size - row - 1); \
        memset(dst +  row*2      * stride + size - row - 1, top[size - 1], row + 1); \
        memcpy(dst + (row*2 + 1) * stride, flt + row, size - row - 1); \
        memset(dst + (row*2 + 1) * stride + size - row - 1, top[size - 1], row + 1); \
    } \
}

def_vert_left(8)
def_vert_left(16)
def_vert_left(32)
785
786static void hor_up_4x4_c(uint8_t *dst, ptrdiff_t stride,
787 const uint8_t *left, const uint8_t *top)
788{
789 int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];
790
791 DST(0,0) = (l0 + l1 + 1) >> 1;
792 DST(1,0) = (l0 + l1 * 2 + l2 + 2) >> 2;
793 DST(0,1) = DST(2,0) = (l1 + l2 + 1) >> 1;
794 DST(1,1) = DST(3,0) = (l1 + l2 * 2 + l3 + 2) >> 2;
795 DST(0,2) = DST(2,1) = (l2 + l3 + 1) >> 1;
796 DST(1,2) = DST(3,1) = (l2 + l3 * 3 + 2) >> 2;
797 DST(0,3) = DST(1,3) = DST(2,2) = DST(2,3) = DST(3,2) = DST(3,3) = l3;
798}
799
/* Horizontal-up for size >= 8: interleaved 2-tap / 1-2-1 filtered values
 * walking up the left edge; the lower rows run out of data and are padded
 * with the last left pixel. */
#define def_hor_up(size) \
static void hor_up_##size##x##size##_c(uint8_t *dst, ptrdiff_t stride, \
                                       const uint8_t *left, const uint8_t *top) \
{ \
    int k, row; \
    uint8_t buf[size*2 - 2]; \
\
    for (k = 0; k < size - 2; k++) { \
        buf[k*2    ] = (left[size - k - 1] + left[size - k - 2] + 1) >> 1; \
        buf[k*2 + 1] = (left[size - k - 1] + 2 * left[size - k - 2] + left[size - k - 3] + 2) >> 2; \
    } \
    buf[size*2 - 4] = (left[1] + left[0] + 1) >> 1; \
    buf[size*2 - 3] = (left[1] + 3 * left[0] + 2) >> 2; \
\
    for (row = 0; row < size / 2; row++) \
        memcpy(dst + row * stride, buf + row*2, size); \
    for (row = size / 2; row < size; row++) { \
        memcpy(dst + row * stride, buf + row*2, size*2 - 2 - row*2); \
        memset(dst + row * stride + size*2 - 2 - row*2, left[0], \
               2 + row*2 - size); \
    } \
}

def_hor_up(8)
def_hor_up(16)
def_hor_up(32)
826
827#undef DST
828
829static av_cold void vp9dsp_intrapred_init(VP9DSPContext *dsp)
830{
831#define init_intra_pred(tx, sz) \
832 dsp->intra_pred[tx][VERT_PRED] = vert_##sz##_c; \
833 dsp->intra_pred[tx][HOR_PRED] = hor_##sz##_c; \
834 dsp->intra_pred[tx][DC_PRED] = dc_##sz##_c; \
835 dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED] = diag_downleft_##sz##_c; \
836 dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = diag_downright_##sz##_c; \
837 dsp->intra_pred[tx][VERT_RIGHT_PRED] = vert_right_##sz##_c; \
838 dsp->intra_pred[tx][HOR_DOWN_PRED] = hor_down_##sz##_c; \
839 dsp->intra_pred[tx][VERT_LEFT_PRED] = vert_left_##sz##_c; \
840 dsp->intra_pred[tx][HOR_UP_PRED] = hor_up_##sz##_c; \
841 dsp->intra_pred[tx][TM_VP8_PRED] = tm_##sz##_c; \
842 dsp->intra_pred[tx][LEFT_DC_PRED] = dc_left_##sz##_c; \
843 dsp->intra_pred[tx][TOP_DC_PRED] = dc_top_##sz##_c; \
844 dsp->intra_pred[tx][DC_128_PRED] = dc_128_##sz##_c; \
845 dsp->intra_pred[tx][DC_127_PRED] = dc_127_##sz##_c; \
846 dsp->intra_pred[tx][DC_129_PRED] = dc_129_##sz##_c
847
848 init_intra_pred(TX_4X4, 4x4);
849 init_intra_pred(TX_8X8, 8x8);
850 init_intra_pred(TX_16X16, 16x16);
851 init_intra_pred(TX_32X32, 32x32);
852
853#undef init_intra_pred
854}
855
/* Generate a 2-D inverse-transform-and-add function from two 1-D passes.
 * type_a runs on columns, type_b on rows; "bits" is the final rounding
 * shift; has_dconly enables a fast path when only the DC coeff is set. */
#define itxfm_wrapper(type_a, type_b, sz, bits, has_dconly) \
static void type_a##_##type_b##_##sz##x##sz##_add_c(uint8_t *dst, \
                                                    ptrdiff_t stride, \
                                                    int16_t *block, int eob) \
{ \
    int row, col; \
    int16_t tmp[sz * sz], out[sz]; \
\
    /* DC-only shortcut: scale block[0] twice by 11585/16384 (1/sqrt(2)) */ \
    if (has_dconly && eob == 1) { \
        const int t = (((block[0] * 11585 + (1 << 13)) >> 14) \
                       * 11585 + (1 << 13)) >> 14; \
        block[0] = 0; \
        for (row = 0; row < sz; row++) { \
            for (col = 0; col < sz; col++) \
                dst[col * stride] = av_clip_uint8(dst[col * stride] + \
                                                  (bits ? \
                                                   (t + (1 << (bits - 1))) >> bits : \
                                                   t)); \
            dst++; \
        } \
        return; \
    } \
\
    for (row = 0; row < sz; row++) \
        type_a##sz##_1d(block + row, sz, tmp + row * sz, 0); \
    memset(block, 0, sz * sz * sizeof(*block)); \
    for (row = 0; row < sz; row++) { \
        type_b##sz##_1d(tmp + row, sz, out, 1); \
        for (col = 0; col < sz; col++) \
            dst[col * stride] = av_clip_uint8(dst[col * stride] + \
                                              (bits ? \
                                               (out[col] + (1 << (bits - 1))) >> bits : \
                                               out[col])); \
        dst++; \
    } \
}

/* Instantiate all four idct/iadst combinations for one transform size. */
#define itxfm_wrap(sz, bits) \
itxfm_wrapper(idct,  idct,  sz, bits, 1) \
itxfm_wrapper(iadst, idct,  sz, bits, 0) \
itxfm_wrapper(idct,  iadst, sz, bits, 0) \
itxfm_wrapper(iadst, iadst, sz, bits, 0)
898
899#define IN(x) in[(x) * stride]
900
901static av_always_inline void idct4_1d(const int16_t *in, ptrdiff_t stride,
902 int16_t *out, int pass)
903{
904 int t0, t1, t2, t3;
905
906 t0 = ((IN(0) + IN(2)) * 11585 + (1 << 13)) >> 14;
907 t1 = ((IN(0) - IN(2)) * 11585 + (1 << 13)) >> 14;
908 t2 = (IN(1) * 6270 - IN(3) * 15137 + (1 << 13)) >> 14;
909 t3 = (IN(1) * 15137 + IN(3) * 6270 + (1 << 13)) >> 14;
910
911 out[0] = t0 + t3;
912 out[1] = t1 + t2;
913 out[2] = t1 - t2;
914 out[3] = t0 - t3;
915}
916
917static av_always_inline void iadst4_1d(const int16_t *in, ptrdiff_t stride,
918 int16_t *out, int pass)
919{
920 int t0, t1, t2, t3;
921
922 t0 = 5283 * IN(0) + 15212 * IN(2) + 9929 * IN(3);
923 t1 = 9929 * IN(0) - 5283 * IN(2) - 15212 * IN(3);
924 t2 = 13377 * (IN(0) - IN(2) + IN(3));
925 t3 = 13377 * IN(1);
926
927 out[0] = (t0 + t3 + (1 << 13)) >> 14;
928 out[1] = (t1 + t3 + (1 << 13)) >> 14;
929 out[2] = (t2 + (1 << 13)) >> 14;
930 out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14;
931}
932
933itxfm_wrap(4, 4)
934
935static av_always_inline void idct8_1d(const int16_t *in, ptrdiff_t stride,
936 int16_t *out, int pass)
937{
938 int t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;
939
940 t0a = ((IN(0) + IN(4)) * 11585 + (1 << 13)) >> 14;
941 t1a = ((IN(0) - IN(4)) * 11585 + (1 << 13)) >> 14;
942 t2a = (IN(2) * 6270 - IN(6) * 15137 + (1 << 13)) >> 14;
943 t3a = (IN(2) * 15137 + IN(6) * 6270 + (1 << 13)) >> 14;
944 t4a = (IN(1) * 3196 - IN(7) * 16069 + (1 << 13)) >> 14;
945 t5a = (IN(5) * 13623 - IN(3) * 9102 + (1 << 13)) >> 14;
946 t6a = (IN(5) * 9102 + IN(3) * 13623 + (1 << 13)) >> 14;
947 t7a = (IN(1) * 16069 + IN(7) * 3196 + (1 << 13)) >> 14;
948
949 t0 = t0a + t3a;
950 t1 = t1a + t2a;
951 t2 = t1a - t2a;
952 t3 = t0a - t3a;
953 t4 = t4a + t5a;
954 t5a = t4a - t5a;
955 t7 = t7a + t6a;
956 t6a = t7a - t6a;
957
958 t5 = ((t6a - t5a) * 11585 + (1 << 13)) >> 14;
959 t6 = ((t6a + t5a) * 11585 + (1 << 13)) >> 14;
960
961 out[0] = t0 + t7;
962 out[1] = t1 + t6;
963 out[2] = t2 + t5;
964 out[3] = t3 + t4;
965 out[4] = t3 - t4;
966 out[5] = t2 - t5;
967 out[6] = t1 - t6;
968 out[7] = t0 - t7;
969}
970
// One row/column pass of the 8-point inverse ADST (asymmetric DST).
// Same calling convention as idct8_1d: coefficients come in via IN(),
// 14-bit fixed-point constants, rounding with (1 << 13) before >> 14.
// Note several outputs are negated, as required by the VP9 ADST basis.
static av_always_inline void iadst8_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;

    // stage 1: full-precision rotations (descaled in stage 2)
    t0a = 16305 * IN(7) + 1606 * IN(0);
    t1a = 1606 * IN(7) - 16305 * IN(0);
    t2a = 14449 * IN(5) + 7723 * IN(2);
    t3a = 7723 * IN(5) - 14449 * IN(2);
    t4a = 10394 * IN(3) + 12665 * IN(4);
    t5a = 12665 * IN(3) - 10394 * IN(4);
    t6a = 4756 * IN(1) + 15679 * IN(6);
    t7a = 15679 * IN(1) - 4756 * IN(6);

    // stage 2: butterflies with rounding/descale
    t0 = (t0a + t4a + (1 << 13)) >> 14;
    t1 = (t1a + t5a + (1 << 13)) >> 14;
    t2 = (t2a + t6a + (1 << 13)) >> 14;
    t3 = (t3a + t7a + (1 << 13)) >> 14;
    t4 = (t0a - t4a + (1 << 13)) >> 14;
    t5 = (t1a - t5a + (1 << 13)) >> 14;
    t6 = (t2a - t6a + (1 << 13)) >> 14;
    t7 = (t3a - t7a + (1 << 13)) >> 14;

    // stage 3: second rotation set
    t4a = 15137 * t4 + 6270 * t5;
    t5a = 6270 * t4 - 15137 * t5;
    t6a = 15137 * t7 - 6270 * t6;
    t7a = 6270 * t7 + 15137 * t6;

    out[0] = t0 + t2;
    out[7] = -(t1 + t3);
    t2 = t0 - t2;
    t3 = t1 - t3;

    out[1] = -((t4a + t6a + (1 << 13)) >> 14);
    out[6] = (t5a + t7a + (1 << 13)) >> 14;
    t6 = (t4a - t6a + (1 << 13)) >> 14;
    t7 = (t5a - t7a + (1 << 13)) >> 14;

    // final stage: scale remaining terms by cos(pi/4)
    out[3] = -(((t2 + t3) * 11585 + (1 << 13)) >> 14);
    out[4] = ((t2 - t3) * 11585 + (1 << 13)) >> 14;
    out[2] = ((t6 + t7) * 11585 + (1 << 13)) >> 14;
    out[5] = -(((t6 - t7) * 11585 + (1 << 13)) >> 14);
}

itxfm_wrap(8, 5)
1016
// One row/column pass of the 16-point inverse DCT. Same conventions as
// idct8_1d: IN() coefficient access, 14-bit fixed-point trig constants,
// round-to-nearest descaling via "+ (1 << 13)) >> 14".
static av_always_inline void idct16_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    int t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    int t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    // stage 1: initial rotations (even indices feed an embedded 8-point IDCT)
    t0a = ((IN(0) + IN(8)) * 11585 + (1 << 13)) >> 14;
    t1a = ((IN(0) - IN(8)) * 11585 + (1 << 13)) >> 14;
    t2a = (IN(4) * 6270 - IN(12) * 15137 + (1 << 13)) >> 14;
    t3a = (IN(4) * 15137 + IN(12) * 6270 + (1 << 13)) >> 14;
    t4a = (IN(2) * 3196 - IN(14) * 16069 + (1 << 13)) >> 14;
    t7a = (IN(2) * 16069 + IN(14) * 3196 + (1 << 13)) >> 14;
    t5a = (IN(10) * 13623 - IN(6) * 9102 + (1 << 13)) >> 14;
    t6a = (IN(10) * 9102 + IN(6) * 13623 + (1 << 13)) >> 14;
    t8a = (IN(1) * 1606 - IN(15) * 16305 + (1 << 13)) >> 14;
    t15a = (IN(1) * 16305 + IN(15) * 1606 + (1 << 13)) >> 14;
    t9a = (IN(9) * 12665 - IN(7) * 10394 + (1 << 13)) >> 14;
    t14a = (IN(9) * 10394 + IN(7) * 12665 + (1 << 13)) >> 14;
    t10a = (IN(5) * 7723 - IN(11) * 14449 + (1 << 13)) >> 14;
    t13a = (IN(5) * 14449 + IN(11) * 7723 + (1 << 13)) >> 14;
    t11a = (IN(13) * 15679 - IN(3) * 4756 + (1 << 13)) >> 14;
    t12a = (IN(13) * 4756 + IN(3) * 15679 + (1 << 13)) >> 14;

    // stage 2: butterflies
    t0 = t0a + t3a;
    t1 = t1a + t2a;
    t2 = t1a - t2a;
    t3 = t0a - t3a;
    t4 = t4a + t5a;
    t5 = t4a - t5a;
    t6 = t7a - t6a;
    t7 = t7a + t6a;
    t8 = t8a + t9a;
    t9 = t8a - t9a;
    t10 = t11a - t10a;
    t11 = t11a + t10a;
    t12 = t12a + t13a;
    t13 = t12a - t13a;
    t14 = t15a - t14a;
    t15 = t15a + t14a;

    // stage 3: secondary rotations
    t5a = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
    t6a = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
    t9a = ( t14 * 6270 - t9 * 15137 + (1 << 13)) >> 14;
    t14a = ( t14 * 15137 + t9 * 6270 + (1 << 13)) >> 14;
    t10a = (-(t13 * 15137 + t10 * 6270) + (1 << 13)) >> 14;
    t13a = ( t13 * 6270 - t10 * 15137 + (1 << 13)) >> 14;

    // stage 4: butterflies
    t0a = t0 + t7;
    t1a = t1 + t6a;
    t2a = t2 + t5a;
    t3a = t3 + t4;
    t4 = t3 - t4;
    t5 = t2 - t5a;
    t6 = t1 - t6a;
    t7 = t0 - t7;
    t8a = t8 + t11;
    t9 = t9a + t10a;
    t10 = t9a - t10a;
    t11a = t8 - t11;
    t12a = t15 - t12;
    t13 = t14a - t13a;
    t14 = t14a + t13a;
    t15a = t15 + t12;

    // stage 5: cos(pi/4) scalings
    t10a = ((t13 - t10) * 11585 + (1 << 13)) >> 14;
    t13a = ((t13 + t10) * 11585 + (1 << 13)) >> 14;
    t11 = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
    t12 = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;

    // final butterfly
    out[ 0] = t0a + t15a;
    out[ 1] = t1a + t14;
    out[ 2] = t2a + t13a;
    out[ 3] = t3a + t12;
    out[ 4] = t4 + t11;
    out[ 5] = t5 + t10a;
    out[ 6] = t6 + t9;
    out[ 7] = t7 + t8a;
    out[ 8] = t7 - t8a;
    out[ 9] = t6 - t9;
    out[10] = t5 - t10a;
    out[11] = t4 - t11;
    out[12] = t3a - t12;
    out[13] = t2a - t13a;
    out[14] = t1a - t14;
    out[15] = t0a - t15a;
}
1104
// One row/column pass of the 16-point inverse ADST. Same conventions as
// the other *_1d transforms: IN() coefficient access, 14-bit fixed-point
// constants, rounding with (1 << 13) before >> 14. Several outputs are
// negated per the VP9 ADST basis definition.
static av_always_inline void iadst16_1d(const int16_t *in, ptrdiff_t stride,
                                        int16_t *out, int pass)
{
    int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    int t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    int t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    // stage 1: full-precision rotations (descaled in stage 2)
    t0 = IN(15) * 16364 + IN(0) * 804;
    t1 = IN(15) * 804 - IN(0) * 16364;
    t2 = IN(13) * 15893 + IN(2) * 3981;
    t3 = IN(13) * 3981 - IN(2) * 15893;
    t4 = IN(11) * 14811 + IN(4) * 7005;
    t5 = IN(11) * 7005 - IN(4) * 14811;
    t6 = IN(9) * 13160 + IN(6) * 9760;
    t7 = IN(9) * 9760 - IN(6) * 13160;
    t8 = IN(7) * 11003 + IN(8) * 12140;
    t9 = IN(7) * 12140 - IN(8) * 11003;
    t10 = IN(5) * 8423 + IN(10) * 14053;
    t11 = IN(5) * 14053 - IN(10) * 8423;
    t12 = IN(3) * 5520 + IN(12) * 15426;
    t13 = IN(3) * 15426 - IN(12) * 5520;
    t14 = IN(1) * 2404 + IN(14) * 16207;
    t15 = IN(1) * 16207 - IN(14) * 2404;

    // stage 2: butterflies with rounding/descale
    t0a = (t0 + t8 + (1 << 13)) >> 14;
    t1a = (t1 + t9 + (1 << 13)) >> 14;
    t2a = (t2 + t10 + (1 << 13)) >> 14;
    t3a = (t3 + t11 + (1 << 13)) >> 14;
    t4a = (t4 + t12 + (1 << 13)) >> 14;
    t5a = (t5 + t13 + (1 << 13)) >> 14;
    t6a = (t6 + t14 + (1 << 13)) >> 14;
    t7a = (t7 + t15 + (1 << 13)) >> 14;
    t8a = (t0 - t8 + (1 << 13)) >> 14;
    t9a = (t1 - t9 + (1 << 13)) >> 14;
    t10a = (t2 - t10 + (1 << 13)) >> 14;
    t11a = (t3 - t11 + (1 << 13)) >> 14;
    t12a = (t4 - t12 + (1 << 13)) >> 14;
    t13a = (t5 - t13 + (1 << 13)) >> 14;
    t14a = (t6 - t14 + (1 << 13)) >> 14;
    t15a = (t7 - t15 + (1 << 13)) >> 14;

    // stage 3: second rotation set
    t8 = t8a * 16069 + t9a * 3196;
    t9 = t8a * 3196 - t9a * 16069;
    t10 = t10a * 9102 + t11a * 13623;
    t11 = t10a * 13623 - t11a * 9102;
    t12 = t13a * 16069 - t12a * 3196;
    t13 = t13a * 3196 + t12a * 16069;
    t14 = t15a * 9102 - t14a * 13623;
    t15 = t15a * 13623 + t14a * 9102;

    // stage 4: butterflies
    t0 = t0a + t4a;
    t1 = t1a + t5a;
    t2 = t2a + t6a;
    t3 = t3a + t7a;
    t4 = t0a - t4a;
    t5 = t1a - t5a;
    t6 = t2a - t6a;
    t7 = t3a - t7a;
    t8a = (t8 + t12 + (1 << 13)) >> 14;
    t9a = (t9 + t13 + (1 << 13)) >> 14;
    t10a = (t10 + t14 + (1 << 13)) >> 14;
    t11a = (t11 + t15 + (1 << 13)) >> 14;
    t12a = (t8 - t12 + (1 << 13)) >> 14;
    t13a = (t9 - t13 + (1 << 13)) >> 14;
    t14a = (t10 - t14 + (1 << 13)) >> 14;
    t15a = (t11 - t15 + (1 << 13)) >> 14;

    // stage 5: third rotation set
    t4a = t4 * 15137 + t5 * 6270;
    t5a = t4 * 6270 - t5 * 15137;
    t6a = t7 * 15137 - t6 * 6270;
    t7a = t7 * 6270 + t6 * 15137;
    t12 = t12a * 15137 + t13a * 6270;
    t13 = t12a * 6270 - t13a * 15137;
    t14 = t15a * 15137 - t14a * 6270;
    t15 = t15a * 6270 + t14a * 15137;

    out[ 0] = t0 + t2;
    out[15] = -(t1 + t3);
    t2a = t0 - t2;
    t3a = t1 - t3;
    out[ 3] = -((t4a + t6a + (1 << 13)) >> 14);
    out[12] = (t5a + t7a + (1 << 13)) >> 14;
    t6 = (t4a - t6a + (1 << 13)) >> 14;
    t7 = (t5a - t7a + (1 << 13)) >> 14;
    out[ 1] = -(t8a + t10a);
    out[14] = t9a + t11a;
    t10 = t8a - t10a;
    t11 = t9a - t11a;
    out[ 2] = (t12 + t14 + (1 << 13)) >> 14;
    out[13] = -((t13 + t15 + (1 << 13)) >> 14);
    t14a = (t12 - t14 + (1 << 13)) >> 14;
    t15a = (t13 - t15 + (1 << 13)) >> 14;

    // final stage: cos(pi/4) scalings (some negated)
    out[ 7] = ((t2a + t3a) * -11585 + (1 << 13)) >> 14;
    out[ 8] = ((t2a - t3a) * 11585 + (1 << 13)) >> 14;
    out[ 4] = ((t7 + t6) * 11585 + (1 << 13)) >> 14;
    out[11] = ((t7 - t6) * 11585 + (1 << 13)) >> 14;
    out[ 6] = ((t11 + t10) * 11585 + (1 << 13)) >> 14;
    out[ 9] = ((t11 - t10) * 11585 + (1 << 13)) >> 14;
    out[ 5] = ((t14a + t15a) * -11585 + (1 << 13)) >> 14;
    out[10] = ((t14a - t15a) * 11585 + (1 << 13)) >> 14;
}

itxfm_wrap(16, 6)
1209
// One row/column pass of the 32-point inverse DCT. Same conventions as
// idct16_1d; the even-indexed inputs effectively run through an embedded
// 16-point IDCT while the odd-indexed ones form the 16 extra butterflies.
// VP9 has no 32-point ADST, so only the DCT variant exists at this size.
static av_always_inline void idct32_1d(const int16_t *in, ptrdiff_t stride,
                                       int16_t *out, int pass)
{
    // stage 1: initial rotations
    int t0a = ((IN(0) + IN(16)) * 11585 + (1 << 13)) >> 14;
    int t1a = ((IN(0) - IN(16)) * 11585 + (1 << 13)) >> 14;
    int t2a = (IN( 8) * 6270 - IN(24) * 15137 + (1 << 13)) >> 14;
    int t3a = (IN( 8) * 15137 + IN(24) * 6270 + (1 << 13)) >> 14;
    int t4a = (IN( 4) * 3196 - IN(28) * 16069 + (1 << 13)) >> 14;
    int t7a = (IN( 4) * 16069 + IN(28) * 3196 + (1 << 13)) >> 14;
    int t5a = (IN(20) * 13623 - IN(12) * 9102 + (1 << 13)) >> 14;
    int t6a = (IN(20) * 9102 + IN(12) * 13623 + (1 << 13)) >> 14;
    int t8a = (IN( 2) * 1606 - IN(30) * 16305 + (1 << 13)) >> 14;
    int t15a = (IN( 2) * 16305 + IN(30) * 1606 + (1 << 13)) >> 14;
    int t9a = (IN(18) * 12665 - IN(14) * 10394 + (1 << 13)) >> 14;
    int t14a = (IN(18) * 10394 + IN(14) * 12665 + (1 << 13)) >> 14;
    int t10a = (IN(10) * 7723 - IN(22) * 14449 + (1 << 13)) >> 14;
    int t13a = (IN(10) * 14449 + IN(22) * 7723 + (1 << 13)) >> 14;
    int t11a = (IN(26) * 15679 - IN( 6) * 4756 + (1 << 13)) >> 14;
    int t12a = (IN(26) * 4756 + IN( 6) * 15679 + (1 << 13)) >> 14;
    int t16a = (IN( 1) * 804 - IN(31) * 16364 + (1 << 13)) >> 14;
    int t31a = (IN( 1) * 16364 + IN(31) * 804 + (1 << 13)) >> 14;
    int t17a = (IN(17) * 12140 - IN(15) * 11003 + (1 << 13)) >> 14;
    int t30a = (IN(17) * 11003 + IN(15) * 12140 + (1 << 13)) >> 14;
    int t18a = (IN( 9) * 7005 - IN(23) * 14811 + (1 << 13)) >> 14;
    int t29a = (IN( 9) * 14811 + IN(23) * 7005 + (1 << 13)) >> 14;
    int t19a = (IN(25) * 15426 - IN( 7) * 5520 + (1 << 13)) >> 14;
    int t28a = (IN(25) * 5520 + IN( 7) * 15426 + (1 << 13)) >> 14;
    int t20a = (IN( 5) * 3981 - IN(27) * 15893 + (1 << 13)) >> 14;
    int t27a = (IN( 5) * 15893 + IN(27) * 3981 + (1 << 13)) >> 14;
    int t21a = (IN(21) * 14053 - IN(11) * 8423 + (1 << 13)) >> 14;
    int t26a = (IN(21) * 8423 + IN(11) * 14053 + (1 << 13)) >> 14;
    int t22a = (IN(13) * 9760 - IN(19) * 13160 + (1 << 13)) >> 14;
    int t25a = (IN(13) * 13160 + IN(19) * 9760 + (1 << 13)) >> 14;
    int t23a = (IN(29) * 16207 - IN( 3) * 2404 + (1 << 13)) >> 14;
    int t24a = (IN(29) * 2404 + IN( 3) * 16207 + (1 << 13)) >> 14;

    // stage 2: butterflies
    int t0 = t0a + t3a;
    int t1 = t1a + t2a;
    int t2 = t1a - t2a;
    int t3 = t0a - t3a;
    int t4 = t4a + t5a;
    int t5 = t4a - t5a;
    int t6 = t7a - t6a;
    int t7 = t7a + t6a;
    int t8 = t8a + t9a;
    int t9 = t8a - t9a;
    int t10 = t11a - t10a;
    int t11 = t11a + t10a;
    int t12 = t12a + t13a;
    int t13 = t12a - t13a;
    int t14 = t15a - t14a;
    int t15 = t15a + t14a;
    int t16 = t16a + t17a;
    int t17 = t16a - t17a;
    int t18 = t19a - t18a;
    int t19 = t19a + t18a;
    int t20 = t20a + t21a;
    int t21 = t20a - t21a;
    int t22 = t23a - t22a;
    int t23 = t23a + t22a;
    int t24 = t24a + t25a;
    int t25 = t24a - t25a;
    int t26 = t27a - t26a;
    int t27 = t27a + t26a;
    int t28 = t28a + t29a;
    int t29 = t28a - t29a;
    int t30 = t31a - t30a;
    int t31 = t31a + t30a;

    // stage 3: secondary rotations
    t5a = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
    t6a = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
    t9a = ( t14 * 6270 - t9 * 15137 + (1 << 13)) >> 14;
    t14a = ( t14 * 15137 + t9 * 6270 + (1 << 13)) >> 14;
    t10a = (-(t13 * 15137 + t10 * 6270) + (1 << 13)) >> 14;
    t13a = ( t13 * 6270 - t10 * 15137 + (1 << 13)) >> 14;
    t17a = ( t30 * 3196 - t17 * 16069 + (1 << 13)) >> 14;
    t30a = ( t30 * 16069 + t17 * 3196 + (1 << 13)) >> 14;
    t18a = (-(t29 * 16069 + t18 * 3196) + (1 << 13)) >> 14;
    t29a = ( t29 * 3196 - t18 * 16069 + (1 << 13)) >> 14;
    t21a = ( t26 * 13623 - t21 * 9102 + (1 << 13)) >> 14;
    t26a = ( t26 * 9102 + t21 * 13623 + (1 << 13)) >> 14;
    t22a = (-(t25 * 9102 + t22 * 13623) + (1 << 13)) >> 14;
    t25a = ( t25 * 13623 - t22 * 9102 + (1 << 13)) >> 14;

    // stage 4: butterflies
    t0a = t0 + t7;
    t1a = t1 + t6a;
    t2a = t2 + t5a;
    t3a = t3 + t4;
    t4a = t3 - t4;
    t5 = t2 - t5a;
    t6 = t1 - t6a;
    t7a = t0 - t7;
    t8a = t8 + t11;
    t9 = t9a + t10a;
    t10 = t9a - t10a;
    t11a = t8 - t11;
    t12a = t15 - t12;
    t13 = t14a - t13a;
    t14 = t14a + t13a;
    t15a = t15 + t12;
    t16a = t16 + t19;
    t17 = t17a + t18a;
    t18 = t17a - t18a;
    t19a = t16 - t19;
    t20a = t23 - t20;
    t21 = t22a - t21a;
    t22 = t22a + t21a;
    t23a = t23 + t20;
    t24a = t24 + t27;
    t25 = t25a + t26a;
    t26 = t25a - t26a;
    t27a = t24 - t27;
    t28a = t31 - t28;
    t29 = t30a - t29a;
    t30 = t30a + t29a;
    t31a = t31 + t28;

    // stage 5: further rotations
    t10a = ((t13 - t10) * 11585 + (1 << 13)) >> 14;
    t13a = ((t13 + t10) * 11585 + (1 << 13)) >> 14;
    t11 = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
    t12 = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;
    t18a = ( t29 * 6270 - t18 * 15137 + (1 << 13)) >> 14;
    t29a = ( t29 * 15137 + t18 * 6270 + (1 << 13)) >> 14;
    t19 = ( t28a * 6270 - t19a * 15137 + (1 << 13)) >> 14;
    t28 = ( t28a * 15137 + t19a * 6270 + (1 << 13)) >> 14;
    t20 = (-(t27a * 15137 + t20a * 6270) + (1 << 13)) >> 14;
    t27 = ( t27a * 6270 - t20a * 15137 + (1 << 13)) >> 14;
    t21a = (-(t26 * 15137 + t21 * 6270) + (1 << 13)) >> 14;
    t26a = ( t26 * 6270 - t21 * 15137 + (1 << 13)) >> 14;

    // stage 6: butterflies
    t0 = t0a + t15a;
    t1 = t1a + t14;
    t2 = t2a + t13a;
    t3 = t3a + t12;
    t4 = t4a + t11;
    t5a = t5 + t10a;
    t6a = t6 + t9;
    t7 = t7a + t8a;
    t8 = t7a - t8a;
    t9a = t6 - t9;
    t10 = t5 - t10a;
    t11a = t4a - t11;
    t12a = t3a - t12;
    t13 = t2a - t13a;
    t14a = t1a - t14;
    t15 = t0a - t15a;
    t16 = t16a + t23a;
    t17a = t17 + t22;
    t18 = t18a + t21a;
    t19a = t19 + t20;
    t20a = t19 - t20;
    t21 = t18a - t21a;
    t22a = t17 - t22;
    t23 = t16a - t23a;
    t24 = t31a - t24a;
    t25a = t30 - t25;
    t26 = t29a - t26a;
    t27a = t28 - t27;
    t28a = t28 + t27;
    t29 = t29a + t26a;
    t30a = t30 + t25;
    t31 = t31a + t24a;

    // stage 7: final cos(pi/4) scalings of the middle odd terms
    t20 = ((t27a - t20a) * 11585 + (1 << 13)) >> 14;
    t27 = ((t27a + t20a) * 11585 + (1 << 13)) >> 14;
    t21a = ((t26 - t21 ) * 11585 + (1 << 13)) >> 14;
    t26a = ((t26 + t21 ) * 11585 + (1 << 13)) >> 14;
    t22 = ((t25a - t22a) * 11585 + (1 << 13)) >> 14;
    t25 = ((t25a + t22a) * 11585 + (1 << 13)) >> 14;
    t23a = ((t24 - t23 ) * 11585 + (1 << 13)) >> 14;
    t24a = ((t24 + t23 ) * 11585 + (1 << 13)) >> 14;

    // final butterfly into the output order
    out[ 0] = t0 + t31;
    out[ 1] = t1 + t30a;
    out[ 2] = t2 + t29;
    out[ 3] = t3 + t28a;
    out[ 4] = t4 + t27;
    out[ 5] = t5a + t26a;
    out[ 6] = t6a + t25;
    out[ 7] = t7 + t24a;
    out[ 8] = t8 + t23a;
    out[ 9] = t9a + t22;
    out[10] = t10 + t21a;
    out[11] = t11a + t20;
    out[12] = t12a + t19a;
    out[13] = t13 + t18;
    out[14] = t14a + t17a;
    out[15] = t15 + t16;
    out[16] = t15 - t16;
    out[17] = t14a - t17a;
    out[18] = t13 - t18;
    out[19] = t12a - t19a;
    out[20] = t11a - t20;
    out[21] = t10 - t21a;
    out[22] = t9a - t22;
    out[23] = t8 - t23a;
    out[24] = t7 - t24a;
    out[25] = t6a - t25;
    out[26] = t5a - t26a;
    out[27] = t4 - t27;
    out[28] = t3 - t28a;
    out[29] = t2 - t29;
    out[30] = t1 - t30a;
    out[31] = t0 - t31;
}

itxfm_wrapper(idct, idct, 32, 6, 1)
1417
1418static av_always_inline void iwht4_1d(const int16_t *in, ptrdiff_t stride,
1419 int16_t *out, int pass)
1420{
1421 int t0, t1, t2, t3, t4;
1422
1423 if (pass == 0) {
1424 t0 = IN(0) >> 2;
1425 t1 = IN(3) >> 2;
1426 t2 = IN(1) >> 2;
1427 t3 = IN(2) >> 2;
1428 } else {
1429 t0 = IN(0);
1430 t1 = IN(3);
1431 t2 = IN(1);
1432 t3 = IN(2);
1433 }
1434
1435 t0 += t2;
1436 t3 -= t1;
1437 t4 = (t0 - t3) >> 1;
1438 t1 = t4 - t1;
1439 t2 = t4 - t2;
1440 t0 -= t1;
1441 t3 += t2;
1442
1443 out[0] = t0;
1444 out[1] = t1;
1445 out[2] = t2;
1446 out[3] = t3;
1447}
1448
// Lossless 4x4 WHT: no intermediate rounding shift, no output scaling.
itxfm_wrapper(iwht, iwht, 4, 0, 0)

#undef IN
#undef itxfm_wrapper
#undef itxfm_wrap
1454
// Fill in the inverse-transform function table. 4x4 through 16x16 have all
// four DCT/ADST combinations; 32x32 is DCT-only and index 4 is the
// lossless WHT, so those map every combination to a single function.
static av_cold void vp9dsp_itxfm_init(VP9DSPContext *dsp)
{
#define init_itxfm(tx, sz) \
    dsp->itxfm_add[tx][DCT_DCT] = idct_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][DCT_ADST] = iadst_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_DCT] = idct_iadst_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_ADST] = iadst_iadst_##sz##_add_c

#define init_idct(tx, nm) \
    dsp->itxfm_add[tx][DCT_DCT] = \
    dsp->itxfm_add[tx][ADST_DCT] = \
    dsp->itxfm_add[tx][DCT_ADST] = \
    dsp->itxfm_add[tx][ADST_ADST] = nm##_add_c

    init_itxfm(TX_4X4, 4x4);
    init_itxfm(TX_8X8, 8x8);
    init_itxfm(TX_16X16, 16x16);
    init_idct(TX_32X32, idct_idct_32x32);
    init_idct(4 /* lossless */, iwht_iwht_4x4);

#undef init_itxfm
#undef init_idct
}
1478
// Core VP9 loop filter over one 8-pixel edge segment. `stridea` steps
// along the edge (one pixel per iteration), `strideb` steps across it.
// E/I/H are the edge/interior/high-edge-variance thresholds; wd is the
// filter width (4, 8 or 16) and is a compile-time constant at every call
// site, so the wd checks fold away under av_always_inline.
static av_always_inline void loop_filter(uint8_t *dst, int E, int I, int H,
                                         ptrdiff_t stridea, ptrdiff_t strideb,
                                         int wd)
{
    int i;

    for (i = 0; i < 8; i++, dst += stridea) {
        int p7, p6, p5, p4;
        int p3 = dst[strideb * -4], p2 = dst[strideb * -3];
        int p1 = dst[strideb * -2], p0 = dst[strideb * -1];
        int q0 = dst[strideb * +0], q1 = dst[strideb * +1];
        int q2 = dst[strideb * +2], q3 = dst[strideb * +3];
        int q4, q5, q6, q7;
        // filter-mask decision: skip this pixel if the edge is too strong
        int fm = FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
                 FFABS(p1 - p0) <= I && FFABS(q1 - q0) <= I &&
                 FFABS(q2 - q1) <= I && FFABS(q3 - q2) <= I &&
                 FFABS(p0 - q0) * 2 + (FFABS(p1 - q1) >> 1) <= E;
        // only set when wd >= 16 / wd >= 8 respectively, and only read
        // under the same conditions below
        int flat8out, flat8in;

        if (!fm)
            continue;

        if (wd >= 16) {
            p7 = dst[strideb * -8];
            p6 = dst[strideb * -7];
            p5 = dst[strideb * -6];
            p4 = dst[strideb * -5];
            q4 = dst[strideb * +4];
            q5 = dst[strideb * +5];
            q6 = dst[strideb * +6];
            q7 = dst[strideb * +7];

            flat8out = FFABS(p7 - p0) <= 1 && FFABS(p6 - p0) <= 1 &&
                       FFABS(p5 - p0) <= 1 && FFABS(p4 - p0) <= 1 &&
                       FFABS(q4 - q0) <= 1 && FFABS(q5 - q0) <= 1 &&
                       FFABS(q6 - q0) <= 1 && FFABS(q7 - q0) <= 1;
        }

        if (wd >= 8)
            flat8in = FFABS(p3 - p0) <= 1 && FFABS(p2 - p0) <= 1 &&
                      FFABS(p1 - p0) <= 1 && FFABS(q1 - q0) <= 1 &&
                      FFABS(q2 - q0) <= 1 && FFABS(q3 - q0) <= 1;

        if (wd >= 16 && flat8out && flat8in) {
            // 15-tap smoothing across the full 16-pixel span
            dst[strideb * -7] = (p7 + p7 + p7 + p7 + p7 + p7 + p7 + p6 * 2 +
                                 p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
            dst[strideb * -6] = (p7 + p7 + p7 + p7 + p7 + p7 + p6 + p5 * 2 +
                                 p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4;
            dst[strideb * -5] = (p7 + p7 + p7 + p7 + p7 + p6 + p5 + p4 * 2 +
                                 p3 + p2 + p1 + p0 + q0 + q1 + q2 + 8) >> 4;
            dst[strideb * -4] = (p7 + p7 + p7 + p7 + p6 + p5 + p4 + p3 * 2 +
                                 p2 + p1 + p0 + q0 + q1 + q2 + q3 + 8) >> 4;
            dst[strideb * -3] = (p7 + p7 + p7 + p6 + p5 + p4 + p3 + p2 * 2 +
                                 p1 + p0 + q0 + q1 + q2 + q3 + q4 + 8) >> 4;
            dst[strideb * -2] = (p7 + p7 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
                                 p0 + q0 + q1 + q2 + q3 + q4 + q5 + 8) >> 4;
            dst[strideb * -1] = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
                                 q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
            dst[strideb * +0] = (p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
                                 q1 + q2 + q3 + q4 + q5 + q6 + q7 + 8) >> 4;
            dst[strideb * +1] = (p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
                                 q2 + q3 + q4 + q5 + q6 + q7 + q7 + 8) >> 4;
            dst[strideb * +2] = (p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
                                 q3 + q4 + q5 + q6 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +3] = (p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 +
                                 q4 + q5 + q6 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +4] = (p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 +
                                 q5 + q6 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +5] = (p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 +
                                 q6 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +6] = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 +
                                 q7 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
        } else if (wd >= 8 && flat8in) {
            // 7-tap smoothing across the inner 8 pixels
            dst[strideb * -3] = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
            dst[strideb * -2] = (p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
            dst[strideb * -1] = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;
            dst[strideb * +0] = (p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3 + 4) >> 3;
            dst[strideb * +1] = (p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3 + 4) >> 3;
            dst[strideb * +2] = (p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3 + 4) >> 3;
        } else {
            // narrow filter; hev selects 2-pixel vs 4-pixel adjustment
            int hev = FFABS(p1 - p0) > H || FFABS(q1 - q0) > H;

            if (hev) {
                int f = av_clip_int8(3 * (q0 - p0) + av_clip_int8(p1 - q1)), f1, f2;

                f1 = FFMIN(f + 4, 127) >> 3;
                f2 = FFMIN(f + 3, 127) >> 3;

                dst[strideb * -1] = av_clip_uint8(p0 + f2);
                dst[strideb * +0] = av_clip_uint8(q0 - f1);
            } else {
                int f = av_clip_int8(3 * (q0 - p0)), f1, f2;

                f1 = FFMIN(f + 4, 127) >> 3;
                f2 = FFMIN(f + 3, 127) >> 3;

                dst[strideb * -1] = av_clip_uint8(p0 + f2);
                dst[strideb * +0] = av_clip_uint8(q0 - f1);

                // low edge variance: also nudge p1/q1 by half the step
                f = (f1 + 1) >> 1;
                dst[strideb * -2] = av_clip_uint8(p1 + f);
                dst[strideb * +1] = av_clip_uint8(q1 - f);
            }
        }
    }
}
1585
// 8-pixel edge wrappers: horizontal edges iterate along the row
// (stridea = stride walks pixels, strideb = 1 crosses the edge),
// vertical edges swap the two.
#define lf_8_fn(dir, wd, stridea, strideb) \
static void loop_filter_##dir##_##wd##_8_c(uint8_t *dst, \
                                           ptrdiff_t stride, \
                                           int E, int I, int H) \
{ \
    loop_filter(dst, E, I, H, stridea, strideb, wd); \
}

#define lf_8_fns(wd) \
lf_8_fn(h, wd, stride, 1) \
lf_8_fn(v, wd, 1, stride)

lf_8_fns(4)
lf_8_fns(8)
lf_8_fns(16)

#undef lf_8_fn
#undef lf_8_fns

// 16-pixel edge with the full 16-wide filter: two 8-pixel halves.
#define lf_16_fn(dir, stridea) \
static void loop_filter_##dir##_16_16_c(uint8_t *dst, \
                                        ptrdiff_t stride, \
                                        int E, int I, int H) \
{ \
    loop_filter_##dir##_16_8_c(dst, stride, E, I, H); \
    loop_filter_##dir##_16_8_c(dst + 8 * stridea, stride, E, I, H); \
}

lf_16_fn(h, stride)
lf_16_fn(v, 1)

#undef lf_16_fn

// Mixed-width 16-pixel edges: each half uses its own width, and each
// half's E/I/H thresholds are packed into one byte of the arguments.
#define lf_mix_fn(dir, wd1, wd2, stridea) \
static void loop_filter_##dir##_##wd1##wd2##_16_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  int E, int I, int H) \
{ \
    loop_filter_##dir##_##wd1##_8_c(dst, stride, E & 0xff, I & 0xff, H & 0xff); \
    loop_filter_##dir##_##wd2##_8_c(dst + 8 * stridea, stride, E >> 8, I >> 8, H >> 8); \
}

#define lf_mix_fns(wd1, wd2) \
lf_mix_fn(h, wd1, wd2, stride) \
lf_mix_fn(v, wd1, wd2, 1)

lf_mix_fns(4, 4)
lf_mix_fns(4, 8)
lf_mix_fns(8, 4)
lf_mix_fns(8, 8)

#undef lf_mix_fn
#undef lf_mix_fns
1639
// Fill in the loop-filter tables: [width idx][h/v] for 8-pixel edges,
// [h/v] for full-16 edges, and [wd1][wd2][h/v] for mixed-width edges.
static av_cold void vp9dsp_loopfilter_init(VP9DSPContext *dsp)
{
    dsp->loop_filter_8[0][0] = loop_filter_h_4_8_c;
    dsp->loop_filter_8[0][1] = loop_filter_v_4_8_c;
    dsp->loop_filter_8[1][0] = loop_filter_h_8_8_c;
    dsp->loop_filter_8[1][1] = loop_filter_v_8_8_c;
    dsp->loop_filter_8[2][0] = loop_filter_h_16_8_c;
    dsp->loop_filter_8[2][1] = loop_filter_v_16_8_c;

    dsp->loop_filter_16[0] = loop_filter_h_16_16_c;
    dsp->loop_filter_16[1] = loop_filter_v_16_16_c;

    dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_c;
    dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_c;
    dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_c;
    dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_c;
    dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_c;
    dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_c;
    dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_c;
    dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_c;
}
1661
1662static av_always_inline void copy_c(uint8_t *dst, ptrdiff_t dst_stride,
1663 const uint8_t *src, ptrdiff_t src_stride,
1664 int w, int h)
1665{
1666 do {
1667 memcpy(dst, src, w);
1668
1669 dst += dst_stride;
1670 src += src_stride;
1671 } while (--h);
1672}
1673
1674static av_always_inline void avg_c(uint8_t *dst, ptrdiff_t dst_stride,
1675 const uint8_t *src, ptrdiff_t src_stride,
1676 int w, int h)
1677{
1678 do {
1679 int x;
1680
1681 for (x = 0; x < w; x += 4)
1682 AV_WN32A(&dst[x], rnd_avg32(AV_RN32A(&dst[x]), AV_RN32(&src[x])));
1683
1684 dst += dst_stride;
1685 src += src_stride;
1686 } while (--h);
1687}
1688
// Instantiate fixed-size full-pel copy/avg entry points matching the mc
// function-pointer signature (mx/my are unused at full-pel positions).
#define fpel_fn(type, sz) \
static void type##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                         const uint8_t *src, ptrdiff_t src_stride, \
                         int h, int mx, int my) \
{ \
    type##_c(dst, dst_stride, src, src_stride, sz, h); \
}

#define copy_avg_fn(sz) \
fpel_fn(copy, sz) \
fpel_fn(avg, sz)

copy_avg_fn(64)
copy_avg_fn(32)
copy_avg_fn(16)
copy_avg_fn(8)
copy_avg_fn(4)

#undef fpel_fn
#undef copy_avg_fn
1709
// VP9 8-tap subpel filter coefficients, 15 fractional positions (1..15 in
// 1/16-pel units) per filter type. Each row sums to 128; results are
// normalized by ">> 7" in FILTER_8TAP below.
static const int8_t vp9_subpel_filters[3][15][8] = {
    [FILTER_8TAP_REGULAR] = {
        {  0,  1,  -5, 126,   8,  -3,  1,  0 },
        { -1,  3, -10, 122,  18,  -6,  2,  0 },
        { -1,  4, -13, 118,  27,  -9,  3, -1 },
        { -1,  4, -16, 112,  37, -11,  4, -1 },
        { -1,  5, -18, 105,  48, -14,  4, -1 },
        { -1,  5, -19,  97,  58, -16,  5, -1 },
        { -1,  6, -19,  88,  68, -18,  5, -1 },
        { -1,  6, -19,  78,  78, -19,  6, -1 },
        { -1,  5, -18,  68,  88, -19,  6, -1 },
        { -1,  5, -16,  58,  97, -19,  5, -1 },
        { -1,  4, -14,  48, 105, -18,  5, -1 },
        { -1,  4, -11,  37, 112, -16,  4, -1 },
        { -1,  3,  -9,  27, 118, -13,  4, -1 },
        {  0,  2,  -6,  18, 122, -10,  3, -1 },
        {  0,  1,  -3,   8, 126,  -5,  1,  0 },
    }, [FILTER_8TAP_SHARP] = {
        { -1,  3,  -7, 127,   8,  -3,  1,  0 },
        { -2,  5, -13, 125,  17,  -6,  3, -1 },
        { -3,  7, -17, 121,  27, -10,  5, -2 },
        { -4,  9, -20, 115,  37, -13,  6, -2 },
        { -4, 10, -23, 108,  48, -16,  8, -3 },
        { -4, 10, -24, 100,  59, -19,  9, -3 },
        { -4, 11, -24,  90,  70, -21, 10, -4 },
        { -4, 11, -23,  80,  80, -23, 11, -4 },
        { -4, 10, -21,  70,  90, -24, 11, -4 },
        { -3,  9, -19,  59, 100, -24, 10, -4 },
        { -3,  8, -16,  48, 108, -23, 10, -4 },
        { -2,  6, -13,  37, 115, -20,  9, -4 },
        { -2,  5, -10,  27, 121, -17,  7, -3 },
        { -1,  3,  -6,  17, 125, -13,  5, -2 },
        {  0,  1,  -3,   8, 127,  -7,  3, -1 },
    }, [FILTER_8TAP_SMOOTH] = {
        { -3, -1,  32,  64,  38,   1, -3,  0 },
        { -2, -2,  29,  63,  41,   2, -3,  0 },
        { -2, -2,  26,  63,  43,   4, -4,  0 },
        { -2, -3,  24,  62,  46,   5, -4,  0 },
        { -2, -3,  21,  60,  49,   7, -4,  0 },
        { -1, -4,  18,  59,  51,   9, -4,  0 },
        { -1, -4,  16,  57,  53,  12, -4, -1 },
        { -1, -4,  14,  55,  55,  14, -4, -1 },
        { -1, -4,  12,  53,  57,  16, -4, -1 },
        {  0, -4,   9,  51,  59,  18, -4, -1 },
        {  0, -4,   7,  49,  60,  21, -3, -2 },
        {  0, -4,   5,  46,  62,  24, -3, -2 },
        {  0, -4,   4,  43,  63,  26, -2, -2 },
        {  0, -3,   2,  41,  63,  29, -2, -2 },
        {  0, -3,   1,  38,  64,  32, -1, -3 },
    }
};

// Apply one 8-tap filter F at position x of src, taps spaced `stride`
// apart (3 taps before, 4 after); round, normalize by 128 and clip.
#define FILTER_8TAP(src, x, F, stride) \
    av_clip_uint8((F[0] * src[x + -3 * stride] + \
                   F[1] * src[x + -2 * stride] + \
                   F[2] * src[x + -1 * stride] + \
                   F[3] * src[x + +0 * stride] + \
                   F[4] * src[x + +1 * stride] + \
                   F[5] * src[x + +2 * stride] + \
                   F[6] * src[x + +3 * stride] + \
                   F[7] * src[x + +4 * stride] + 64) >> 7)
1771
1772static av_always_inline void do_8tap_1d_c(uint8_t *dst, ptrdiff_t dst_stride,
1773 const uint8_t *src, ptrdiff_t src_stride,
1774 int w, int h, ptrdiff_t ds,
1775 const int8_t *filter, int avg)
1776{
1777 do {
1778 int x;
1779
1780 for (x = 0; x < w; x++)
1781 if (avg) {
1782 dst[x] = (dst[x] + FILTER_8TAP(src, x, filter, ds) + 1) >> 1;
1783 } else {
1784 dst[x] = FILTER_8TAP(src, x, filter, ds);
1785 }
1786
1787 dst += dst_stride;
1788 src += src_stride;
1789 } while (--h);
1790}
1791
// Instantiate put/avg 1-D wrappers; `ds` selects the tap spacing
// (1 = horizontal, src_stride = vertical).
#define filter_8tap_1d_fn(opn, opa, dir, ds) \
static av_noinline void opn##_8tap_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                                const uint8_t *src, ptrdiff_t src_stride, \
                                                int w, int h, const int8_t *filter) \
{ \
    do_8tap_1d_c(dst, dst_stride, src, src_stride, w, h, ds, filter, opa); \
}

filter_8tap_1d_fn(put, 0, v, src_stride)
filter_8tap_1d_fn(put, 0, h, 1)
filter_8tap_1d_fn(avg, 1, v, src_stride)
filter_8tap_1d_fn(avg, 1, h, 1)

#undef filter_8tap_1d_fn
1806
// Two-dimensional (hv) 8-tap filtering: horizontal pass into a 64-wide
// temporary (h + 7 rows: 3 above and 4 below for the vertical taps),
// then vertical pass from the temporary into dst. Block sizes are at
// most 64x64, hence tmp[64 * 71].
static av_always_inline void do_8tap_2d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                          const uint8_t *src, ptrdiff_t src_stride,
                                          int w, int h, const int8_t *filterx,
                                          const int8_t *filtery, int avg)
{
    int tmp_h = h + 7;
    uint8_t tmp[64 * 71], *tmp_ptr = tmp;

    // back up 3 rows so the vertical taps have their leading context
    src -= src_stride * 3;
    do {
        int x;

        for (x = 0; x < w; x++)
            tmp_ptr[x] = FILTER_8TAP(src, x, filterx, 1);

        tmp_ptr += 64;
        src += src_stride;
    } while (--tmp_h);

    // vertical pass starts at the row corresponding to dst row 0
    tmp_ptr = tmp + 64 * 3;
    do {
        int x;

        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filtery, 64) + 1) >> 1;
            } else {
                dst[x] = FILTER_8TAP(tmp_ptr, x, filtery, 64);
            }

        tmp_ptr += 64;
        dst += dst_stride;
    } while (--h);
}
1841
// put/avg wrappers for the 2-D (hv) 8-tap path.
#define filter_8tap_2d_fn(opn, opa) \
static av_noinline void opn##_8tap_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                           const uint8_t *src, ptrdiff_t src_stride, \
                                           int w, int h, const int8_t *filterx, \
                                           const int8_t *filtery) \
{ \
    do_8tap_2d_c(dst, dst_stride, src, src_stride, w, h, filterx, filtery, opa); \
}

filter_8tap_2d_fn(put, 0)
filter_8tap_2d_fn(avg, 1)

#undef filter_8tap_2d_fn

#undef FILTER_8TAP

// Size-specific entry points matching the mc table signature; dir_m is
// the subpel coordinate (mx or my) used to index the filter table
// (position 0 is full-pel and handled elsewhere, hence "- 1").
#define filter_fn_1d(sz, dir, dir_m, type, type_idx, avg) \
static void avg##_8tap_##type##_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                              const uint8_t *src, ptrdiff_t src_stride, \
                                              int h, int mx, int my) \
{ \
    avg##_8tap_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, \
                            vp9_subpel_filters[type_idx][dir_m - 1]); \
}

#define filter_fn_2d(sz, type, type_idx, avg) \
static void avg##_8tap_##type##_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                           const uint8_t *src, ptrdiff_t src_stride, \
                                           int h, int mx, int my) \
{ \
    avg##_8tap_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, \
                       vp9_subpel_filters[type_idx][mx - 1], \
                       vp9_subpel_filters[type_idx][my - 1]); \
}

// Bilinear interpolation between src[x] and its `stride` neighbour with
// 1/16-pel weight mxy, rounded to nearest.
#define FILTER_BILIN(src, x, mxy, stride) \
    (src[x] + ((mxy * (src[x + stride] - src[x]) + 8) >> 4))
1879
1880static av_always_inline void do_bilin_1d_c(uint8_t *dst, ptrdiff_t dst_stride,
1881 const uint8_t *src, ptrdiff_t src_stride,
1882 int w, int h, ptrdiff_t ds, int mxy, int avg)
1883{
1884 do {
1885 int x;
1886
1887 for (x = 0; x < w; x++)
1888 if (avg) {
1889 dst[x] = (dst[x] + FILTER_BILIN(src, x, mxy, ds) + 1) >> 1;
1890 } else {
1891 dst[x] = FILTER_BILIN(src, x, mxy, ds);
1892 }
1893
1894 dst += dst_stride;
1895 src += src_stride;
1896 } while (--h);
1897}
1898
// put/avg 1-D bilinear wrappers; `ds` selects the neighbour spacing
// (1 = horizontal, src_stride = vertical).
#define bilin_1d_fn(opn, opa, dir, ds) \
static av_noinline void opn##_bilin_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                                 const uint8_t *src, ptrdiff_t src_stride, \
                                                 int w, int h, int mxy) \
{ \
    do_bilin_1d_c(dst, dst_stride, src, src_stride, w, h, ds, mxy, opa); \
}

bilin_1d_fn(put, 0, v, src_stride)
bilin_1d_fn(put, 0, h, 1)
bilin_1d_fn(avg, 1, v, src_stride)
bilin_1d_fn(avg, 1, h, 1)

#undef bilin_1d_fn
1913
// Two-dimensional (hv) bilinear filtering: horizontal pass into a 64-wide
// temporary (h + 1 rows, one extra row for the vertical neighbour), then
// vertical pass into dst. Block sizes are at most 64x64, hence
// tmp[64 * 65].
static av_always_inline void do_bilin_2d_c(uint8_t *dst, ptrdiff_t dst_stride,
                                           const uint8_t *src, ptrdiff_t src_stride,
                                           int w, int h, int mx, int my, int avg)
{
    uint8_t tmp[64 * 65], *tmp_ptr = tmp;
    int tmp_h = h + 1;

    do {
        int x;

        for (x = 0; x < w; x++)
            tmp_ptr[x] = FILTER_BILIN(src, x, mx, 1);

        tmp_ptr += 64;
        src += src_stride;
    } while (--tmp_h);

    tmp_ptr = tmp;
    do {
        int x;

        for (x = 0; x < w; x++)
            if (avg) {
                dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
            } else {
                dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
            }

        tmp_ptr += 64;
        dst += dst_stride;
    } while (--h);
}
1946
/*
 * Generate the out-of-line put/avg 2-D (hv) bilinear functions on top
 * of the always_inline worker above; "opa" is the avg flag baked into
 * each expansion (put/0, avg/1).
 */
#define bilin_2d_fn(opn, opa) \
static av_noinline void opn##_bilin_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                            const uint8_t *src, ptrdiff_t src_stride, \
                                            int w, int h, int mx, int my) \
{ \
    do_bilin_2d_c(dst, dst_stride, src, src_stride, w, h, mx, my, opa); \
}

bilin_2d_fn(put, 0)
bilin_2d_fn(avg, 1)

#undef bilin_2d_fn

/* FILTER_BILIN is not needed past this point. */
#undef FILTER_BILIN
1961
/*
 * Final-shape bilinear wrappers, named {put,avg}_bilin_<sz><h|v|hv>_c to
 * match the table-init macros below: the block width is fixed to "sz"
 * and "dir_m" picks mx or my as the subpel phase for that direction.
 * NB: the macro parameter called "avg" actually carries the op name
 * (put or avg), mirroring filter_fn_1d/filter_fn_2d defined earlier.
 */
#define bilinf_fn_1d(sz, dir, dir_m, avg) \
static void avg##_bilin_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                      const uint8_t *src, ptrdiff_t src_stride, \
                                      int h, int mx, int my) \
{ \
    avg##_bilin_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, dir_m); \
}

/* 2-D (hv) bilinear wrapper: both mx and my are used. */
#define bilinf_fn_2d(sz, avg) \
static void avg##_bilin_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
                                   const uint8_t *src, ptrdiff_t src_stride, \
                                   int h, int mx, int my) \
{ \
    avg##_bilin_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, mx, my); \
}

/*
 * For one block size and one op, instantiate the complete subpel family:
 * h, v and hv variants for each of the three 8-tap filters
 * (regular/smooth/sharp; filter_fn_1d/filter_fn_2d are defined earlier
 * in this file) plus the bilinear filter.
 */
#define filter_fn(sz, avg) \
filter_fn_1d(sz, h, mx, regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_1d(sz, v, my, regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_2d(sz, regular, FILTER_8TAP_REGULAR, avg) \
filter_fn_1d(sz, h, mx, smooth, FILTER_8TAP_SMOOTH, avg) \
filter_fn_1d(sz, v, my, smooth, FILTER_8TAP_SMOOTH, avg) \
filter_fn_2d(sz, smooth, FILTER_8TAP_SMOOTH, avg) \
filter_fn_1d(sz, h, mx, sharp, FILTER_8TAP_SHARP, avg) \
filter_fn_1d(sz, v, my, sharp, FILTER_8TAP_SHARP, avg) \
filter_fn_2d(sz, sharp, FILTER_8TAP_SHARP, avg) \
bilinf_fn_1d(sz, h, mx, avg) \
bilinf_fn_1d(sz, v, my, avg) \
bilinf_fn_2d(sz, avg)

/* Instantiate the family for every VP9 block size (64 down to 4). */
#define filter_fn_set(avg) \
filter_fn(64, avg) \
filter_fn(32, avg) \
filter_fn(16, avg) \
filter_fn(8, avg) \
filter_fn(4, avg)

filter_fn_set(put)
filter_fn_set(avg)

#undef filter_fn
#undef filter_fn_set
#undef filter_fn_1d
#undef filter_fn_2d
#undef bilinf_fn_1d
#undef bilinf_fn_2d
2008
/*
 * Populate the motion-compensation function table with the C versions
 * generated above. Judging by the init calls below, dsp->mc is indexed
 * as [size idx: 0=64 .. 4=4][filter type][0=put, 1=avg][subpel-x?][subpel-y?].
 */
static av_cold void vp9dsp_mc_init(VP9DSPContext *dsp)
{
/* Fullpel ([0][0]) needs no filtering, so every filter type shares the
 * same copy (put) or avg implementation. */
#define init_fpel(idx1, idx2, sz, type) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = type##sz##_c; \
    dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = type##sz##_c

#define init_copy_avg(idx, sz) \
    init_fpel(idx, 0, sz, copy); \
    init_fpel(idx, 1, sz, avg)

    init_copy_avg(0, 64);
    init_copy_avg(1, 32);
    init_copy_avg(2, 16);
    init_copy_avg(3, 8);
    init_copy_avg(4, 4);

#undef init_copy_avg
#undef init_fpel

/* Subpel: paste together the generated function names, e.g.
 * put_8tap_smooth_64hv_c or avg_bilin_4h_c. */
#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_c; \
    dsp->mc[idx1][FILTER_BILINEAR ][idx2][idxh][idxv] = type##_bilin_##sz##dir##_c

/* One direction variant across all five block sizes. */
#define init_subpel2(idx, idxh, idxv, dir, type) \
    init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
    init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
    init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
    init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
    init_subpel1(4, idx, idxh, idxv, 4, dir, type)

/* All three direction variants (hv, v-only, h-only) for one op. */
#define init_subpel3(idx, type) \
    init_subpel2(idx, 1, 1, hv, type); \
    init_subpel2(idx, 0, 1, v, type); \
    init_subpel2(idx, 1, 0, h, type)

    init_subpel3(0, put);
    init_subpel3(1, avg);

#undef init_subpel1
#undef init_subpel2
#undef init_subpel3
}
2055
/*
 * Public entry point: fill every VP9 DSP function-pointer table
 * (intra prediction, inverse transforms, loop filter, motion
 * compensation) with the C implementations, then let the x86 init
 * substitute optimized versions where available (presumably — the
 * override happens inside ff_vp9dsp_init_x86).
 */
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp)
{
    vp9dsp_intrapred_init(dsp);
    vp9dsp_itxfm_init(dsp);
    vp9dsp_loopfilter_init(dsp);
    vp9dsp_mc_init(dsp);

    /* ARCH_X86 is a compile-time 0/1 constant, so on other
     * architectures dead-code elimination removes this call while the
     * prototype still gets type-checked. */
    if (ARCH_X86) ff_vp9dsp_init_x86(dsp);
}