/*
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2014 Peter Ross
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP8 compatible video decoder
 */

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "mathops.h"
#include "vp8dsp.h"

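/*
 * Generates the DC-only IDCT-add helpers that process four 4x4 blocks at
 * once: the "4uv" variant covers a 2x2 arrangement of blocks (chroma),
 * the "4y" variant four horizontally adjacent blocks (one luma row).
 */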
#define MK_IDCT_DC_ADD4_C(name) \
static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], \
                                      ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
    name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
} \
 \
static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], \
                                     ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst +  0, block[0], stride); \
    name ## _idct_dc_add_c(dst +  4, block[1], stride); \
    name ## _idct_dc_add_c(dst +  8, block[2], stride); \
    name ## _idct_dc_add_c(dst + 12, block[3], stride); \
}

#if CONFIG_VP7_DECODER
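/*
 * Fixed-point constants used by the VP7 inverse transforms:
 * 23170 ~= cos(pi/4) * 2^15, 30274 ~= cos(pi/8) * 2^15,
 * 12540 ~= sin(pi/8) * 2^15; the products are rounded and shifted
 * back down after each 1-D pass.
 */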
static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
        b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
        c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
        d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        AV_ZERO64(dc + i * 4);
        block[0][i][0] = (a1 + d1 + 0x20000) >> 18;
        block[3][i][0] = (a1 - d1 + 0x20000) >> 18;
        block[1][i][0] = (b1 + c1 + 0x20000) >> 18;
        block[2][i][0] = (b1 - c1 + 0x20000) >> 18;
    }
}

static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
        b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
        c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
        d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
        AV_ZERO64(block + i * 4);
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] +
                                            ((a1 + d1 + 0x20000) >> 18));
        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] +
                                            ((a1 - d1 + 0x20000) >> 18));
        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] +
                                            ((b1 + c1 + 0x20000) >> 18));
        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] +
                                            ((b1 - c1 + 0x20000) >> 18));
    }
}

static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp7)
#endif /* CONFIG_VP7_DECODER */

// TODO: Maybe add dequant
#if CONFIG_VP8_DECODER
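/*
 * Inverse 4x4 Walsh-Hadamard transform of the per-block DC coefficients;
 * each result is written to coefficient 0 of the corresponding luma
 * sub-block and the input DCs are cleared.
 */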
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
        t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
        t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
        t3 = dc[0 * 4 + i] - dc[3 * 4 + i];

        dc[0 * 4 + i] = t0 + t1;
        dc[1 * 4 + i] = t3 + t2;
        dc[2 * 4 + i] = t0 - t1;
        dc[3 * 4 + i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
        t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
        t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
        t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
        AV_ZERO64(dc + i * 4);

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}

static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

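/*
 * Fixed-point multipliers of the VP8 inverse transform:
 * MUL_20091(a) ~= a * sqrt(2) * cos(pi/8)  (20091 / 65536 + 1 ~= 1.30656)
 * MUL_35468(a) ~= a * sqrt(2) * sin(pi/8)  (35468 / 65536     ~= 0.54120)
 */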
#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
#define MUL_35468(a)  (((a) * 35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0 * 4 + i] + block[2 * 4 + i];
        t1 = block[0 * 4 + i] - block[2 * 4 + i];
        t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
        t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
        block[0 * 4 + i] = 0;
        block[1 * 4 + i] = 0;
        block[2 * 4 + i] = 0;
        block[3 * 4 + i] = 0;

        tmp[i * 4 + 0] = t0 + t3;
        tmp[i * 4 + 1] = t1 + t2;
        tmp[i * 4 + 2] = t1 - t2;
        tmp[i * 4 + 3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
        t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
        t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
        t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst += stride;
    }
}

static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (block[0] + 4) >> 3;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp8)
#endif /* CONFIG_VP8_DECODER */

// because I like only having two parameters to pass functions...
#define LOAD_PIXELS \
    int av_unused p3 = p[-4 * stride]; \
    int av_unused p2 = p[-3 * stride]; \
    int av_unused p1 = p[-2 * stride]; \
    int av_unused p0 = p[-1 * stride]; \
    int av_unused q0 = p[ 0 * stride]; \
    int av_unused q1 = p[ 1 * stride]; \
    int av_unused q2 = p[ 2 * stride]; \
    int av_unused q3 = p[ 3 * stride];

#define clip_int8(n) (cm[(n) + 0x80] - 0x80)

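/*
 * Core loop-filter step: computes the filter delta from p1, p0, q0, q1 and
 * adjusts p0/q0.  With is4tap == 0 (the "_inner" path without high edge
 * variance), p1/q1 are additionally nudged by half of the rounded delta.
 */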
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
                                           int is4tap, int is_vp7)
{
    LOAD_PIXELS
    int a, f1, f2;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    a = 3 * (q0 - p0);

    if (is4tap)
        a += clip_int8(p1 - q1);

    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a + 4, 127) >> 3;

    if (is_vp7)
        f2 = f1 - ((a & 7) == 4);
    else
        f2 = FFMIN(a + 3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1 * stride] = cm[p0 + f2];
    p[ 0 * stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1 + 1) >> 1;
        p[-2 * stride] = cm[p1 + a];
        p[ 1 * stride] = cm[q1 - a];
    }
}

static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride,
                                               int is4tap)
{
    filter_common(p, stride, is4tap, IS_VP7);
}

static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride,
                                               int is4tap)
{
    filter_common(p, stride, is4tap, IS_VP8);
}

static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride,
                                             int flim)
{
    LOAD_PIXELS
    return FFABS(p0 - q0) <= flim;
}

static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
                                             int flim)
{
    LOAD_PIXELS
    return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
}

/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 */
#define NORMAL_LIMIT(vpn) \
static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, \
                                                       ptrdiff_t stride, \
                                                       int E, int I) \
{ \
    LOAD_PIXELS \
    return vp ## vpn ## _simple_limit(p, stride, E) && \
           FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && \
           FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I && \
           FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
}

NORMAL_LIMIT(7)
NORMAL_LIMIT(8)

// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
}

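/*
 * Strong macroblock-edge filter: adjusts three pixels on each side of the
 * edge (p2..q2) using tapering weights of roughly 27/128, 18/128 and 9/128
 * of the clipped filter delta.
 */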
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1 - q1);
    w = clip_int8(w + 3 * (q0 - p0));

    a0 = (27 * w + 63) >> 7;
    a1 = (18 * w + 63) >> 7;
    a2 =  (9 * w + 63) >> 7;

    p[-3 * stride] = cm[p2 + a2];
    p[-2 * stride] = cm[p1 + a1];
    p[-1 * stride] = cm[p0 + a0];
    p[ 0 * stride] = cm[q0 - a0];
    p[ 1 * stride] = cm[q1 - a1];
    p[ 2 * stride] = cm[q2 - a2];
}

#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
static maybe_inline \
void vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
                                                   ptrdiff_t stride, \
                                                   int flim_E, int flim_I, \
                                                   int hev_thresh) \
{ \
    int i; \
    for (i = 0; i < size; i++) \
        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
                                 flim_E, flim_I)) { \
            if (hev(dst + i * stridea, strideb, hev_thresh)) \
                vpn ## _filter_common(dst + i * stridea, strideb, 1); \
            else \
                filter_mbedge(dst + i * stridea, strideb); \
        } \
} \
 \
static maybe_inline \
void vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
                                                         ptrdiff_t stride, \
                                                         int flim_E, \
                                                         int flim_I, \
                                                         int hev_thresh) \
{ \
    int i; \
    for (i = 0; i < size; i++) \
        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
                                 flim_E, flim_I)) { \
            int hv = hev(dst + i * stridea, strideb, hev_thresh); \
            if (hv) \
                vpn ## _filter_common(dst + i * stridea, strideb, 1); \
            else \
                vpn ## _filter_common(dst + i * stridea, strideb, 0); \
        } \
}

#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
static void vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, \
                                                 uint8_t *dstV, \
                                                 ptrdiff_t stride, int fE, \
                                                 int fI, int hev_thresh) \
{ \
    vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
    vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
} \
 \
static void vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
                                                       uint8_t *dstV, \
                                                       ptrdiff_t stride, \
                                                       int fE, int fI, \
                                                       int hev_thresh) \
{ \
    vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, \
                                             hev_thresh); \
    vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, \
                                             hev_thresh); \
}

#define LOOP_FILTER_SIMPLE(vpn) \
static void vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
                                           int flim) \
{ \
    int i; \
    for (i = 0; i < 16; i++) \
        if (vpn ## _simple_limit(dst + i, stride, flim)) \
            vpn ## _filter_common(dst + i, stride, 1); \
} \
 \
static void vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
                                           int flim) \
{ \
    int i; \
    for (i = 0; i < 16; i++) \
        if (vpn ## _simple_limit(dst + i * stride, 1, flim)) \
            vpn ## _filter_common(dst + i * stride, 1, 1); \
}

#define LOOP_FILTERS(vpn) \
    LOOP_FILTER(vpn, v, 16, 1, stride, ) \
    LOOP_FILTER(vpn, h, 16, stride, 1, ) \
    UV_LOOP_FILTER(vpn, v, 1, stride) \
    UV_LOOP_FILTER(vpn, h, stride, 1) \
    LOOP_FILTER_SIMPLE(vpn) \

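/*
 * Six-tap sub-pixel interpolation coefficients, one row per fractional
 * position 1..7 (indexed with mx - 1 / my - 1); position 0 is the full-pel
 * copy handled by put_vp8_pixels*.  Rows whose outer taps are zero are also
 * usable with the 4-tap filter variant.
 */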
static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};

#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                          uint8_t *src, ptrdiff_t srcstride, \
                                          int h, int x, int y) \
{ \
    int i; \
    for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
        memcpy(dst, src, WIDTH); \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)

#define FILTER_6TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
        F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]

#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
                                                     ptrdiff_t dststride, \
                                                     uint8_t *src, \
                                                     ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
                                                     ptrdiff_t dststride, \
                                                     uint8_t *src, \
                                                     ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}

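/*
 * Two-pass sub-pixel filter for positions fractional in both directions:
 * the horizontal filter first fills a temporary buffer with enough extra
 * rows above and below for the vertical taps, then the vertical filter
 * reads from that buffer.
 */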
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void \
put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
                                                        ptrdiff_t dststride, \
                                                        uint8_t *src, \
                                                        ptrdiff_t srcstride, \
                                                        int h, int mx, \
                                                        int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= (2 - (VTAPS == 4)) * srcstride; \
 \
    for (y = 0; y < h + VTAPS - 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
    tmp    = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
    filter = subpel_filters[my - 1]; \
 \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

VP8_EPEL_H(16, 4)
VP8_EPEL_H(8, 4)
VP8_EPEL_H(4, 4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8, 6)
VP8_EPEL_H(4, 6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8, 4)
VP8_EPEL_V(4, 4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8, 6)
VP8_EPEL_V(4, 6)

VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8, 4, 4)
VP8_EPEL_HV(4, 4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8, 4, 6)
VP8_EPEL_HV(4, 4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8, 6, 4)
VP8_EPEL_HV(4, 6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8, 6, 6)
VP8_EPEL_HV(4, 6, 6)

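/*
 * Bilinear motion compensation: a/b and c/d are the 3-bit horizontal and
 * vertical fractional weights (out of 8), with +4 rounding before the
 * shift.  The _hv_ variant filters horizontally into a temporary buffer
 * (one extra row) and then vertically.
 */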
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
                                             uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
 \
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
                                             uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int c = 8 - my, d = my; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
 \
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
                                              ptrdiff_t dstride, \
                                              uint8_t *src, \
                                              ptrdiff_t sstride, \
                                              int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int c = 8 - my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
    for (y = 0; y < h + 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        tmp += SIZE; \
        src += sstride; \
    } \
    tmp = tmp_array; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
        dst += dstride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)

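/*
 * put_vp8_*_pixels_tab layout: the first index selects the block width
 * (0 = 16, 1 = 8, 2 = 4), the second the vertical filter (0 = none,
 * 1 = 4-tap, 2 = 6-tap) and the third the horizontal filter likewise.
 * The bilinear table uses the same layout, with both non-zero values
 * mapping to the single bilinear filter.
 */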
#define VP78_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP78_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c

av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
{
    VP78_MC_FUNC(0, 16);
    VP78_MC_FUNC(1, 8);
    VP78_MC_FUNC(2, 4);

    VP78_BILINEAR_MC_FUNC(0, 16);
    VP78_BILINEAR_MC_FUNC(1, 8);
    VP78_BILINEAR_MC_FUNC(2, 4);

    if (ARCH_ARM)
        ff_vp78dsp_init_arm(dsp);
    if (ARCH_PPC)
        ff_vp78dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vp78dsp_init_x86(dsp);
}

#if CONFIG_VP7_DECODER
LOOP_FILTERS(vp7)

av_cold void ff_vp7dsp_init(VP8DSPContext *dsp)
{
    dsp->vp8_luma_dc_wht    = vp7_luma_dc_wht_c;
    dsp->vp8_luma_dc_wht_dc = vp7_luma_dc_wht_dc_c;
    dsp->vp8_idct_add       = vp7_idct_add_c;
    dsp->vp8_idct_dc_add    = vp7_idct_dc_add_c;
    dsp->vp8_idct_dc_add4y  = vp7_idct_dc_add4y_c;
    dsp->vp8_idct_dc_add4uv = vp7_idct_dc_add4uv_c;

    dsp->vp8_v_loop_filter16y = vp7_v_loop_filter16_c;
    dsp->vp8_h_loop_filter16y = vp7_h_loop_filter16_c;
    dsp->vp8_v_loop_filter8uv = vp7_v_loop_filter8uv_c;
    dsp->vp8_h_loop_filter8uv = vp7_h_loop_filter8uv_c;

    dsp->vp8_v_loop_filter16y_inner = vp7_v_loop_filter16_inner_c;
    dsp->vp8_h_loop_filter16y_inner = vp7_h_loop_filter16_inner_c;
    dsp->vp8_v_loop_filter8uv_inner = vp7_v_loop_filter8uv_inner_c;
    dsp->vp8_h_loop_filter8uv_inner = vp7_h_loop_filter8uv_inner_c;

    dsp->vp8_v_loop_filter_simple = vp7_v_loop_filter_simple_c;
    dsp->vp8_h_loop_filter_simple = vp7_h_loop_filter_simple_c;
}
#endif /* CONFIG_VP7_DECODER */

#if CONFIG_VP8_DECODER
LOOP_FILTERS(vp8)

av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
{
    dsp->vp8_luma_dc_wht    = vp8_luma_dc_wht_c;
    dsp->vp8_luma_dc_wht_dc = vp8_luma_dc_wht_dc_c;
    dsp->vp8_idct_add       = vp8_idct_add_c;
    dsp->vp8_idct_dc_add    = vp8_idct_dc_add_c;
    dsp->vp8_idct_dc_add4y  = vp8_idct_dc_add4y_c;
    dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;

    dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
    dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
    dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
    dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;

    dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
    dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
    dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
    dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;

    dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
    dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;

    if (ARCH_ARM)
        ff_vp8dsp_init_arm(dsp);
    if (ARCH_X86)
        ff_vp8dsp_init_x86(dsp);
}
#endif /* CONFIG_VP8_DECODER */