/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* config.h provides HAVE_ALTIVEC; libavutil/attributes.h provides av_cold. */
#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideodsp.h"

#if HAVE_ALTIVEC
/* AltiVec-enhanced gmc1. At the moment this code assumes that stride is a
 * multiple of 8, so that the 8-byte alignment of dst is preserved from one
 * row to the next. */
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                         int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
        (x16)      * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
        (x16)      * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
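    /* A, B, C and D above are the bilinear weights of the four source pixels
     * surrounding each destination pixel; they always sum to 16 * 16 = 256,
     * which is why the weighted sum computed in the loop is brought back to
     * pixel range by a single ">> 8" (the vec_sr by vcsr8 below). */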
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);
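    /* Av..Dv each hold one of the four weights broadcast to all eight 16-bit
     * lanes. rounder_a is 16-byte aligned, so vec_lde places it in element 0
     * and vec_splat(..., 0) broadcasts it, giving rounderV the rounding
     * constant in every lane. */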
59 | ||
60 | /* we'll be able to pick-up our 9 char elements at src from those | |
61 | * 32 bytes we load the first batch here, as inside the loop we can | |
62 | * reuse 'src + stride' from one iteration as the 'src' of the next. */ | |
63 | register vector unsigned char src_0 = vec_ld(0, src); | |
64 | register vector unsigned char src_1 = vec_ld(16, src); | |
65 | register vector unsigned char srcvA = vec_perm(src_0, src_1, | |
66 | vec_lvsl(0, src)); | |
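    /* Classic AltiVec unaligned load: vec_ld ignores the low four address
     * bits and returns 16-byte aligned data, so the two aligned loads cover
     * the bytes around src and vec_perm with the vec_lvsl(0, src) mask
     * shifts the 16 bytes that actually start at src into srcvA. */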
67 | ||
68 | if (src_really_odd != 0x0000000F) | |
69 | /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned | |
70 | * on the second vector. */ | |
71 | srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); | |
72 | else | |
73 | srcvB = src_1; | |
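    /* Interleaving with the zero vector zero-extends the first eight source
     * bytes to eight 16-bit elements, ready for the 16-bit multiplies done
     * by vec_mladd in the loop. */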
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long) dst & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We'll pick up our 9 char elements at src + stride from these
         * 32 bytes, then reuse the resulting two vectors srcvC and srcvD
         * as the next iteration's srcvA and srcvB. */
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);
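        /* tempD now holds A * src[x] + B * src[x + 1] + C * src[x + stride] +
         * D * src[x + stride + 1] + rounder for each of the eight pixels of
         * this row, i.e. the bilinear blend of the four neighbouring source
         * pixels. */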
107 | ||
108 | srcvA = srcvC; | |
109 | srcvB = srcvD; | |
110 | ||
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
#if HAVE_ALTIVEC
    c->gmc1 = gmc1_altivec;
#endif /* HAVE_ALTIVEC */
}