/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/mpegvideodsp.h"

#if HAVE_ALTIVEC
/* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
 * to preserve proper dst alignment. */
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                         int stride, int h, int x16, int y16, int rounder)
{
    int i;
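    /* gmc1 is a bilinear interpolation with 1/16-pel fractional offsets
     * (x16, y16 in 0..15).  For every output pixel the code below computes
     *     dst[x] = (A * src[x]          + B * src[x + 1] +
     *               C * src[x + stride] + D * src[x + stride + 1] +
     *               rounder) >> 8
     * with the four weights A..D defined just below. */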
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16),    /* A */
             (x16) * (16 - y16),    /* B */
        (16 - x16) * (y16),         /* C */
             (x16) * (y16),         /* D */
        0, 0, 0, 0                  /* padding */
    };
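    /* The four weights always sum to 16 * 16 = 256, which is why the
     * accumulated result is brought back to pixel range by the >> 8
     * further down. */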
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);
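    /* Classic AltiVec has no unaligned vector loads: vec_ld() ignores the
     * low four address bits, so each potentially unaligned source row is
     * fetched as two aligned vectors and realigned with vec_perm() using
     * the shift mask produced by vec_lvsl(). */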
    /* we'll be able to pick-up our 9 char elements at src from those
     * 32 bytes we load the first batch here, as inside the loop we can
     * reuse 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));
    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
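    /* The vec_mergeh(vczero, ...) calls interleave a zero byte in front of
     * each data byte, zero-extending the first eight unsigned chars of a
     * row into eight unsigned shorts for the 16-bit multiply-adds in the
     * loop. */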
    for (i = 0; i < h; i++) {
        dst_odd        =   (unsigned long) dst            & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);
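        /* dstv is loaded because vec_st() can only store a full, aligned
         * 16-byte vector: the eight result bytes are later merged into the
         * proper half of dstv (selected via dst_odd) before being written
         * back. */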
        /* We'll be able to pick-up our 9 char elements at src + stride from
         * those 32 bytes then reuse the resulting 2 vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride +  0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);
        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;
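        /* Each vec_mladd() above is an eight-lane 16-bit multiply-add, so
         * tempD now holds A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder
         * per pixel; the shift by 8 below brings it back to pixel range. */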
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
#if HAVE_ALTIVEC
    c->gmc1 = gmc1_altivec;
#endif /* HAVE_ALTIVEC */
}