/*
 * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

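/* The macro below performs one full 4x4 RV34 inverse transform: rows
 * first, then (after a vtrn/vswp transpose) columns, leaving the four
 * result vectors z0+z3, z1+z2, z1-z2 and z0-z3 in q1, q2, q3 and q15 at
 * 32-bit precision. As a rough scalar sketch of the 1-D butterfly it
 * applies twice (cf. rv34_row_transform() in libavcodec/rv34dsp.c; the
 * function name here is illustrative):
 *
 *     static void row_transform_sketch(int temp[16], const int16_t block[16])
 *     {
 *         for (int i = 0; i < 4; i++) {
 *             const int z0 = 13 * (block[i + 4*0] + block[i + 4*2]);
 *             const int z1 = 13 * (block[i + 4*0] - block[i + 4*2]);
 *             const int z2 =  7 *  block[i + 4*1] - 17 * block[i + 4*3];
 *             const int z3 = 17 *  block[i + 4*1] +  7 * block[i + 4*3];
 *
 *             temp[4*i + 0] = z0 + z3;
 *             temp[4*i + 1] = z1 + z2;
 *             temp[4*i + 2] = z1 - z2;
 *             temp[4*i + 3] = z0 - z3;
 *         }
 *     }
 */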
.macro rv34_inv_transform r0
        vld1.16         {q14-q15}, [\r0,:128]
        vmov.s16        d0,  #13
        vshll.s16       q12, d29, #3
        vshll.s16       q13, d29, #4
        vshll.s16       q9,  d31, #3
        vshll.s16       q1,  d31, #4
        vmull.s16       q10, d28, d0[0]         @ z0 = 13 * (block[i+4*0] + block[i+4*2])
        vmlal.s16       q10, d30, d0[0]
        vmull.s16       q11, d28, d0[0]         @ z1 = 13 * (block[i+4*0] - block[i+4*2])
        vmlsl.s16       q11, d30, d0[0]
        vsubw.s16       q12, q12, d29           @ z2 = block[i+4*1] * 7
        vaddw.s16       q13, q13, d29           @ z3 = block[i+4*1] * 17
        vsubw.s16       q9,  q9,  d31
        vaddw.s16       q1,  q1,  d31
        vadd.s32        q13, q13, q9            @ z3 = 17 * block[i+4*1] +  7 * block[i+4*3]
        vsub.s32        q12, q12, q1            @ z2 =  7 * block[i+4*1] - 17 * block[i+4*3]
        vadd.s32        q1,  q10, q13           @ z0 + z3
        vadd.s32        q2,  q11, q12           @ z1 + z2
        vsub.s32        q8,  q10, q13           @ z0 - z3
        vsub.s32        q3,  q11, q12           @ z1 - z2
        vtrn.32         q1,  q2                 @ transpose the intermediate result
        vtrn.32         q3,  q8
        vswp            d3,  d6
        vswp            d5,  d16
        vmov.s32        d0,  #13                @ second (column) pass
        vadd.s32        q10, q1,  q3
        vsub.s32        q11, q1,  q3
        vshl.s32        q12, q2,  #3
        vshl.s32        q9,  q2,  #4
        vmul.s32        q13, q11, d0[0]         @ z1 = 13 * (temp[4*0+i] - temp[4*2+i])
        vshl.s32        q11, q8,  #4
        vadd.s32        q9,  q9,  q2
        vshl.s32        q15, q8,  #3
        vsub.s32        q12, q12, q2
        vadd.s32        q11, q11, q8
        vmul.s32        q14, q10, d0[0]         @ z0 = 13 * (temp[4*0+i] + temp[4*2+i])
        vsub.s32        q8,  q15, q8
        vsub.s32        q12, q12, q11           @ z2 =  7 * temp[4*1+i] - 17 * temp[4*3+i]
        vadd.s32        q9,  q9,  q8            @ z3 = 17 * temp[4*1+i] +  7 * temp[4*3+i]
        vadd.s32        q2,  q13, q12           @ z1 + z2
        vadd.s32        q1,  q14, q9            @ z0 + z3
        vsub.s32        q3,  q13, q12           @ z1 - z2
        vsub.s32        q15, q14, q9            @ z0 - z3
.endm
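/* Reconstruction stage: the column-pass results are rounded and narrowed
 * (vrshrn folds in the +0x200 rounding bias), transposed with vtrn, added
 * to the source pixels with widening (vaddw.u8) and written back with
 * unsigned saturation (vqmovun.s16). A hedged scalar sketch of the
 * per-pixel operation (cf. rv34_idct_add_c() in rv34dsp.c; the explicit
 * clip stands in for the crop table the C code uses):
 *
 *     for (int i = 0; i < 4; i++, dst += stride)
 *         for (int j = 0; j < 4; j++) {
 *             int v = dst[j] + ((col[j][i] + 0x200) >> 10);
 *             dst[j] = v < 0 ? 0 : v > 255 ? 255 : v;
 *         }
 *
 * where col[0..3][i] are z0+z3, z1+z2, z1-z2 and z0-z3 for column i.
 */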
/* void rv34_idct_add_c(uint8_t *dst, int stride, int16_t *block) */
function ff_rv34_idct_add_neon, export=1
        mov             r3,  r0
        rv34_inv_transform r2
        vmov.i16        q12, #0
        vrshrn.s32      d16, q1,  #10           @ (z0 + z3 + 0x200) >> 10
        vrshrn.s32      d17, q2,  #10           @ (z1 + z2 + 0x200) >> 10
        vrshrn.s32      d18, q3,  #10           @ (z1 - z2 + 0x200) >> 10
        vrshrn.s32      d19, q15, #10           @ (z0 - z3 + 0x200) >> 10
        vld1.32         {d28[]},  [r0,:32], r1
        vld1.32         {d29[]},  [r0,:32], r1
        vtrn.32         q8,  q9                 @ transpose results to row order
        vld1.32         {d28[1]}, [r0,:32], r1
        vld1.32         {d29[1]}, [r0,:32], r1
        vst1.16         {q12}, [r2,:128]!       @ memset(block,     0, 16)
        vst1.16         {q12}, [r2,:128]        @ memset(block + 8, 0, 16)
        vtrn.16         d16, d17
        vtrn.32         d28, d29                @ d28 = rows 0,1; d29 = rows 2,3
        vtrn.16         d18, d19
        vaddw.u8        q0,  q8,  d28
        vaddw.u8        q1,  q9,  d29
        vqmovun.s16     d28, q0
        vqmovun.s16     d29, q1
        vst1.32         {d28[0]}, [r3,:32], r1
        vst1.32         {d28[1]}, [r3,:32], r1
        vst1.32         {d29[0]}, [r3,:32], r1
        vst1.32         {d29[1]}, [r3,:32], r1
        bx              lr
endfunc
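/* Same transform without rounding: the column results are scaled by 3
 * (computed as x + (x << 1)) and narrowed with a plain, non-rounding
 * >> 11, then written back into the coefficient block. A hedged scalar
 * sketch of the final stage (cf. rv34_inv_transform_noround_c() in
 * rv34dsp.c):
 *
 *     block[i*4 + 0] = ((z0 + z3) * 3) >> 11;
 *     block[i*4 + 1] = ((z1 + z2) * 3) >> 11;
 *     block[i*4 + 2] = ((z1 - z2) * 3) >> 11;
 *     block[i*4 + 3] = ((z0 - z3) * 3) >> 11;
 *
 * The lane-by-lane vst4.16 stores perform the transpose from column
 * vectors back to row order for free.
 */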
/* void rv34_inv_transform_noround_neon(int16_t *block) */
function ff_rv34_inv_transform_noround_neon, export=1
        rv34_inv_transform r0
        vshl.s32        q11, q2,  #1
        vshl.s32        q10, q1,  #1
        vshl.s32        q12, q3,  #1
        vshl.s32        q13, q15, #1
        vadd.s32        q11, q11, q2            @ (z1 + z2) * 3
        vadd.s32        q10, q10, q1            @ (z0 + z3) * 3
        vadd.s32        q12, q12, q3            @ (z1 - z2) * 3
        vadd.s32        q13, q13, q15           @ (z0 - z3) * 3
        vshrn.s32       d0,  q10, #11           @ (z0 + z3) * 3 >> 11
        vshrn.s32       d1,  q11, #11           @ (z1 + z2) * 3 >> 11
        vshrn.s32       d2,  q12, #11           @ (z1 - z2) * 3 >> 11
        vshrn.s32       d3,  q13, #11           @ (z0 - z3) * 3 >> 11
        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r0,:64]! @ interleaved stores
        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r0,:64]! @ transpose back to rows
        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r0,:64]!
        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r0,:64]!
        bx              lr
endfunc
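/* DC-only case: all 16 output deltas are identical, so the whole
 * transform collapses to one multiply. A hedged scalar sketch (cf.
 * rv34_idct_dc_add_c() in rv34dsp.c; the explicit clip and the function
 * name are illustrative):
 *
 *     static void idct_dc_add_sketch(uint8_t *dst, int stride, int dc)
 *     {
 *         const int d = (13 * 13 * dc + 0x200) >> 10;  // rounded, as vrshrn #10
 *         for (int i = 0; i < 4; i++, dst += stride)
 *             for (int j = 0; j < 4; j++) {
 *                 int v = dst[j] + d;
 *                 dst[j] = v < 0 ? 0 : v > 255 ? 255 : v;  // vqmovun.s16
 *             }
 *     }
 */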
/* void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc) */
function ff_rv34_idct_dc_add_neon, export=1
        mov             r3,  r0
        vld1.32         {d28[]},  [r0,:32], r1
        vld1.32         {d29[]},  [r0,:32], r1
        vdup.16         d0,  r2
        vmov.s16        d1,  #169               @ 13 * 13
        vld1.32         {d28[1]}, [r0,:32], r1
        vmull.s16       q1,  d0,  d1            @ dc * 13 * 13
        vld1.32         {d29[1]}, [r0,:32], r1
        vrshrn.s32      d0,  q1,  #10           @ (dc * 13 * 13 + 0x200) >> 10
        vmov            d1,  d0
        vaddw.u8        q2,  q0,  d28
        vaddw.u8        q3,  q0,  d29
        vqmovun.s16     d28, q2
        vqmovun.s16     d29, q3
        vst1.32         {d28[0]}, [r3,:32], r1
        vst1.32         {d29[0]}, [r3,:32], r1
        vst1.32         {d28[1]}, [r3,:32], r1
        vst1.32         {d29[1]}, [r3,:32], r1
        bx              lr
endfunc
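/* DC-only, no rounding: every coefficient becomes the scaled DC value.
 * A hedged scalar sketch (cf. rv34_inv_transform_dc_noround_c() in
 * rv34dsp.c; the function name is illustrative):
 *
 *     static void dc_noround_sketch(int16_t block[16])
 *     {
 *         const int16_t dc = (13 * 13 * 3 * block[0]) >> 11;
 *         for (int i = 0; i < 16; i++)
 *             block[i] = dc;
 *     }
 */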
/* void rv34_inv_transform_dc_noround_c(int16_t *block) */
function ff_rv34_inv_transform_noround_dc_neon, export=1
        vld1.16         {d28[]}, [r0,:16]       @ block[0]
        vmov.i16        d4,  #251               @ 507 = 13 * 13 * 3 is not encodable as
        vorr.s16        d4,  #256               @ one immediate: build it as 251 | 256
        vmull.s16       q3,  d28, d4
        vshrn.s32       d0,  q3,  #11           @ (block[0] * 507) >> 11
        vmov.i16        d1,  d0
        vst1.64         {q0}, [r0,:128]!        @ fill all 16 coefficients with the DC
        vst1.64         {q0}, [r0,:128]!
        bx              lr
endfunc