/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
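
@ void ff_h264_idct_add_neon(uint8_t *dst, int16_t *block, int stride)
@
@ 4x4 H.264 inverse transform: descale the coefficients, add the result
@ to the prediction in dst and clear the coefficient block.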
function ff_h264_idct_add_neon, export=1
        vld1.64         {d0-d3},  [r1,:128]     @ load the 16 coefficients
        vmov.i16        q15, #0

        vswp            d1,  d2                 @ q0 = rows 0,2  q1 = rows 1,3
        vst1.16         {q15},    [r1,:128]!    @ clear the block as we go
        vadd.i16        d4,  d0,  d1
        vst1.16         {q15},    [r1,:128]!
        vshr.s16        q8,  q1,  #1
        vsub.i16        d5,  d0,  d1
        vadd.i16        d6,  d2,  d17
        vsub.i16        d7,  d16, d3
        vadd.i16        q0,  q2,  q3
        vsub.i16        q1,  q2,  q3

        vtrn.16         d0,  d1                 @ transpose for the column pass
        vtrn.16         d3,  d2
        vtrn.32         d0,  d3
        vtrn.32         d1,  d2

        vadd.i16        d4,  d0,  d3
        vld1.32         {d18[0]}, [r0,:32], r2  @ load dst rows, interleaved
        vswp            d1,  d3                 @   to match the output lanes
        vshr.s16        q8,  q1,  #1
        vld1.32         {d19[1]}, [r0,:32], r2
        vsub.i16        d5,  d0,  d1
        vld1.32         {d18[1]}, [r0,:32], r2
        vadd.i16        d6,  d16, d3
        vld1.32         {d19[0]}, [r0,:32], r2
        vsub.i16        d7,  d2,  d17
        sub             r0,  r0,  r2,  lsl #2   @ rewind dst to the first row
        vadd.i16        q0,  q2,  q3
        vsub.i16        q1,  q2,  q3

        vrshr.s16       q0,  q0,  #6            @ round: (x + 32) >> 6
        vrshr.s16       q1,  q1,  #6

        vaddw.u8        q0,  q0,  d18           @ add the prediction
        vaddw.u8        q1,  q1,  d19

        vqmovun.s16     d0,  q0                 @ clip to [0,255]
        vqmovun.s16     d1,  q1

        vst1.32         {d0[0]},  [r0,:32], r2
        vst1.32         {d1[1]},  [r0,:32], r2
        vst1.32         {d0[1]},  [r0,:32], r2
        vst1.32         {d1[0]},  [r0,:32], r2

        sub             r1,  r1,  #32           @ restore the block pointer
        bx              lr
endfunc
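
@ void ff_h264_idct_dc_add_neon(uint8_t *dst, int16_t *block, int stride)
@
@ DC-only variant: block[0] holds the sole nonzero coefficient, which is
@ rounded, added to all 16 pixels and cleared from the block.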
function ff_h264_idct_dc_add_neon, export=1
        mov             r3,  #0
        vld1.16         {d2[],d3[]}, [r1,:16]   @ splat the DC coefficient
        strh            r3,  [r1]               @ clear it in the block
        vrshr.s16       q1,  q1,  #6            @ round: (dc + 32) >> 6
        vld1.32         {d0[0]},  [r0,:32], r2
        vld1.32         {d0[1]},  [r0,:32], r2
        vaddw.u8        q2,  q1,  d0
        vld1.32         {d1[0]},  [r0,:32], r2
        vld1.32         {d1[1]},  [r0,:32], r2
        vaddw.u8        q1,  q1,  d1
        vqmovun.s16     d0,  q2
        vqmovun.s16     d1,  q1
        sub             r0,  r0,  r2,  lsl #2   @ rewind dst to the first row
        vst1.32         {d0[0]},  [r0,:32], r2
        vst1.32         {d0[1]},  [r0,:32], r2
        vst1.32         {d1[0]},  [r0,:32], r2
        vst1.32         {d1[1]},  [r0,:32], r2
        bx              lr
endfunc
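
@ void ff_h264_idct_add16_neon(uint8_t *dst, const int *block_offset,
@                              int16_t *block, int stride,
@                              const uint8_t nnzc[15*8])
@
@ Add all 16 luma 4x4 blocks of a macroblock, dispatching each block to
@ the full or DC-only transform based on its non-zero coefficient count.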
function ff_h264_idct_add16_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0                 @ dst
        mov             r5,  r1                 @ block_offset
        mov             r1,  r2                 @ block
        mov             r2,  r3                 @ stride
        ldr             r6,  [sp, #24]          @ nnzc
        movrel          r7,  scan8
        mov             ip,  #16
1:      ldrb            r8,  [r7], #1
        ldr             r0,  [r5], #4
        ldrb            r8,  [r6, r8]           @ nnzc[scan8[i]]
        subs            r8,  r8,  #1
        blt             2f                      @ skip empty blocks
        ldrsh           lr,  [r1]               @ DC coefficient
        add             r0,  r0,  r4
        it              ne
        movne           lr,  #0                 @ force full IDCT if nnzc != 1
        cmp             lr,  #0
        ite             ne
        adrne           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        blx             lr
2:      subs            ip,  ip,  #1
        add             r1,  r1,  #32           @ next 4x4 block
        bne             1b
        pop             {r4-r8,pc}
endfunc
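
@ void ff_h264_idct_add16intra_neon(uint8_t *dst, const int *block_offset,
@                                   int16_t *block, int stride,
@                                   const uint8_t nnzc[15*8])
@
@ Intra variant: a block with nnzc == 0 may still carry a DC coefficient,
@ so the DC-only transform is used whenever nnzc is zero but the DC is not.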
function ff_h264_idct_add16intra_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0                 @ dst
        mov             r5,  r1                 @ block_offset
        mov             r1,  r2                 @ block
        mov             r2,  r3                 @ stride
        ldr             r6,  [sp, #24]          @ nnzc
        movrel          r7,  scan8
        mov             ip,  #16
1:      ldrb            r8,  [r7], #1
        ldr             r0,  [r5], #4
        ldrb            r8,  [r6, r8]           @ nnzc[scan8[i]]
        add             r0,  r0,  r4
        cmp             r8,  #0
        ldrsh           r8,  [r1]               @ DC coefficient
        iteet           ne
        adrne           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        cmpeq           r8,  #0                 @ skip if nnzc == 0 and DC == 0
        blxne           lr
        subs            ip,  ip,  #1
        add             r1,  r1,  #32           @ next 4x4 block
        bne             1b
        pop             {r4-r8,pc}
endfunc
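
@ void ff_h264_idct_add8_neon(uint8_t **dest, const int *block_offset,
@                             int16_t *block, int stride,
@                             const uint8_t nnzc[15*8])
@
@ Chroma variant: dest holds the U and V plane pointers; blocks 16-19 and
@ 32-35 are added with the same nnzc/DC dispatch as the intra luma case.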
function ff_h264_idct_add8_neon, export=1
        push            {r4-r10,lr}
        ldm             r0,  {r4,r9}            @ dest[0] (U), dest[1] (V)
        add             r5,  r1,  #16*4         @ block_offset + 16
        add             r1,  r2,  #16*32        @ first chroma block
        mov             r2,  r3                 @ stride
        mov             r10, r1
        ldr             r6,  [sp, #32]          @ nnzc
        movrel          r7,  scan8+16
        mov             r12, #0
1:      ldrb            r8,  [r7, r12]
        ldr             r0,  [r5, r12, lsl #2]
        ldrb            r8,  [r6, r8]           @ nnzc[scan8[i]]
        add             r0,  r0,  r4
        add             r1,  r10, r12, lsl #5
        cmp             r8,  #0
        ldrsh           r8,  [r1]               @ DC coefficient
        iteet           ne
        adrne           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        cmpeq           r8,  #0                 @ skip if nnzc == 0 and DC == 0
        blxne           lr
        add             r12, r12, #1
        cmp             r12, #4
        itt             eq
        moveq           r12, #16                @ switch to the second
        moveq           r4,  r9                 @   chroma plane
        cmp             r12, #20
        blt             1b
        pop             {r4-r10,pc}
endfunc
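
@ One pass of the 8x8 inverse transform over eight columns of q8-q15.
@ Pass 0 transforms the rows as loaded and begins the 8x8 transpose;
@ pass 1 completes the transpose and transforms the columns.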
.macro  idct8x8_cols    pass
        .if \pass == 0
        qa      .req    q2
        qb      .req    q14
        vshr.s16        q2,  q10, #1
        vadd.i16        q0,  q8,  q12
        vld1.16         {q14-q15},[r1,:128]     @ load rows 6-7
        vst1.16         {q3}, [r1,:128]!        @ and clear them (q3 == 0)
        vst1.16         {q3}, [r1,:128]!
        vsub.i16        q1,  q8,  q12
        vshr.s16        q3,  q14, #1
        vsub.i16        q2,  q2,  q14
        vadd.i16        q3,  q3,  q10
        .else
        qa      .req    q14
        qb      .req    q2
        vtrn.32         q8,  q10                @ finish the transpose
        vtrn.16         q12, q13
        vtrn.32         q9,  q11
        vtrn.32         q12, q2
        vtrn.32         q13, q15
        vswp            d21, d4
        vshr.s16        q14, q10, #1
        vswp            d17, d24
        vshr.s16        q3,  q2,  #1
        vswp            d19, d26
        vadd.i16        q0,  q8,  q12
        vswp            d23, d30
        vsub.i16        q1,  q8,  q12
        vsub.i16        q14, q14, q2
        vadd.i16        q3,  q3,  q10
        .endif
        vadd.i16        q10, q1,  qa
        vsub.i16        q12, q1,  qa
        vadd.i16        q8,  q0,  q3
        vsub.i16        qb,  q0,  q3
        vsub.i16        q0,  q13, q11
        vadd.i16        q1,  q15, q9
        vsub.i16        qa,  q15, q9
        vadd.i16        q3,  q13, q11
        vsub.i16        q0,  q0,  q15
        vsub.i16        q1,  q1,  q11
        vadd.i16        qa,  qa,  q13
        vadd.i16        q3,  q3,  q9
        vshr.s16        q9,  q9,  #1
        vshr.s16        q11, q11, #1
        vshr.s16        q13, q13, #1
        vshr.s16        q15, q15, #1
        vsub.i16        q0,  q0,  q15
        vsub.i16        q1,  q1,  q11
        vadd.i16        qa,  qa,  q13
        vadd.i16        q3,  q3,  q9
        vshr.s16        q9,  q0,  #2
        vshr.s16        q11, q1,  #2
        vshr.s16        q13, qa,  #2
        vshr.s16        q15, q3,  #2
        vsub.i16        q3,  q3,  q9
        vsub.i16        qa,  q11, qa
        vadd.i16        q1,  q1,  q13
        vadd.i16        q0,  q0,  q15
        .if \pass == 0
        vsub.i16        q15, q8,  q3
        vadd.i16        q8,  q8,  q3
        vadd.i16        q9,  q10, q2
        vsub.i16        q2,  q10, q2
        vtrn.16         q8,  q9                 @ start the transpose
        vadd.i16        q10, q12, q1
        vtrn.16         q2,  q15
        vadd.i16        q11, q14, q0
        vsub.i16        q13, q12, q1
        vtrn.16         q10, q11
        vsub.i16        q12, q14, q0
        .else
        vsub.i16        q15, q8,  q3
        vadd.i16        q8,  q8,  q3
        vadd.i16        q9,  q10, q14
        vsub.i16        q14, q10, q14
        vadd.i16        q10, q12, q1
        vsub.i16        q13, q12, q1
        vadd.i16        q11, q2,  q0
        vsub.i16        q12, q2,  q0
        .endif
        .unreq          qa
        .unreq          qb
.endm
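
@ void ff_h264_idct8_add_neon(uint8_t *dst, int16_t *block, int stride)
@
@ 8x8 H.264 inverse transform: descale the coefficients, add the result
@ to the prediction in dst and clear the coefficient block.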
function ff_h264_idct8_add_neon, export=1
        vmov.i16        q3,       #0
        vld1.16         {q8-q9},  [r1,:128]     @ load rows 0-5 and clear
        vst1.16         {q3},     [r1,:128]!    @   the block behind them
        vst1.16         {q3},     [r1,:128]!
        vld1.16         {q10-q11},[r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!
        vld1.16         {q12-q13},[r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!

        idct8x8_cols    0
        idct8x8_cols    1

        mov             r3,  r0
        vrshr.s16       q8,  q8,  #6            @ round: (x + 32) >> 6
        vld1.8          {d0},     [r0,:64], r2
        vrshr.s16       q9,  q9,  #6
        vld1.8          {d1},     [r0,:64], r2
        vrshr.s16       q10, q10, #6
        vld1.8          {d2},     [r0,:64], r2
        vrshr.s16       q11, q11, #6
        vld1.8          {d3},     [r0,:64], r2
        vrshr.s16       q12, q12, #6
        vld1.8          {d4},     [r0,:64], r2
        vrshr.s16       q13, q13, #6
        vld1.8          {d5},     [r0,:64], r2
        vrshr.s16       q14, q14, #6
        vld1.8          {d6},     [r0,:64], r2
        vrshr.s16       q15, q15, #6
        vld1.8          {d7},     [r0,:64], r2
        vaddw.u8        q8,  q8,  d0            @ add the prediction
        vaddw.u8        q9,  q9,  d1
        vaddw.u8        q10, q10, d2
        vqmovun.s16     d0,  q8                 @ clip to [0,255]
        vaddw.u8        q11, q11, d3
        vqmovun.s16     d1,  q9
        vaddw.u8        q12, q12, d4
        vqmovun.s16     d2,  q10
        vst1.8          {d0},     [r3,:64], r2
        vaddw.u8        q13, q13, d5
        vqmovun.s16     d3,  q11
        vst1.8          {d1},     [r3,:64], r2
        vaddw.u8        q14, q14, d6
        vqmovun.s16     d4,  q12
        vst1.8          {d2},     [r3,:64], r2
        vaddw.u8        q15, q15, d7
        vqmovun.s16     d5,  q13
        vst1.8          {d3},     [r3,:64], r2
        vqmovun.s16     d6,  q14
        vqmovun.s16     d7,  q15
        vst1.8          {d4},     [r3,:64], r2
        vst1.8          {d5},     [r3,:64], r2
        vst1.8          {d6},     [r3,:64], r2
        vst1.8          {d7},     [r3,:64], r2

        sub             r1,  r1,  #128          @ restore the block pointer
        bx              lr
endfunc
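
@ void ff_h264_idct8_dc_add_neon(uint8_t *dst, int16_t *block, int stride)
@
@ DC-only variant of the 8x8 transform.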
function ff_h264_idct8_dc_add_neon, export=1
        mov             r3,  #0
        vld1.16         {d30[],d31[]},[r1,:16]  @ splat the DC coefficient
        strh            r3,  [r1]               @ clear it in the block
        vld1.32         {d0},     [r0,:64], r2
        vrshr.s16       q15, q15, #6            @ round: (dc + 32) >> 6
        vld1.32         {d1},     [r0,:64], r2
        vld1.32         {d2},     [r0,:64], r2
        vaddw.u8        q8,  q15, d0
        vld1.32         {d3},     [r0,:64], r2
        vaddw.u8        q9,  q15, d1
        vld1.32         {d4},     [r0,:64], r2
        vaddw.u8        q10, q15, d2
        vld1.32         {d5},     [r0,:64], r2
        vaddw.u8        q11, q15, d3
        vld1.32         {d6},     [r0,:64], r2
        vaddw.u8        q12, q15, d4
        vld1.32         {d7},     [r0,:64], r2
        vaddw.u8        q13, q15, d5
        vaddw.u8        q14, q15, d6
        vaddw.u8        q15, q15, d7
        vqmovun.s16     d0,  q8
        vqmovun.s16     d1,  q9
        vqmovun.s16     d2,  q10
        vqmovun.s16     d3,  q11
        sub             r0,  r0,  r2,  lsl #3   @ rewind dst to the first row
        vst1.32         {d0},     [r0,:64], r2
        vqmovun.s16     d4,  q12
        vst1.32         {d1},     [r0,:64], r2
        vqmovun.s16     d5,  q13
        vst1.32         {d2},     [r0,:64], r2
        vqmovun.s16     d6,  q14
        vst1.32         {d3},     [r0,:64], r2
        vqmovun.s16     d7,  q15
        vst1.32         {d4},     [r0,:64], r2
        vst1.32         {d5},     [r0,:64], r2
        vst1.32         {d6},     [r0,:64], r2
        vst1.32         {d7},     [r0,:64], r2
        bx              lr
endfunc
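
@ void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
@                              int16_t *block, int stride,
@                              const uint8_t nnzc[15*8])
@
@ Add the four 8x8 luma blocks of a macroblock, dispatching each to the
@ full or DC-only transform based on its non-zero coefficient count.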
function ff_h264_idct8_add4_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0                 @ dst
        mov             r5,  r1                 @ block_offset
        mov             r1,  r2                 @ block
        mov             r2,  r3                 @ stride
        ldr             r6,  [sp, #24]          @ nnzc
        movrel          r7,  scan8
        mov             r12, #16
1:      ldrb            r8,  [r7], #4
        ldr             r0,  [r5], #16
        ldrb            r8,  [r6, r8]           @ nnzc[scan8[i]]
        subs            r8,  r8,  #1
        blt             2f                      @ skip empty blocks
        ldrsh           lr,  [r1]               @ DC coefficient
        add             r0,  r0,  r4
        it              ne
        movne           lr,  #0                 @ force full IDCT if nnzc != 1
        cmp             lr,  #0
        ite             ne
        adrne           lr,  X(ff_h264_idct8_dc_add_neon) + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct8_add_neon)    + CONFIG_THUMB
        blx             lr
2:      subs            r12, r12, #4
        add             r1,  r1,  #128          @ next 8x8 block
        bne             1b
        pop             {r4-r8,pc}
endfunc
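
@ Mirrors the decoder's scan8[] table: maps a block index to its position
@ in the non_zero_count cache (stride 8), luma plane first, then the two
@ chroma planes.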
const   scan8
        .byte           4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
        .byte           6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
        .byte           4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
        .byte           6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
        .byte           4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
        .byte           6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
        .byte           4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
        .byte           6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
        .byte           4+11*8, 5+11*8, 4+12*8, 5+12*8
        .byte           6+11*8, 7+11*8, 6+12*8, 7+12*8
        .byte           4+13*8, 5+13*8, 4+14*8, 5+14*8
        .byte           6+13*8, 7+13*8, 6+14*8, 7+14*8
endconst