/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

/* H.264 loop filter */

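@ Common prologue for the loop filter entry points below.  On entry
@ r0 = pix, r1 = stride, r2 = alpha, r3 = beta, and a pointer to the
@ four tc0 values sits on the stack.  The tc0 bytes are loaded into
@ d24[0]; the function returns early if alpha or beta is zero, or if
@ all four tc0 values are negative (the lsl #16 / lsl #8 shifts AND
@ the sign bits of the four packed bytes together into bit 31).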
.macro  h264_loop_filter_start
        ldr             r12, [sp]
        tst             r2,  r2
        ldr             r12, [r12]
        it              ne
        tstne           r3,  r3
        vmov.32         d24[0], r12
        and             r12, r12, r12, lsl #16
        it              eq
        bxeq            lr
        ands            r12, r12, r12, lsl #8
        it              lt
        bxlt            lr
.endm

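@ Core luma filter.  Expects the edge pixels already loaded as rows of
@ 16: q10 = p2, q9 = p1, q8 = p0, q0 = q0, q1 = q1, q2 = q2, with the
@ packed tc0 bytes in d24.  On exit q8/q0 hold the filtered p0/q0, and
@ q4/q5 hold p1/q1 with the filtered values selected (via vbsl) only
@ where abs(p2-p0) resp. abs(q2-q0) is below beta.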
.macro  h264_loop_filter_luma
        vdup.8          q11, r2         @ alpha
        vmovl.u8        q12, d24
        vabd.u8         q6,  q8,  q0    @ abs(p0 - q0)
        vmovl.u16       q12, d24
        vabd.u8         q14, q9,  q8    @ abs(p1 - p0)
        vsli.16         q12, q12, #8
        vabd.u8         q15, q1,  q0    @ abs(q1 - q0)
        vsli.32         q12, q12, #16
        vclt.u8         q6,  q6,  q11   @ < alpha
        vdup.8          q11, r3         @ beta
        vclt.s8         q7,  q12, #0
        vclt.u8         q14, q14, q11   @ < beta
        vclt.u8         q15, q15, q11   @ < beta
        vbic            q6,  q6,  q7
        vabd.u8         q4,  q10, q8    @ abs(p2 - p0)
        vand            q6,  q6,  q14
        vabd.u8         q5,  q2,  q0    @ abs(q2 - q0)
        vclt.u8         q4,  q4,  q11   @ < beta
        vand            q6,  q6,  q15
        vclt.u8         q5,  q5,  q11   @ < beta
        vand            q4,  q4,  q6
        vand            q5,  q5,  q6
        vand            q12, q12, q6
        vrhadd.u8       q14, q8,  q0
        vsub.i8         q6,  q12, q4
        vqadd.u8        q7,  q9,  q12
        vhadd.u8        q10, q10, q14
        vsub.i8         q6,  q6,  q5
        vhadd.u8        q14, q2,  q14
        vmin.u8         q7,  q7,  q10
        vqsub.u8        q11, q9,  q12
        vqadd.u8        q2,  q1,  q12
        vmax.u8         q7,  q7,  q11
        vqsub.u8        q11, q1,  q12
        vmin.u8         q14, q2,  q14
        vmovl.u8        q2,  d0
        vmax.u8         q14, q14, q11
        vmovl.u8        q10, d1
        vsubw.u8        q2,  q2,  d16
        vsubw.u8        q10, q10, d17
        vshl.i16        q2,  q2,  #2
        vshl.i16        q10, q10, #2
        vaddw.u8        q2,  q2,  d18
        vaddw.u8        q10, q10, d19
        vsubw.u8        q2,  q2,  d2
        vsubw.u8        q10, q10, d3
        vrshrn.i16      d4,  q2,  #3
        vrshrn.i16      d5,  q10, #3
        vbsl            q4,  q7,  q9
        vbsl            q5,  q14, q1
        vneg.s8         q7,  q6
        vmovl.u8        q14, d16
        vmin.s8         q2,  q2,  q6
        vmovl.u8        q6,  d17
        vmax.s8         q2,  q2,  q7
        vmovl.u8        q11, d0
        vmovl.u8        q12, d1
        vaddw.s8        q14, q14, d4
        vaddw.s8        q6,  q6,  d5
        vsubw.s8        q11, q11, d4
        vsubw.s8        q12, q12, d5
        vqmovun.s16     d16, q14
        vqmovun.s16     d17, q6
        vqmovun.s16     d0,  q11
        vqmovun.s16     d1,  q12
.endm

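@ void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Filters a horizontal block edge: three rows are loaded on each side
@ of the edge, the luma filter runs on them, and the four rows that
@ can change (p1, p0, q0, q1) are stored back.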
function ff_h264_v_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        vld1.8          {d0, d1},  [r0,:128], r1
        vld1.8          {d2, d3},  [r0,:128], r1
        vld1.8          {d4, d5},  [r0,:128], r1
        sub             r0,  r0,  r1,  lsl #2
        sub             r0,  r0,  r1,  lsl #1
        vld1.8          {d20,d21}, [r0,:128], r1
        vld1.8          {d18,d19}, [r0,:128], r1
        vld1.8          {d16,d17}, [r0,:128], r1

        vpush           {d8-d15}

        h264_loop_filter_luma

        sub             r0,  r0,  r1,  lsl #1
        vst1.8          {d8, d9},  [r0,:128], r1
        vst1.8          {d16,d17}, [r0,:128], r1
        vst1.8          {d0, d1},  [r0,:128], r1
        vst1.8          {d10,d11}, [r0,:128]

        vpop            {d8-d15}
        bx              lr
endfunc

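@ void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Vertical-edge variant: 16 rows of 8 pixels straddling the edge are
@ loaded and transposed so the same row-oriented filter applies, then
@ the four modified columns are transposed back and written with
@ 32-bit lane stores.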
function ff_h264_h_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #4
        vld1.8          {d6},  [r0], r1
        vld1.8          {d20}, [r0], r1
        vld1.8          {d18}, [r0], r1
        vld1.8          {d16}, [r0], r1
        vld1.8          {d0},  [r0], r1
        vld1.8          {d2},  [r0], r1
        vld1.8          {d4},  [r0], r1
        vld1.8          {d26}, [r0], r1
        vld1.8          {d7},  [r0], r1
        vld1.8          {d21}, [r0], r1
        vld1.8          {d19}, [r0], r1
        vld1.8          {d17}, [r0], r1
        vld1.8          {d1},  [r0], r1
        vld1.8          {d3},  [r0], r1
        vld1.8          {d5},  [r0], r1
        vld1.8          {d27}, [r0], r1

        transpose_8x8   q3, q10, q9, q8, q0, q1, q2, q13

        vpush           {d8-d15}

        h264_loop_filter_luma

        transpose_4x4   q4, q8, q0, q5

        sub             r0,  r0,  r1,  lsl #4
        add             r0,  r0,  #2
        vst1.32         {d8[0]},  [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d10[0]}, [r0], r1
        vst1.32         {d8[1]},  [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d10[1]}, [r0], r1
        vst1.32         {d9[0]},  [r0], r1
        vst1.32         {d17[0]}, [r0], r1
        vst1.32         {d1[0]},  [r0], r1
        vst1.32         {d11[0]}, [r0], r1
        vst1.32         {d9[1]},  [r0], r1
        vst1.32         {d17[1]}, [r0], r1
        vst1.32         {d1[1]},  [r0], r1
        vst1.32         {d11[1]}, [r0], r1

        vpop            {d8-d15}
        bx              lr
endfunc

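@ Core chroma filter.  Expects d18 = p1, d16 = p0, d0 = q0, d2 = q1 and
@ the packed tc0 bytes in d24 (expanded so each value covers a pixel
@ pair); computes the filter delta, clamps it to the tc range, and
@ writes the new p0/q0 back to d16/d0.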
.macro  h264_loop_filter_chroma
        vdup.8          d22, r2         @ alpha
        vmovl.u8        q12, d24
        vabd.u8         d26, d16, d0    @ abs(p0 - q0)
        vmovl.u8        q2,  d0
        vabd.u8         d28, d18, d16   @ abs(p1 - p0)
        vsubw.u8        q2,  q2,  d16
        vsli.16         d24, d24, #8
        vshl.i16        q2,  q2,  #2
        vabd.u8         d30, d2,  d0    @ abs(q1 - q0)
        vaddw.u8        q2,  q2,  d18
        vclt.u8         d26, d26, d22   @ < alpha
        vsubw.u8        q2,  q2,  d2
        vdup.8          d22, r3         @ beta
        vrshrn.i16      d4,  q2,  #3
        vclt.u8         d28, d28, d22   @ < beta
        vclt.u8         d30, d30, d22   @ < beta
        vmin.s8         d4,  d4,  d24
        vneg.s8         d25, d24
        vand            d26, d26, d28
        vmax.s8         d4,  d4,  d25
        vand            d26, d26, d30
        vmovl.u8        q11, d0
        vand            d4,  d4,  d26
        vmovl.u8        q14, d16
        vaddw.s8        q14, q14, d4
        vsubw.s8        q11, q11, d4
        vqmovun.s16     d16, q14
        vqmovun.s16     d0,  q11
.endm

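@ void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Horizontal-edge chroma filter: two rows on each side of the edge are
@ loaded and filtered, and the changed p0/q0 rows are stored back.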
function ff_h264_v_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  r1,  lsl #1
        vld1.8          {d18}, [r0,:64], r1
        vld1.8          {d16}, [r0,:64], r1
        vld1.8          {d0},  [r0,:64], r1
        vld1.8          {d2},  [r0,:64]

        h264_loop_filter_chroma

        sub             r0,  r0,  r1,  lsl #1
        vst1.8          {d16}, [r0,:64], r1
        vst1.8          {d0},  [r0,:64], r1

        bx              lr
endfunc

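@ void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Vertical-edge chroma filter: eight rows of 4 pixels are loaded and
@ transposed with vtrn, filtered as rows, then transposed back before
@ being stored.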
function ff_h264_h_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #2
        vld1.32         {d18[0]}, [r0], r1
        vld1.32         {d16[0]}, [r0], r1
        vld1.32         {d0[0]},  [r0], r1
        vld1.32         {d2[0]},  [r0], r1
        vld1.32         {d18[1]}, [r0], r1
        vld1.32         {d16[1]}, [r0], r1
        vld1.32         {d0[1]},  [r0], r1
        vld1.32         {d2[1]},  [r0], r1

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        h264_loop_filter_chroma

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        sub             r0,  r0,  r1,  lsl #3
        vst1.32         {d18[0]}, [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d2[0]},  [r0], r1
        vst1.32         {d18[1]}, [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d2[1]},  [r0], r1

        bx              lr
endfunc

@ Biweighted prediction

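/* The biweight_* macros below compute H.264 bidirectional weighted
 * prediction.  Roughly, per output pixel (a C model, assuming the
 * caller has already combined the two per-list offsets into a single
 * 'offset'):
 *
 *     dst = av_clip_uint8((weightd * src0 + weights * src1 +
 *                          (((offset + 1) | 1) << log2_denom))
 *                         >> (log2_denom + 1));
 *
 * q8 is preloaded with the rounding/offset term and q9 with
 * ~log2_denom, so vshl.s16 by q9 is an arithmetic shift right by
 * log2_denom + 1.  The macs/macd macro arguments select vmlal.u8 or
 * vmlsl.u8 per source term; the signs of the weights are resolved by
 * the dispatch code in biweight_func. */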
.macro  biweight_16 macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q2,  q8
        vmov            q3,  q8
1:      subs            r3,  r3,  #2
        vld1.8          {d20-d21},[r0,:128], r2
        \macd           q2,  d0,  d20
        pld             [r0]
        \macd           q3,  d0,  d21
        vld1.8          {d22-d23},[r1,:128], r2
        \macs           q2,  d1,  d22
        pld             [r1]
        \macs           q3,  d1,  d23
        vmov            q12, q8
        vld1.8          {d28-d29},[r0,:128], r2
        vmov            q13, q8
        \macd           q12, d0,  d28
        pld             [r0]
        \macd           q13, d0,  d29
        vld1.8          {d30-d31},[r1,:128], r2
        \macs           q12, d1,  d30
        pld             [r1]
        \macs           q13, d1,  d31
        vshl.s16        q2,  q2,  q9
        vshl.s16        q3,  q3,  q9
        vqmovun.s16     d4,  q2
        vqmovun.s16     d5,  q3
        vshl.s16        q12, q12, q9
        vshl.s16        q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vmov            q3,  q8
        vst1.8          {d4- d5}, [r6,:128], r2
        vmov            q2,  q8
        vst1.8          {d24-d25},[r6,:128], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

.macro  biweight_8 macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r3,  r3,  #2
        vld1.8          {d4},[r0,:64], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.8          {d5},[r1,:64], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        vld1.8          {d6},[r0,:64], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.8          {d7},[r1,:64], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.8          {d2},[r6,:64], r2
        vmov            q1,  q8
        vst1.8          {d4},[r6,:64], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

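@ The 4-pixel-wide variant consumes four rows per iteration; when only
@ two rows remain, the subs goes negative and the tail at label 2
@ finishes the first pair of rows.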
.macro  biweight_4 macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r3,  r3,  #4
        vld1.32         {d4[0]},[r0,:32], r2
        vld1.32         {d4[1]},[r0,:32], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.32         {d5[0]},[r1,:32], r2
        vld1.32         {d5[1]},[r1,:32], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        blt             2f
        vld1.32         {d6[0]},[r0,:32], r2
        vld1.32         {d6[1]},[r0,:32], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.32         {d7[0]},[r1,:32], r2
        vld1.32         {d7[1]},[r1,:32], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        vmov            q1,  q8
        vst1.32         {d4[0]},[r6,:32], r2
        vst1.32         {d4[1]},[r6,:32], r2
        bne             1b
        pop             {r4-r6, pc}
2:      vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        pop             {r4-r6, pc}
.endm

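@ void ff_biweight_h264_pixels_{16,8,4}_neon(uint8_t *dst, uint8_t *src,
@         int stride, int height, int log2_denom, int weightd,
@         int weights, int offset)
@ vmlal.u8/vmlsl.u8 only take unsigned 8-bit operands, so the entry
@ code classifies the sign combination of the two weights (lr becomes
@ 0-3), negates the negative ones, and jumps to the biweight_\w
@ expansion whose mac instructions add or subtract each term as needed.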
.macro  biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
        push            {r4-r6, lr}
        ldr             r12, [sp, #16]
        add             r4,  sp,  #20
        ldm             r4,  {r4-r6}
        lsr             lr,  r4,  #31
        add             r6,  r6,  #1
        eors            lr,  lr,  r5,  lsr #30
        orr             r6,  r6,  #1
        vdup.16         q9,  r12
        lsl             r6,  r6,  r12
        vmvn            q9,  q9
        vdup.16         q8,  r6
        mov             r6,  r0
        beq             10f
        subs            lr,  lr,  #1
        beq             20f
        subs            lr,  lr,  #1
        beq             30f
        b               40f
10:     biweight_\w     vmlal.u8, vmlal.u8
20:     rsb             r4,  r4,  #0
        biweight_\w     vmlal.u8, vmlsl.u8
30:     rsb             r4,  r4,  #0
        rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlsl.u8
40:     rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlal.u8
endfunc
.endm

        biweight_func   16
        biweight_func   8
        biweight_func   4

@ Weighted prediction

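/* Unidirectional weighted prediction, roughly (a C model):
 *
 *     dst = av_clip_uint8(((weight * src) >> log2_denom) + offset);
 *
 * The code folds the offset in before the shift, computing
 * (offset << log2_denom) + weight * src.  For log2_denom > 1 a
 * vhadd.s16 halves that sum first so the 16-bit accumulator cannot
 * overflow, leaving a rounding shift by log2_denom - 1; otherwise a
 * plain vadd.s16 and a rounding shift by log2_denom are used. */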
.macro  weight_16 add
        vdup.8          d0,  r12
1:      subs            r2,  r2,  #2
        vld1.8          {d20-d21},[r0,:128], r1
        vmull.u8        q2,  d0,  d20
        pld             [r0]
        vmull.u8        q3,  d0,  d21
        vld1.8          {d28-d29},[r0,:128], r1
        vmull.u8        q12, d0,  d28
        pld             [r0]
        vmull.u8        q13, d0,  d29
        \add            q2,  q8,  q2
        vrshl.s16       q2,  q2,  q9
        \add            q3,  q8,  q3
        vrshl.s16       q3,  q3,  q9
        vqmovun.s16     d4,  q2
        vqmovun.s16     d5,  q3
        \add            q12, q8,  q12
        vrshl.s16       q12, q12, q9
        \add            q13, q8,  q13
        vrshl.s16       q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vst1.8          {d4- d5}, [r4,:128], r1
        vst1.8          {d24-d25},[r4,:128], r1
        bne             1b
        pop             {r4, pc}
.endm

.macro  weight_8 add
        vdup.8          d0,  r12
1:      subs            r2,  r2,  #2
        vld1.8          {d4},[r0,:64], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        vld1.8          {d6},[r0,:64], r1
        vmull.u8        q10, d0,  d6
        \add            q1,  q8,  q1
        pld             [r0]
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vst1.8          {d2},[r4,:64], r1
        vst1.8          {d4},[r4,:64], r1
        bne             1b
        pop             {r4, pc}
.endm

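@ As with biweight_4, the 4-pixel-wide variant consumes four rows per
@ iteration and finishes a 2-row tail at label 2 when the subs goes
@ negative.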
.macro  weight_4 add
        vdup.8          d0,  r12
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r2,  r2,  #4
        vld1.32         {d4[0]},[r0,:32], r1
        vld1.32         {d4[1]},[r0,:32], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        blt             2f
        vld1.32         {d6[0]},[r0,:32], r1
        vld1.32         {d6[1]},[r0,:32], r1
        vmull.u8        q10, d0,  d6
        pld             [r0]
        \add            q1,  q8,  q1
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        vmov            q1,  q8
        vst1.32         {d4[0]},[r4,:32], r1
        vst1.32         {d4[1]},[r4,:32], r1
        bne             1b
        pop             {r4, pc}
2:      \add            q1,  q8,  q1
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        pop             {r4, pc}
.endm

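@ void ff_weight_h264_pixels_{16,8,4}_neon(uint8_t *dst, int stride,
@         int height, int log2_denom, int weight, int offset)
@ Entry code selects between the vhadd (log2_denom > 1) and vadd
@ (log2_denom <= 1) forms of the loop, and between the add and
@ subtract variants depending on the sign of the weight, which is
@ negated when negative since vmull.u8 is unsigned.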
.macro  weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
        push            {r4, lr}
        ldr             r12, [sp, #8]
        ldr             r4,  [sp, #12]
        cmp             r3,  #1
        lsl             r4,  r4,  r3
        vdup.16         q8,  r4
        mov             r4,  r0
        ble             20f
        rsb             lr,  r3,  #1
        vdup.16         q9,  lr
        cmp             r12, #0
        blt             10f
        weight_\w       vhadd.s16
10:     rsb             r12, r12, #0
        weight_\w       vhsub.s16
20:     rsb             lr,  r3,  #0
        vdup.16         q9,  lr
        cmp             r12, #0
        blt             10f
        weight_\w       vadd.s16
10:     rsb             r12, r12, #0
        weight_\w       vsub.s16
endfunc
.endm

        weight_func     16
        weight_func     8
        weight_func     4