/*****************************************************************************
 * Copyright (C) 2013 x265 project
 *
 * Authors: Mandar Gurav <mandar@multicorewareinc.com>
 *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
 *          Mahesh Pittala <mahesh@multicorewareinc.com>
 *          Rajesh Paulraj <rajesh@multicorewareinc.com>
 *          Min Chen <min.chen@multicorewareinc.com>
 *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
 *          Nabajit Deka <nabajit@multicorewareinc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at license @ x265.com.
 *****************************************************************************/

#include "common.h"
#include "primitives.h"

using namespace x265;

#if _MSC_VER
#pragma warning(disable: 4127) // conditional expression is constant, typical for templated functions
#endif

namespace {
// anonymous file-static namespace
// Fast DST algorithm. Full matrix multiplication for the DST and the fast DST
// algorithm give identical results.
void fastForwardDst(const int16_t* block, int16_t* coeff, int shift) // input block, output coeff
{
    int c[4];
    int rnd_factor = 1 << (shift - 1);

    for (int i = 0; i < 4; i++)
    {
        // Intermediate Variables
        c[0] = block[4 * i + 0] + block[4 * i + 3];
        c[1] = block[4 * i + 1] + block[4 * i + 3];
        c[2] = block[4 * i + 0] - block[4 * i + 1];
        c[3] = 74 * block[4 * i + 2];

        coeff[i] = (int16_t)((29 * c[0] + 55 * c[1] + c[3] + rnd_factor) >> shift);
        coeff[4 + i] = (int16_t)((74 * (block[4 * i + 0] + block[4 * i + 1] - block[4 * i + 3]) + rnd_factor) >> shift);
        coeff[8 + i] = (int16_t)((29 * c[2] + 55 * c[0] - c[3] + rnd_factor) >> shift);
        coeff[12 + i] = (int16_t)((55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
    }
}
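
// For reference: the butterfly above is algebraically identical to multiplying
// each input row by the HEVC 4x4 DST-VII basis,
//
//     { 29,  55,  74,  84 }
//     { 74,  74,   0, -74 }
//     { 84, -29, -74,  55 }
//     { 55, -84,  74, -29 }
//
// e.g. coeff[i] = 29 * c[0] + 55 * c[1] + c[3] expands to
// 29*b0 + 55*b1 + 74*b2 + 84*b3, the first basis row applied to row i.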

void inversedst(const int16_t* tmp, int16_t* block, int shift) // input tmp, output block
{
    int i, c[4];
    int rnd_factor = 1 << (shift - 1);

    for (i = 0; i < 4; i++)
    {
        // Intermediate Variables
        c[0] = tmp[i] + tmp[8 + i];
        c[1] = tmp[8 + i] + tmp[12 + i];
        c[2] = tmp[i] - tmp[12 + i];
        c[3] = 74 * tmp[4 + i];

        block[4 * i + 0] = (int16_t)Clip3(-32768, 32767, (29 * c[0] + 55 * c[1] + c[3] + rnd_factor) >> shift);
        block[4 * i + 1] = (int16_t)Clip3(-32768, 32767, (55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
        block[4 * i + 2] = (int16_t)Clip3(-32768, 32767, (74 * (tmp[i] - tmp[8 + i] + tmp[12 + i]) + rnd_factor) >> shift);
        block[4 * i + 3] = (int16_t)Clip3(-32768, 32767, (55 * c[0] + 29 * c[2] - c[3] + rnd_factor) >> shift);
    }
}
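
// Correspondingly, inversedst applies the transposed basis: block[4*i+0]
// expands to 29*t0 + 74*t4 + 84*t8 + 55*t12, the first *column* of the matrix
// above, with every output clipped to the int16 range.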

void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[8], O[8];
    int EE[4], EO[4];
    int EEE[2], EEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 8; k++)
        {
            E[k] = src[k] + src[15 - k];
            O[k] = src[k] - src[15 - k];
        }

        /* EE and EO */
        for (k = 0; k < 4; k++)
        {
            EE[k] = E[k] + E[7 - k];
            EO[k] = E[k] - E[7 - k];
        }

        /* EEE and EEO */
        EEE[0] = EE[0] + EE[3];
        EEO[0] = EE[0] - EE[3];
        EEE[1] = EE[1] + EE[2];
        EEO[1] = EE[1] - EE[2];

        dst[0] = (int16_t)((g_t16[0][0] * EEE[0] + g_t16[0][1] * EEE[1] + add) >> shift);
        dst[8 * line] = (int16_t)((g_t16[8][0] * EEE[0] + g_t16[8][1] * EEE[1] + add) >> shift);
        dst[4 * line] = (int16_t)((g_t16[4][0] * EEO[0] + g_t16[4][1] * EEO[1] + add) >> shift);
        dst[12 * line] = (int16_t)((g_t16[12][0] * EEO[0] + g_t16[12][1] * EEO[1] + add) >> shift);

        for (k = 2; k < 16; k += 4)
        {
            dst[k * line] = (int16_t)((g_t16[k][0] * EO[0] + g_t16[k][1] * EO[1] + g_t16[k][2] * EO[2] +
                                       g_t16[k][3] * EO[3] + add) >> shift);
        }

        for (k = 1; k < 16; k += 2)
        {
            dst[k * line] = (int16_t)((g_t16[k][0] * O[0] + g_t16[k][1] * O[1] + g_t16[k][2] * O[2] + g_t16[k][3] * O[3] +
                                       g_t16[k][4] * O[4] + g_t16[k][5] * O[5] + g_t16[k][6] * O[6] + g_t16[k][7] * O[7] +
                                       add) >> shift);
        }

        src += 16;
        dst++;
    }
}
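
// Counting multiplies per input line in the butterfly above: 8 odd outputs x 8
// products, 4 EO outputs x 4 products and 4 direct outputs x 2 products gives
// 64 + 16 + 8 = 88, versus 16 x 16 = 256 for a direct matrix multiply.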

void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[16], O[16];
    int EE[8], EO[8];
    int EEE[4], EEO[4];
    int EEEE[2], EEEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 16; k++)
        {
            E[k] = src[k] + src[31 - k];
            O[k] = src[k] - src[31 - k];
        }

        /* EE and EO */
        for (k = 0; k < 8; k++)
        {
            EE[k] = E[k] + E[15 - k];
            EO[k] = E[k] - E[15 - k];
        }

        /* EEE and EEO */
        for (k = 0; k < 4; k++)
        {
            EEE[k] = EE[k] + EE[7 - k];
            EEO[k] = EE[k] - EE[7 - k];
        }

        /* EEEE and EEEO */
        EEEE[0] = EEE[0] + EEE[3];
        EEEO[0] = EEE[0] - EEE[3];
        EEEE[1] = EEE[1] + EEE[2];
        EEEO[1] = EEE[1] - EEE[2];

        dst[0] = (int16_t)((g_t32[0][0] * EEEE[0] + g_t32[0][1] * EEEE[1] + add) >> shift);
        dst[16 * line] = (int16_t)((g_t32[16][0] * EEEE[0] + g_t32[16][1] * EEEE[1] + add) >> shift);
        dst[8 * line] = (int16_t)((g_t32[8][0] * EEEO[0] + g_t32[8][1] * EEEO[1] + add) >> shift);
        dst[24 * line] = (int16_t)((g_t32[24][0] * EEEO[0] + g_t32[24][1] * EEEO[1] + add) >> shift);
        for (k = 4; k < 32; k += 8)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * EEO[0] + g_t32[k][1] * EEO[1] + g_t32[k][2] * EEO[2] +
                                       g_t32[k][3] * EEO[3] + add) >> shift);
        }

        for (k = 2; k < 32; k += 4)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * EO[0] + g_t32[k][1] * EO[1] + g_t32[k][2] * EO[2] +
                                       g_t32[k][3] * EO[3] + g_t32[k][4] * EO[4] + g_t32[k][5] * EO[5] +
                                       g_t32[k][6] * EO[6] + g_t32[k][7] * EO[7] + add) >> shift);
        }

        for (k = 1; k < 32; k += 2)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * O[0] + g_t32[k][1] * O[1] + g_t32[k][2] * O[2] + g_t32[k][3] * O[3] +
                                       g_t32[k][4] * O[4] + g_t32[k][5] * O[5] + g_t32[k][6] * O[6] + g_t32[k][7] * O[7] +
                                       g_t32[k][8] * O[8] + g_t32[k][9] * O[9] + g_t32[k][10] * O[10] + g_t32[k][11] * O[11] +
                                       g_t32[k][12] * O[12] + g_t32[k][13] * O[13] + g_t32[k][14] * O[14] +
                                       g_t32[k][15] * O[15] + add) >> shift);
        }

        src += 32;
        dst++;
    }
}
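
// The same even/odd split recurses one level deeper here: 16x16 + 8x8 + 4x4 +
// 4x2 = 344 multiplies per line, versus 32 x 32 = 1024 for a direct multiply.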

void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[4], O[4];
    int EE[2], EO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 4; k++)
        {
            E[k] = src[k] + src[7 - k];
            O[k] = src[k] - src[7 - k];
        }

        /* EE and EO */
        EE[0] = E[0] + E[3];
        EO[0] = E[0] - E[3];
        EE[1] = E[1] + E[2];
        EO[1] = E[1] - E[2];

        dst[0] = (int16_t)((g_t8[0][0] * EE[0] + g_t8[0][1] * EE[1] + add) >> shift);
        dst[4 * line] = (int16_t)((g_t8[4][0] * EE[0] + g_t8[4][1] * EE[1] + add) >> shift);
        dst[2 * line] = (int16_t)((g_t8[2][0] * EO[0] + g_t8[2][1] * EO[1] + add) >> shift);
        dst[6 * line] = (int16_t)((g_t8[6][0] * EO[0] + g_t8[6][1] * EO[1] + add) >> shift);

        dst[line] = (int16_t)((g_t8[1][0] * O[0] + g_t8[1][1] * O[1] + g_t8[1][2] * O[2] + g_t8[1][3] * O[3] + add) >> shift);
        dst[3 * line] = (int16_t)((g_t8[3][0] * O[0] + g_t8[3][1] * O[1] + g_t8[3][2] * O[2] + g_t8[3][3] * O[3] + add) >> shift);
        dst[5 * line] = (int16_t)((g_t8[5][0] * O[0] + g_t8[5][1] * O[1] + g_t8[5][2] * O[2] + g_t8[5][3] * O[3] + add) >> shift);
        dst[7 * line] = (int16_t)((g_t8[7][0] * O[0] + g_t8[7][1] * O[1] + g_t8[7][2] * O[2] + g_t8[7][3] * O[3] + add) >> shift);

        src += 8;
        dst++;
    }
}

void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j;
    int E[2], O[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Exploit the symmetry of the transform matrix to minimize the number of multiplications */
        O[0] = g_t4[1][0] * src[line] + g_t4[3][0] * src[3 * line];
        O[1] = g_t4[1][1] * src[line] + g_t4[3][1] * src[3 * line];
        E[0] = g_t4[0][0] * src[0] + g_t4[2][0] * src[2 * line];
        E[1] = g_t4[0][1] * src[0] + g_t4[2][1] * src[2 * line];

        /* Combine even and odd terms at each hierarchy level to calculate the final spatial-domain vector */
        dst[0] = (int16_t)(Clip3(-32768, 32767, (E[0] + O[0] + add) >> shift));
        dst[1] = (int16_t)(Clip3(-32768, 32767, (E[1] + O[1] + add) >> shift));
        dst[2] = (int16_t)(Clip3(-32768, 32767, (E[1] - O[1] + add) >> shift));
        dst[3] = (int16_t)(Clip3(-32768, 32767, (E[0] - O[0] + add) >> shift));

        src++;
        dst += 4;
    }
}

void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[4], O[4];
    int EE[2], EO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Exploit the symmetry of the transform matrix to minimize the number of multiplications */
        for (k = 0; k < 4; k++)
        {
            O[k] = g_t8[1][k] * src[line] + g_t8[3][k] * src[3 * line] + g_t8[5][k] * src[5 * line] + g_t8[7][k] * src[7 * line];
        }

        EO[0] = g_t8[2][0] * src[2 * line] + g_t8[6][0] * src[6 * line];
        EO[1] = g_t8[2][1] * src[2 * line] + g_t8[6][1] * src[6 * line];
        EE[0] = g_t8[0][0] * src[0] + g_t8[4][0] * src[4 * line];
        EE[1] = g_t8[0][1] * src[0] + g_t8[4][1] * src[4 * line];

        /* Combine even and odd terms at each hierarchy level to calculate the final spatial-domain vector */
        E[0] = EE[0] + EO[0];
        E[3] = EE[0] - EO[0];
        E[1] = EE[1] + EO[1];
        E[2] = EE[1] - EO[1];
        for (k = 0; k < 4; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 4] = (int16_t)Clip3(-32768, 32767, (E[3 - k] - O[3 - k] + add) >> shift);
        }

        src++;
        dst += 8;
    }
}

void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[8], O[8];
    int EE[4], EO[4];
    int EEE[2], EEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Exploit the symmetry of the transform matrix to minimize the number of multiplications */
        for (k = 0; k < 8; k++)
        {
            O[k] = g_t16[1][k] * src[line] + g_t16[3][k] * src[3 * line] + g_t16[5][k] * src[5 * line] + g_t16[7][k] * src[7 * line] +
                   g_t16[9][k] * src[9 * line] + g_t16[11][k] * src[11 * line] + g_t16[13][k] * src[13 * line] + g_t16[15][k] * src[15 * line];
        }

        for (k = 0; k < 4; k++)
        {
            EO[k] = g_t16[2][k] * src[2 * line] + g_t16[6][k] * src[6 * line] + g_t16[10][k] * src[10 * line] + g_t16[14][k] * src[14 * line];
        }

        EEO[0] = g_t16[4][0] * src[4 * line] + g_t16[12][0] * src[12 * line];
        EEE[0] = g_t16[0][0] * src[0] + g_t16[8][0] * src[8 * line];
        EEO[1] = g_t16[4][1] * src[4 * line] + g_t16[12][1] * src[12 * line];
        EEE[1] = g_t16[0][1] * src[0] + g_t16[8][1] * src[8 * line];

        /* Combine even and odd terms at each hierarchy level to calculate the final spatial-domain vector */
        for (k = 0; k < 2; k++)
        {
            EE[k] = EEE[k] + EEO[k];
            EE[k + 2] = EEE[1 - k] - EEO[1 - k];
        }

        for (k = 0; k < 4; k++)
        {
            E[k] = EE[k] + EO[k];
            E[k + 4] = EE[3 - k] - EO[3 - k];
        }

        for (k = 0; k < 8; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 8] = (int16_t)Clip3(-32768, 32767, (E[7 - k] - O[7 - k] + add) >> shift);
        }

        src++;
        dst += 16;
    }
}

void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j, k;
    int E[16], O[16];
    int EE[8], EO[8];
    int EEE[4], EEO[4];
    int EEEE[2], EEEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Exploit the symmetry of the transform matrix to minimize the number of multiplications */
        for (k = 0; k < 16; k++)
        {
            O[k] = g_t32[1][k] * src[line] + g_t32[3][k] * src[3 * line] + g_t32[5][k] * src[5 * line] + g_t32[7][k] * src[7 * line] +
                   g_t32[9][k] * src[9 * line] + g_t32[11][k] * src[11 * line] + g_t32[13][k] * src[13 * line] + g_t32[15][k] * src[15 * line] +
                   g_t32[17][k] * src[17 * line] + g_t32[19][k] * src[19 * line] + g_t32[21][k] * src[21 * line] + g_t32[23][k] * src[23 * line] +
                   g_t32[25][k] * src[25 * line] + g_t32[27][k] * src[27 * line] + g_t32[29][k] * src[29 * line] + g_t32[31][k] * src[31 * line];
        }

        for (k = 0; k < 8; k++)
        {
            EO[k] = g_t32[2][k] * src[2 * line] + g_t32[6][k] * src[6 * line] + g_t32[10][k] * src[10 * line] + g_t32[14][k] * src[14 * line] +
                    g_t32[18][k] * src[18 * line] + g_t32[22][k] * src[22 * line] + g_t32[26][k] * src[26 * line] + g_t32[30][k] * src[30 * line];
        }

        for (k = 0; k < 4; k++)
        {
            EEO[k] = g_t32[4][k] * src[4 * line] + g_t32[12][k] * src[12 * line] + g_t32[20][k] * src[20 * line] + g_t32[28][k] * src[28 * line];
        }

        EEEO[0] = g_t32[8][0] * src[8 * line] + g_t32[24][0] * src[24 * line];
        EEEO[1] = g_t32[8][1] * src[8 * line] + g_t32[24][1] * src[24 * line];
        EEEE[0] = g_t32[0][0] * src[0] + g_t32[16][0] * src[16 * line];
        EEEE[1] = g_t32[0][1] * src[0] + g_t32[16][1] * src[16 * line];

        /* Combine even and odd terms at each hierarchy level to calculate the final spatial-domain vector */
        EEE[0] = EEEE[0] + EEEO[0];
        EEE[3] = EEEE[0] - EEEO[0];
        EEE[1] = EEEE[1] + EEEO[1];
        EEE[2] = EEEE[1] - EEEO[1];
        for (k = 0; k < 4; k++)
        {
            EE[k] = EEE[k] + EEO[k];
            EE[k + 4] = EEE[3 - k] - EEO[3 - k];
        }

        for (k = 0; k < 8; k++)
        {
            E[k] = EE[k] + EO[k];
            E[k + 8] = EE[7 - k] - EO[7 - k];
        }

        for (k = 0; k < 16; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 16] = (int16_t)Clip3(-32768, 32767, (E[15 - k] - O[15 - k] + add) >> shift);
        }

        src++;
        dst += 32;
    }
}

void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
{
    int j;
    int E[2], O[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        E[0] = src[0] + src[3];
        O[0] = src[0] - src[3];
        E[1] = src[1] + src[2];
        O[1] = src[1] - src[2];

        dst[0] = (int16_t)((g_t4[0][0] * E[0] + g_t4[0][1] * E[1] + add) >> shift);
        dst[2 * line] = (int16_t)((g_t4[2][0] * E[0] + g_t4[2][1] * E[1] + add) >> shift);
        dst[line] = (int16_t)((g_t4[1][0] * O[0] + g_t4[1][1] * O[1] + add) >> shift);
        dst[3 * line] = (int16_t)((g_t4[3][0] * O[0] + g_t4[3][1] * O[1] + add) >> shift);

        src += 4;
        dst++;
    }
}
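
// For reference, g_t4 holds the HEVC 4x4 DCT-II basis:
//
//     { 64,  64,  64,  64 }
//     { 83,  36, -36, -83 }
//     { 64, -64, -64,  64 }
//     { 36, -83,  83, -36 }
//
// so dst[0] above is the DC term 64 * (E[0] + E[1]) before rounding and shift.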

void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
{
    const int shift_1st = 1 + X265_DEPTH - 8;
    const int shift_2nd = 8;

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    for (int i = 0; i < 4; i++)
    {
        memcpy(&block[i * 4], &src[i * srcStride], 4 * sizeof(int16_t));
    }

    fastForwardDst(block, coef, shift_1st);
    fastForwardDst(coef, dst, shift_2nd);
}
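
// The two-stage shifts in these forward wrappers follow the HEVC convention
// shift_1st = log2(N) + X265_DEPTH - 9 and shift_2nd = log2(N) + 6 (here
// N = 4, so 1 + X265_DEPTH - 8 and 8), keeping intermediates in 16-bit range.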

void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
{
    const int shift_1st = 1 + X265_DEPTH - 8;
    const int shift_2nd = 8;

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    for (int i = 0; i < 4; i++)
    {
        memcpy(&block[i * 4], &src[i * srcStride], 4 * sizeof(int16_t));
    }

    partialButterfly4(block, coef, shift_1st, 4);
    partialButterfly4(coef, dst, shift_2nd, 4);
}
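
// Illustrative call (hypothetical buffers, not code from this file): transform
// one 4x4 residual block whose rows are contiguous with stride 4:
//
//     ALIGN_VAR_32(int16_t, resi[4 * 4]);  // spatial-domain residual in
//     ALIGN_VAR_32(int16_t, coef[4 * 4]);  // transform coefficients out
//     dct4_c(resi, coef, 4);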

void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
{
    const int shift_1st = 2 + X265_DEPTH - 8;
    const int shift_2nd = 9;

    ALIGN_VAR_32(int16_t, coef[8 * 8]);
    ALIGN_VAR_32(int16_t, block[8 * 8]);

    for (int i = 0; i < 8; i++)
    {
        memcpy(&block[i * 8], &src[i * srcStride], 8 * sizeof(int16_t));
    }

    partialButterfly8(block, coef, shift_1st, 8);
    partialButterfly8(coef, dst, shift_2nd, 8);
}

void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
{
    const int shift_1st = 3 + X265_DEPTH - 8;
    const int shift_2nd = 10;

    ALIGN_VAR_32(int16_t, coef[16 * 16]);
    ALIGN_VAR_32(int16_t, block[16 * 16]);

    for (int i = 0; i < 16; i++)
    {
        memcpy(&block[i * 16], &src[i * srcStride], 16 * sizeof(int16_t));
    }

    partialButterfly16(block, coef, shift_1st, 16);
    partialButterfly16(coef, dst, shift_2nd, 16);
}

void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
{
    const int shift_1st = 4 + X265_DEPTH - 8;
    const int shift_2nd = 11;

    ALIGN_VAR_32(int16_t, coef[32 * 32]);
    ALIGN_VAR_32(int16_t, block[32 * 32]);

    for (int i = 0; i < 32; i++)
    {
        memcpy(&block[i * 32], &src[i * srcStride], 32 * sizeof(int16_t));
    }

    partialButterfly32(block, coef, shift_1st, 32);
    partialButterfly32(coef, dst, shift_2nd, 32);
}

void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    inversedst(src, coef, shift_1st);    // inverse DST by fast algorithm: src input, coef output
    inversedst(coef, block, shift_2nd);  // inverse DST by fast algorithm: coef input, block output

    for (int i = 0; i < 4; i++)
    {
        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
    }
}
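
// The inverse wrappers use the complementary shifts shift_1st = 7 and
// shift_2nd = 20 - X265_DEPTH, and the inverse butterflies clip every
// intermediate to [-32768, 32767] as the HEVC inverse transform requires.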

void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    partialButterflyInverse4(src, coef, shift_1st, 4);    // inverse DCT by partial butterfly: src input, coef output
    partialButterflyInverse4(coef, block, shift_2nd, 4);  // inverse DCT by partial butterfly: coef input, block output

    for (int i = 0; i < 4; i++)
    {
        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
    }
}

void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[8 * 8]);
    ALIGN_VAR_32(int16_t, block[8 * 8]);

    partialButterflyInverse8(src, coef, shift_1st, 8);
    partialButterflyInverse8(coef, block, shift_2nd, 8);

    for (int i = 0; i < 8; i++)
    {
        memcpy(&dst[i * dstStride], &block[i * 8], 8 * sizeof(int16_t));
    }
}

void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[16 * 16]);
    ALIGN_VAR_32(int16_t, block[16 * 16]);

    partialButterflyInverse16(src, coef, shift_1st, 16);
    partialButterflyInverse16(coef, block, shift_2nd, 16);

    for (int i = 0; i < 16; i++)
    {
        memcpy(&dst[i * dstStride], &block[i * 16], 16 * sizeof(int16_t));
    }
}

void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[32 * 32]);
    ALIGN_VAR_32(int16_t, block[32 * 32]);

    partialButterflyInverse32(src, coef, shift_1st, 32);
    partialButterflyInverse32(coef, block, shift_2nd, 32);

    for (int i = 0; i < 32; i++)
    {
        memcpy(&dst[i * dstStride], &block[i * 32], 32 * sizeof(int16_t));
    }
}

void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
{
#if HIGH_BIT_DEPTH
    X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > 2), "dequant invalid scale %d\n", scale);
#else
    // NOTE: maximum of scale is (72 * 256)
    X265_CHECK(scale < 32768, "dequant invalid scale %d\n", scale);
#endif
    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
    X265_CHECK((num % 8) == 0, "dequant num %d not multiple of 8\n", num);
    X265_CHECK(shift <= 10, "shift too large %d\n", shift);
    X265_CHECK(((intptr_t)coef & 31) == 0, "dequant coef buffer not aligned\n");

    int add, coeffQ;

    add = 1 << (shift - 1);

    for (int n = 0; n < num; n++)
    {
        coeffQ = (quantCoef[n] * scale + add) >> shift;
        coef[n] = (int16_t)Clip3(-32768, 32767, coeffQ);
    }
}
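
// In practice (an assumption about the callers, not enforced here), scale is
// derived from the HEVC inverse quant scales {40, 45, 51, 57, 64, 72}[qp % 6]
// shifted left by qp / 6, which is what bounds it at 72 * 256 in the 8-bit
// case noted above.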

void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
{
    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);

    int add, coeffQ;

    shift += 4;

    if (shift > per)
    {
        add = 1 << (shift - per - 1);

        for (int n = 0; n < num; n++)
        {
            coeffQ = ((quantCoef[n] * deQuantCoef[n]) + add) >> (shift - per);
            coef[n] = (int16_t)Clip3(-32768, 32767, coeffQ);
        }
    }
    else
    {
        for (int n = 0; n < num; n++)
        {
            coeffQ = Clip3(-32768, 32767, quantCoef[n] * deQuantCoef[n]);
            coef[n] = (int16_t)Clip3(-32768, 32767, coeffQ << (per - shift));
        }
    }
}
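
// Why shift is bumped by 4 (an inference from the HEVC design, not checked
// here): deQuantCoef folds in the scaling-list entry, whose flat/neutral value
// is 16 = 2^4, so the extra shift restores unit gain for a flat list.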

uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
{
    X265_CHECK(qBits >= 8, "qBits less than 8\n");
    X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
    int qBits8 = qBits - 8;
    uint32_t numSig = 0;

    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
    {
        int level = coef[blockpos];
        int sign = (level < 0 ? -1 : 1);

        int tmplevel = abs(level) * quantCoeff[blockpos];
        level = ((tmplevel + add) >> qBits);
        deltaU[blockpos] = ((tmplevel - (level << qBits)) >> qBits8);
        if (level)
            ++numSig;
        level *= sign;
        qCoef[blockpos] = (int16_t)Clip3(-32768, 32767, level);
    }

    return numSig;
}
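
// deltaU records the quantization remainder, rescaled to qBits - 8 bits of
// precision; downstream logic (e.g. the sign-bit-hiding decision) uses it to
// pick the cheapest coefficient to adjust.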

uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
{
    X265_CHECK((numCoeff % 16) == 0, "numCoeff not a multiple of 16 (one 4x4 block)\n");
    X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "add must be less than 2^qBits\n");
    X265_CHECK(((intptr_t)quantCoeff & 31) == 0, "quantCoeff buffer not aligned\n");

    uint32_t numSig = 0;

    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
    {
        int level = coef[blockpos];
        int sign = (level < 0 ? -1 : 1);

        int tmplevel = abs(level) * quantCoeff[blockpos];
        level = ((tmplevel + add) >> qBits);
        if (level)
            ++numSig;
        level *= sign;
        qCoef[blockpos] = (int16_t)Clip3(-32768, 32767, level);
    }

    return numSig;
}
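
// nquant_c is the lean variant of quant_c for callers that do not need the
// deltaU side-channel; it only quantizes and counts the significant levels.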

int count_nonzero_c(const int16_t* quantCoeff, int numCoeff)
{
    X265_CHECK(((intptr_t)quantCoeff & 15) == 0, "quant buffer not aligned\n");
    X265_CHECK(numCoeff > 0 && (numCoeff & 15) == 0, "numCoeff invalid %d\n", numCoeff);

    int count = 0;

    for (int i = 0; i < numCoeff; i++)
    {
        count += quantCoeff[i] != 0;
    }

    return count;
}

template<int trSize>
uint32_t copy_count(int16_t* coeff, const int16_t* residual, intptr_t resiStride)
{
    uint32_t numSig = 0;
    for (int k = 0; k < trSize; k++)
    {
        for (int j = 0; j < trSize; j++)
        {
            coeff[k * trSize + j] = residual[k * resiStride + j];
            numSig += (residual[k * resiStride + j] != 0);
        }
    }

    return numSig;
}
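
// copy_count forwards the residual verbatim as coefficients (the
// transform-bypass style path) and counts nonzero entries so the caller knows
// whether anything needs to be coded.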

// DCT-domain noise reduction (in the style of x264's --nr; a reading of this
// routine, not a spec reference): accumulate each coefficient's magnitude into
// resSum so the caller can adapt future offsets, then pull the magnitude
// toward zero by offset[i], clamping at zero.
void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
{
    for (int i = 0; i < numCoeff; i++)
    {
        int level = dctCoef[i];
        int sign = level >> 31;        // 0 if non-negative, -1 (all ones) if negative
        level = (level + sign) ^ sign; // branchless abs(level)
        resSum[i] += level;
        level -= offset[i];
        dctCoef[i] = (int16_t)(level < 0 ? 0 : (level ^ sign) - sign); // restore sign; clamp at zero
    }
}

} // closing - anonymous file-static namespace

namespace x265 {
// x265 private namespace

void Setup_C_DCTPrimitives(EncoderPrimitives& p)
{
    p.dequant_scaling = dequant_scaling_c;
    p.dequant_normal = dequant_normal_c;
    p.quant = quant_c;
    p.nquant = nquant_c;
    p.dct[DST_4x4] = dst4_c;
    p.dct[DCT_4x4] = dct4_c;
    p.dct[DCT_8x8] = dct8_c;
    p.dct[DCT_16x16] = dct16_c;
    p.dct[DCT_32x32] = dct32_c;
    p.idct[IDST_4x4] = idst4_c;
    p.idct[IDCT_4x4] = idct4_c;
    p.idct[IDCT_8x8] = idct8_c;
    p.idct[IDCT_16x16] = idct16_c;
    p.idct[IDCT_32x32] = idct32_c;
    p.count_nonzero = count_nonzero_c;
    p.denoiseDct = denoiseDct_c;

    p.copy_cnt[BLOCK_4x4] = copy_count<4>;
    p.copy_cnt[BLOCK_8x8] = copy_count<8>;
    p.copy_cnt[BLOCK_16x16] = copy_count<16>;
    p.copy_cnt[BLOCK_32x32] = copy_count<32>;
}
}