/*****************************************************************************
 * Copyright (C) 2013 x265 project
 *
 * Authors: Mandar Gurav <mandar@multicorewareinc.com>
 *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
 *          Mahesh Pittala <mahesh@multicorewareinc.com>
 *          Rajesh Paulraj <rajesh@multicorewareinc.com>
 *          Min Chen <min.chen@multicorewareinc.com>
 *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
 *          Nabajit Deka <nabajit@multicorewareinc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at license @ x265.com.
 *****************************************************************************/

#include "common.h"
#include "primitives.h"

using namespace x265;

#if _MSC_VER
#pragma warning(disable: 4127) // conditional expression is constant, typical for templated functions
#endif

namespace {
// anonymous file-static namespace
// Fast DST algorithm. Full matrix multiplication for the DST and the fast
// DST algorithm give identical results.
void fastForwardDst(int16_t *block, int16_t *coeff, int shift) // input block, output coeff
{
    int c[4];
    int rnd_factor = 1 << (shift - 1);

    for (int i = 0; i < 4; i++)
    {
        // Intermediate Variables
        c[0] = block[4 * i + 0] + block[4 * i + 3];
        c[1] = block[4 * i + 1] + block[4 * i + 3];
        c[2] = block[4 * i + 0] - block[4 * i + 1];
        c[3] = 74 * block[4 * i + 2];

        coeff[i] = (int16_t)((29 * c[0] + 55 * c[1] + c[3] + rnd_factor) >> shift);
        coeff[4 + i] = (int16_t)((74 * (block[4 * i + 0] + block[4 * i + 1] - block[4 * i + 3]) + rnd_factor) >> shift);
        coeff[8 + i] = (int16_t)((29 * c[2] + 55 * c[0] - c[3] + rnd_factor) >> shift);
        coeff[12 + i] = (int16_t)((55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
    }
}
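
// The equivalence claimed above can be sanity-checked against a plain matrix
// multiply. The reference below is an illustrative, unused sketch (hence the
// #if 0); dstMat4 is a hypothetical local copy of the standard HEVC 4x4
// DST-VII basis, not a table defined elsewhere in this file.
#if 0
static const int16_t dstMat4[4][4] =
{
    { 29,  55,  74,  84 },
    { 74,  74,   0, -74 },
    { 84, -29, -74,  55 },
    { 55, -84,  74, -29 }
};

void refForwardDst(const int16_t *block, int16_t *coeff, int shift)
{
    int rnd_factor = 1 << (shift - 1);

    for (int f = 0; f < 4; f++)      // frequency (output) row
    {
        for (int i = 0; i < 4; i++)  // spatial (input) row
        {
            int sum = rnd_factor;
            for (int k = 0; k < 4; k++)
                sum += dstMat4[f][k] * block[4 * i + k];

            coeff[4 * f + i] = (int16_t)(sum >> shift);
        }
    }
}
#endif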

void inversedst(int16_t *tmp, int16_t *block, int shift) // input tmp, output block
{
    int i, c[4];
    int rnd_factor = 1 << (shift - 1);

    for (i = 0; i < 4; i++)
    {
        // Intermediate Variables
        c[0] = tmp[i] + tmp[8 + i];
        c[1] = tmp[8 + i] + tmp[12 + i];
        c[2] = tmp[i] - tmp[12 + i];
        c[3] = 74 * tmp[4 + i];

        block[4 * i + 0] = (int16_t)Clip3(-32768, 32767, (29 * c[0] + 55 * c[1] + c[3] + rnd_factor) >> shift);
        block[4 * i + 1] = (int16_t)Clip3(-32768, 32767, (55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
        block[4 * i + 2] = (int16_t)Clip3(-32768, 32767, (74 * (tmp[i] - tmp[8 + i] + tmp[12 + i]) + rnd_factor) >> shift);
        block[4 * i + 3] = (int16_t)Clip3(-32768, 32767, (55 * c[0] + 29 * c[2] - c[3] + rnd_factor) >> shift);
    }
}

void partialButterfly16(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[8], O[8];
    int EE[4], EO[4];
    int EEE[2], EEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 8; k++)
        {
            E[k] = src[k] + src[15 - k];
            O[k] = src[k] - src[15 - k];
        }

        /* EE and EO */
        for (k = 0; k < 4; k++)
        {
            EE[k] = E[k] + E[7 - k];
            EO[k] = E[k] - E[7 - k];
        }

        /* EEE and EEO */
        EEE[0] = EE[0] + EE[3];
        EEO[0] = EE[0] - EE[3];
        EEE[1] = EE[1] + EE[2];
        EEO[1] = EE[1] - EE[2];

        dst[0] = (int16_t)((g_t16[0][0] * EEE[0] + g_t16[0][1] * EEE[1] + add) >> shift);
        dst[8 * line] = (int16_t)((g_t16[8][0] * EEE[0] + g_t16[8][1] * EEE[1] + add) >> shift);
        dst[4 * line] = (int16_t)((g_t16[4][0] * EEO[0] + g_t16[4][1] * EEO[1] + add) >> shift);
        dst[12 * line] = (int16_t)((g_t16[12][0] * EEO[0] + g_t16[12][1] * EEO[1] + add) >> shift);

        for (k = 2; k < 16; k += 4)
        {
            dst[k * line] = (int16_t)((g_t16[k][0] * EO[0] + g_t16[k][1] * EO[1] + g_t16[k][2] * EO[2] +
                                       g_t16[k][3] * EO[3] + add) >> shift);
        }

        for (k = 1; k < 16; k += 2)
        {
            dst[k * line] = (int16_t)((g_t16[k][0] * O[0] + g_t16[k][1] * O[1] + g_t16[k][2] * O[2] + g_t16[k][3] * O[3] +
                                       g_t16[k][4] * O[4] + g_t16[k][5] * O[5] + g_t16[k][6] * O[6] + g_t16[k][7] * O[7] +
                                       add) >> shift);
        }

        src += 16;
        dst++;
    }
}

void partialButterfly32(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[16], O[16];
    int EE[8], EO[8];
    int EEE[4], EEO[4];
    int EEEE[2], EEEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 16; k++)
        {
            E[k] = src[k] + src[31 - k];
            O[k] = src[k] - src[31 - k];
        }

        /* EE and EO */
        for (k = 0; k < 8; k++)
        {
            EE[k] = E[k] + E[15 - k];
            EO[k] = E[k] - E[15 - k];
        }

        /* EEE and EEO */
        for (k = 0; k < 4; k++)
        {
            EEE[k] = EE[k] + EE[7 - k];
            EEO[k] = EE[k] - EE[7 - k];
        }

        /* EEEE and EEEO */
        EEEE[0] = EEE[0] + EEE[3];
        EEEO[0] = EEE[0] - EEE[3];
        EEEE[1] = EEE[1] + EEE[2];
        EEEO[1] = EEE[1] - EEE[2];

        dst[0] = (int16_t)((g_t32[0][0] * EEEE[0] + g_t32[0][1] * EEEE[1] + add) >> shift);
        dst[16 * line] = (int16_t)((g_t32[16][0] * EEEE[0] + g_t32[16][1] * EEEE[1] + add) >> shift);
        dst[8 * line] = (int16_t)((g_t32[8][0] * EEEO[0] + g_t32[8][1] * EEEO[1] + add) >> shift);
        dst[24 * line] = (int16_t)((g_t32[24][0] * EEEO[0] + g_t32[24][1] * EEEO[1] + add) >> shift);
        for (k = 4; k < 32; k += 8)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * EEO[0] + g_t32[k][1] * EEO[1] + g_t32[k][2] * EEO[2] +
                                       g_t32[k][3] * EEO[3] + add) >> shift);
        }

        for (k = 2; k < 32; k += 4)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * EO[0] + g_t32[k][1] * EO[1] + g_t32[k][2] * EO[2] +
                                       g_t32[k][3] * EO[3] + g_t32[k][4] * EO[4] + g_t32[k][5] * EO[5] +
                                       g_t32[k][6] * EO[6] + g_t32[k][7] * EO[7] + add) >> shift);
        }

        for (k = 1; k < 32; k += 2)
        {
            dst[k * line] = (int16_t)((g_t32[k][0] * O[0] + g_t32[k][1] * O[1] + g_t32[k][2] * O[2] + g_t32[k][3] * O[3] +
                                       g_t32[k][4] * O[4] + g_t32[k][5] * O[5] + g_t32[k][6] * O[6] + g_t32[k][7] * O[7] +
                                       g_t32[k][8] * O[8] + g_t32[k][9] * O[9] + g_t32[k][10] * O[10] + g_t32[k][11] * O[11] +
                                       g_t32[k][12] * O[12] + g_t32[k][13] * O[13] + g_t32[k][14] * O[14] +
                                       g_t32[k][15] * O[15] + add) >> shift);
        }

        src += 32;
        dst++;
    }
}

void partialButterfly8(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[4], O[4];
    int EE[2], EO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        for (k = 0; k < 4; k++)
        {
            E[k] = src[k] + src[7 - k];
            O[k] = src[k] - src[7 - k];
        }

        /* EE and EO */
        EE[0] = E[0] + E[3];
        EO[0] = E[0] - E[3];
        EE[1] = E[1] + E[2];
        EO[1] = E[1] - E[2];

        dst[0] = (int16_t)((g_t8[0][0] * EE[0] + g_t8[0][1] * EE[1] + add) >> shift);
        dst[4 * line] = (int16_t)((g_t8[4][0] * EE[0] + g_t8[4][1] * EE[1] + add) >> shift);
        dst[2 * line] = (int16_t)((g_t8[2][0] * EO[0] + g_t8[2][1] * EO[1] + add) >> shift);
        dst[6 * line] = (int16_t)((g_t8[6][0] * EO[0] + g_t8[6][1] * EO[1] + add) >> shift);

        dst[line] = (int16_t)((g_t8[1][0] * O[0] + g_t8[1][1] * O[1] + g_t8[1][2] * O[2] + g_t8[1][3] * O[3] + add) >> shift);
        dst[3 * line] = (int16_t)((g_t8[3][0] * O[0] + g_t8[3][1] * O[1] + g_t8[3][2] * O[2] + g_t8[3][3] * O[3] + add) >> shift);
        dst[5 * line] = (int16_t)((g_t8[5][0] * O[0] + g_t8[5][1] * O[1] + g_t8[5][2] * O[2] + g_t8[5][3] * O[3] + add) >> shift);
        dst[7 * line] = (int16_t)((g_t8[7][0] * O[0] + g_t8[7][1] * O[1] + g_t8[7][2] * O[2] + g_t8[7][3] * O[3] + add) >> shift);

        src += 8;
        dst++;
    }
}

void partialButterflyInverse4(int16_t *src, int16_t *dst, int shift, int line)
{
    int j;
    int E[2], O[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Utilize symmetry to minimize the number of multiplications */
        O[0] = g_t4[1][0] * src[line] + g_t4[3][0] * src[3 * line];
        O[1] = g_t4[1][1] * src[line] + g_t4[3][1] * src[3 * line];
        E[0] = g_t4[0][0] * src[0] + g_t4[2][0] * src[2 * line];
        E[1] = g_t4[0][1] * src[0] + g_t4[2][1] * src[2 * line];

        /* Combine even and odd terms at each hierarchy level to compute the final spatial-domain vector */
        dst[0] = (int16_t)(Clip3(-32768, 32767, (E[0] + O[0] + add) >> shift));
        dst[1] = (int16_t)(Clip3(-32768, 32767, (E[1] + O[1] + add) >> shift));
        dst[2] = (int16_t)(Clip3(-32768, 32767, (E[1] - O[1] + add) >> shift));
        dst[3] = (int16_t)(Clip3(-32768, 32767, (E[0] - O[0] + add) >> shift));

        src++;
        dst += 4;
    }
}

void partialButterflyInverse8(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[4], O[4];
    int EE[2], EO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Utilize symmetry to minimize the number of multiplications */
        for (k = 0; k < 4; k++)
        {
            O[k] = g_t8[1][k] * src[line] + g_t8[3][k] * src[3 * line] + g_t8[5][k] * src[5 * line] + g_t8[7][k] * src[7 * line];
        }

        EO[0] = g_t8[2][0] * src[2 * line] + g_t8[6][0] * src[6 * line];
        EO[1] = g_t8[2][1] * src[2 * line] + g_t8[6][1] * src[6 * line];
        EE[0] = g_t8[0][0] * src[0] + g_t8[4][0] * src[4 * line];
        EE[1] = g_t8[0][1] * src[0] + g_t8[4][1] * src[4 * line];

        /* Combine even and odd terms at each hierarchy level to compute the final spatial-domain vector */
        E[0] = EE[0] + EO[0];
        E[3] = EE[0] - EO[0];
        E[1] = EE[1] + EO[1];
        E[2] = EE[1] - EO[1];
        for (k = 0; k < 4; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 4] = (int16_t)Clip3(-32768, 32767, (E[3 - k] - O[3 - k] + add) >> shift);
        }

        src++;
        dst += 8;
    }
}

void partialButterflyInverse16(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[8], O[8];
    int EE[4], EO[4];
    int EEE[2], EEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Utilize symmetry to minimize the number of multiplications */
        for (k = 0; k < 8; k++)
        {
            O[k] = g_t16[1][k] * src[line] + g_t16[3][k] * src[3 * line] + g_t16[5][k] * src[5 * line] + g_t16[7][k] * src[7 * line] +
                   g_t16[9][k] * src[9 * line] + g_t16[11][k] * src[11 * line] + g_t16[13][k] * src[13 * line] + g_t16[15][k] * src[15 * line];
        }

        for (k = 0; k < 4; k++)
        {
            EO[k] = g_t16[2][k] * src[2 * line] + g_t16[6][k] * src[6 * line] + g_t16[10][k] * src[10 * line] + g_t16[14][k] * src[14 * line];
        }

        EEO[0] = g_t16[4][0] * src[4 * line] + g_t16[12][0] * src[12 * line];
        EEE[0] = g_t16[0][0] * src[0] + g_t16[8][0] * src[8 * line];
        EEO[1] = g_t16[4][1] * src[4 * line] + g_t16[12][1] * src[12 * line];
        EEE[1] = g_t16[0][1] * src[0] + g_t16[8][1] * src[8 * line];

        /* Combine even and odd terms at each hierarchy level to compute the final spatial-domain vector */
        for (k = 0; k < 2; k++)
        {
            EE[k] = EEE[k] + EEO[k];
            EE[k + 2] = EEE[1 - k] - EEO[1 - k];
        }

        for (k = 0; k < 4; k++)
        {
            E[k] = EE[k] + EO[k];
            E[k + 4] = EE[3 - k] - EO[3 - k];
        }

        for (k = 0; k < 8; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 8] = (int16_t)Clip3(-32768, 32767, (E[7 - k] - O[7 - k] + add) >> shift);
        }

        src++;
        dst += 16;
    }
}

void partialButterflyInverse32(int16_t *src, int16_t *dst, int shift, int line)
{
    int j, k;
    int E[16], O[16];
    int EE[8], EO[8];
    int EEE[4], EEO[4];
    int EEEE[2], EEEO[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* Utilize symmetry to minimize the number of multiplications */
        for (k = 0; k < 16; k++)
        {
            O[k] = g_t32[1][k] * src[line] + g_t32[3][k] * src[3 * line] + g_t32[5][k] * src[5 * line] + g_t32[7][k] * src[7 * line] +
                   g_t32[9][k] * src[9 * line] + g_t32[11][k] * src[11 * line] + g_t32[13][k] * src[13 * line] + g_t32[15][k] * src[15 * line] +
                   g_t32[17][k] * src[17 * line] + g_t32[19][k] * src[19 * line] + g_t32[21][k] * src[21 * line] + g_t32[23][k] * src[23 * line] +
                   g_t32[25][k] * src[25 * line] + g_t32[27][k] * src[27 * line] + g_t32[29][k] * src[29 * line] + g_t32[31][k] * src[31 * line];
        }

        for (k = 0; k < 8; k++)
        {
            EO[k] = g_t32[2][k] * src[2 * line] + g_t32[6][k] * src[6 * line] + g_t32[10][k] * src[10 * line] + g_t32[14][k] * src[14 * line] +
                    g_t32[18][k] * src[18 * line] + g_t32[22][k] * src[22 * line] + g_t32[26][k] * src[26 * line] + g_t32[30][k] * src[30 * line];
        }

        for (k = 0; k < 4; k++)
        {
            EEO[k] = g_t32[4][k] * src[4 * line] + g_t32[12][k] * src[12 * line] + g_t32[20][k] * src[20 * line] + g_t32[28][k] * src[28 * line];
        }

        EEEO[0] = g_t32[8][0] * src[8 * line] + g_t32[24][0] * src[24 * line];
        EEEO[1] = g_t32[8][1] * src[8 * line] + g_t32[24][1] * src[24 * line];
        EEEE[0] = g_t32[0][0] * src[0] + g_t32[16][0] * src[16 * line];
        EEEE[1] = g_t32[0][1] * src[0] + g_t32[16][1] * src[16 * line];

        /* Combine even and odd terms at each hierarchy level to compute the final spatial-domain vector */
        EEE[0] = EEEE[0] + EEEO[0];
        EEE[3] = EEEE[0] - EEEO[0];
        EEE[1] = EEEE[1] + EEEO[1];
        EEE[2] = EEEE[1] - EEEO[1];
        for (k = 0; k < 4; k++)
        {
            EE[k] = EEE[k] + EEO[k];
            EE[k + 4] = EEE[3 - k] - EEO[3 - k];
        }

        for (k = 0; k < 8; k++)
        {
            E[k] = EE[k] + EO[k];
            E[k + 8] = EE[7 - k] - EO[7 - k];
        }

        for (k = 0; k < 16; k++)
        {
            dst[k] = (int16_t)Clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
            dst[k + 16] = (int16_t)Clip3(-32768, 32767, (E[15 - k] - O[15 - k] + add) >> shift);
        }

        src++;
        dst += 32;
    }
}

void partialButterfly4(int16_t *src, int16_t *dst, int shift, int line)
{
    int j;
    int E[2], O[2];
    int add = 1 << (shift - 1);

    for (j = 0; j < line; j++)
    {
        /* E and O */
        E[0] = src[0] + src[3];
        O[0] = src[0] - src[3];
        E[1] = src[1] + src[2];
        O[1] = src[1] - src[2];

        dst[0] = (int16_t)((g_t4[0][0] * E[0] + g_t4[0][1] * E[1] + add) >> shift);
        dst[2 * line] = (int16_t)((g_t4[2][0] * E[0] + g_t4[2][1] * E[1] + add) >> shift);
        dst[line] = (int16_t)((g_t4[1][0] * O[0] + g_t4[1][1] * O[1] + add) >> shift);
        dst[3 * line] = (int16_t)((g_t4[3][0] * O[0] + g_t4[3][1] * O[1] + add) >> shift);

        src += 4;
        dst++;
    }
}
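
// For reference: each partialButterfly* above is an algebraic refactoring of a
// direct multiply by the DCT-II basis (g_t4/g_t8/g_t16/g_t32). Even basis rows
// are symmetric and see only the sums E[], odd rows are antisymmetric and see
// only the differences O[], which halves the multiplication count. An
// illustrative, unused 4-point check (same data layout as partialButterfly4):
#if 0
void refButterfly4(const int16_t *src, int16_t *dst, int shift, int line)
{
    int add = 1 << (shift - 1);

    for (int j = 0; j < line; j++)
    {
        for (int k = 0; k < 4; k++)  // direct dot product per output row
        {
            int sum = add;
            for (int n = 0; n < 4; n++)
                sum += g_t4[k][n] * src[j * 4 + n];

            dst[k * line + j] = (int16_t)(sum >> shift);
        }
    }
}
#endif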

void dst4_c(int16_t *src, int32_t *dst, intptr_t stride)
{
    const int shift_1st = 1 + X265_DEPTH - 8;
    const int shift_2nd = 8;

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    for (int i = 0; i < 4; i++)
    {
        memcpy(&block[i * 4], &src[i * stride], 4 * sizeof(int16_t));
    }

    fastForwardDst(block, coef, shift_1st);
    fastForwardDst(coef, block, shift_2nd);

#define N (4)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            dst[i * N + j] = block[i * N + j];
        }
    }

#undef N
}

void dct4_c(int16_t *src, int32_t *dst, intptr_t stride)
{
    const int shift_1st = 1 + X265_DEPTH - 8;
    const int shift_2nd = 8;

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

    for (int i = 0; i < 4; i++)
    {
        memcpy(&block[i * 4], &src[i * stride], 4 * sizeof(int16_t));
    }

    partialButterfly4(block, coef, shift_1st, 4);
    partialButterfly4(coef, block, shift_2nd, 4);

#define N (4)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            dst[i * N + j] = block[i * N + j];
        }
    }

#undef N
}
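
// A minimal usage sketch for the forward transforms (hypothetical caller, not
// code from this file): src is a row-major residual block with an arbitrary
// stride in int16_t elements; dst receives the coefficients densely packed.
#if 0
void example_dct4()
{
    ALIGN_VAR_32(int16_t, residual[4 * 4]);
    ALIGN_VAR_32(int32_t, coeffs[4 * 4]);

    // ... fill residual[] with a 4x4 prediction error, then:
    dct4_c(residual, coeffs, 4 /* stride */);
}
#endif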

void dct8_c(int16_t *src, int32_t *dst, intptr_t stride)
{
    const int shift_1st = 2 + X265_DEPTH - 8;
    const int shift_2nd = 9;

    ALIGN_VAR_32(int16_t, coef[8 * 8]);
    ALIGN_VAR_32(int16_t, block[8 * 8]);

    for (int i = 0; i < 8; i++)
    {
        memcpy(&block[i * 8], &src[i * stride], 8 * sizeof(int16_t));
    }

    partialButterfly8(block, coef, shift_1st, 8);
    partialButterfly8(coef, block, shift_2nd, 8);

#define N (8)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            dst[i * N + j] = block[i * N + j];
        }
    }

#undef N
}

void dct16_c(int16_t *src, int32_t *dst, intptr_t stride)
{
    const int shift_1st = 3 + X265_DEPTH - 8;
    const int shift_2nd = 10;

    ALIGN_VAR_32(int16_t, coef[16 * 16]);
    ALIGN_VAR_32(int16_t, block[16 * 16]);

    for (int i = 0; i < 16; i++)
    {
        memcpy(&block[i * 16], &src[i * stride], 16 * sizeof(int16_t));
    }

    partialButterfly16(block, coef, shift_1st, 16);
    partialButterfly16(coef, block, shift_2nd, 16);

#define N (16)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            dst[i * N + j] = block[i * N + j];
        }
    }

#undef N
}

void dct32_c(int16_t *src, int32_t *dst, intptr_t stride)
{
    const int shift_1st = 4 + X265_DEPTH - 8;
    const int shift_2nd = 11;

    ALIGN_VAR_32(int16_t, coef[32 * 32]);
    ALIGN_VAR_32(int16_t, block[32 * 32]);

    for (int i = 0; i < 32; i++)
    {
        memcpy(&block[i * 32], &src[i * stride], 32 * sizeof(int16_t));
    }

    partialButterfly32(block, coef, shift_1st, 32);
    partialButterfly32(coef, block, shift_2nd, 32);

#define N (32)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            dst[i * N + j] = block[i * N + j];
        }
    }

#undef N
}
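
// Note on the shift values used by the forward transforms above: for an NxN
// transform, shift_1st == log2(N) - 1 + X265_DEPTH - 8 and shift_2nd ==
// log2(N) + 6, the HEVC choices that keep every intermediate value within
// 16-bit range. For example, at X265_DEPTH == 8 and N == 32 this gives
// shift_1st == 4 and shift_2nd == 11, matching dct32_c.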

void idst4_c(int32_t *src, int16_t *dst, intptr_t stride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

#define N (4)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            block[i * N + j] = (int16_t)src[i * N + j];
        }
    }

#undef N

    inversedst(block, coef, shift_1st); // inverse DST by fast algorithm, block input, coef output
    inversedst(coef, block, shift_2nd); // inverse DST by fast algorithm, coef input, block output

    for (int i = 0; i < 4; i++)
    {
        memcpy(&dst[i * stride], &block[i * 4], 4 * sizeof(int16_t));
    }
}

void idct4_c(int32_t *src, int16_t *dst, intptr_t stride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[4 * 4]);
    ALIGN_VAR_32(int16_t, block[4 * 4]);

#define N (4)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            block[i * N + j] = (int16_t)src[i * N + j];
        }
    }

#undef N

    partialButterflyInverse4(block, coef, shift_1st, 4); // inverse DCT by fast algorithm, block input, coef output
    partialButterflyInverse4(coef, block, shift_2nd, 4); // inverse DCT by fast algorithm, coef input, block output

    for (int i = 0; i < 4; i++)
    {
        memcpy(&dst[i * stride], &block[i * 4], 4 * sizeof(int16_t));
    }
}

void idct8_c(int32_t *src, int16_t *dst, intptr_t stride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[8 * 8]);
    ALIGN_VAR_32(int16_t, block[8 * 8]);

#define N (8)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            block[i * N + j] = (int16_t)src[i * N + j];
        }
    }

#undef N

    partialButterflyInverse8(block, coef, shift_1st, 8);
    partialButterflyInverse8(coef, block, shift_2nd, 8);
    for (int i = 0; i < 8; i++)
    {
        memcpy(&dst[i * stride], &block[i * 8], 8 * sizeof(int16_t));
    }
}

void idct16_c(int32_t *src, int16_t *dst, intptr_t stride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[16 * 16]);
    ALIGN_VAR_32(int16_t, block[16 * 16]);

#define N (16)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            block[i * N + j] = (int16_t)src[i * N + j];
        }
    }

#undef N

    partialButterflyInverse16(block, coef, shift_1st, 16);
    partialButterflyInverse16(coef, block, shift_2nd, 16);
    for (int i = 0; i < 16; i++)
    {
        memcpy(&dst[i * stride], &block[i * 16], 16 * sizeof(int16_t));
    }
}

void idct32_c(int32_t *src, int16_t *dst, intptr_t stride)
{
    const int shift_1st = 7;
    const int shift_2nd = 12 - (X265_DEPTH - 8);

    ALIGN_VAR_32(int16_t, coef[32 * 32]);
    ALIGN_VAR_32(int16_t, block[32 * 32]);

#define N (32)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            block[i * N + j] = (int16_t)src[i * N + j];
        }
    }

#undef N

    partialButterflyInverse32(block, coef, shift_1st, 32);
    partialButterflyInverse32(coef, block, shift_2nd, 32);

    for (int i = 0; i < 32; i++)
    {
        memcpy(&dst[i * stride], &block[i * 32], 32 * sizeof(int16_t));
    }
}

void dequant_normal_c(const int16_t* quantCoef, int32_t* coef, int num, int scale, int shift)
{
#if HIGH_BIT_DEPTH
    X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > 2), "dequant invalid scale %d\n", scale);
#else
    // NOTE: maximum of scale is (72 * 256)
    X265_CHECK(scale < 32768, "dequant invalid scale %d\n", scale);
#endif
    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
    X265_CHECK((num % 8) == 0, "dequant num %d not multiple of 8\n", num);
    X265_CHECK(shift <= 10, "shift too large %d\n", shift);
    X265_CHECK(((intptr_t)coef & 31) == 0, "dequant coef buffer not aligned\n");

    int add, coeffQ;

    add = 1 << (shift - 1);

    for (int n = 0; n < num; n++)
    {
        coeffQ = (quantCoef[n] * scale + add) >> shift;
        coef[n] = Clip3(-32768, 32767, coeffQ);
    }
}
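
// Worked example of the rounding above (arbitrary illustrative values): with
// quantCoef[n] == 7, scale == 40 and shift == 6, add == 32 and
// coef[n] == (7 * 40 + 32) >> 6 == 312 >> 6 == 4.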

void dequant_scaling_c(const int16_t* quantCoef, const int32_t *deQuantCoef, int32_t* coef, int num, int per, int shift)
{
    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);

    int add, coeffQ;

    shift += 4;

    if (shift > per)
    {
        add = 1 << (shift - per - 1);

        for (int n = 0; n < num; n++)
        {
            coeffQ = ((quantCoef[n] * deQuantCoef[n]) + add) >> (shift - per);
            coef[n] = Clip3(-32768, 32767, coeffQ);
        }
    }
    else
    {
        for (int n = 0; n < num; n++)
        {
            coeffQ = Clip3(-32768, 32767, quantCoef[n] * deQuantCoef[n]);
            coef[n] = Clip3(-32768, 32767, coeffQ << (per - shift));
        }
    }
}
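
// The branch on (shift > per) above exists because the effective right shift
// (shift - per) can reach zero or go negative as 'per' (the QP-derived period)
// grows: the first branch rounds while shifting right, while the second must
// shift left instead and therefore clamps twice, so both the 32-bit product
// and the shifted result stay within the signed 16-bit coefficient range.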

uint32_t quant_c(int32_t* coef, int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
{
    X265_CHECK(qBits >= 8, "qBits less than 8\n");
    X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
    int qBits8 = qBits - 8;
    uint32_t numSig = 0;

    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
    {
        int level = coef[blockpos];
        int sign = (level < 0 ? -1 : 1);

        int tmplevel = abs(level) * quantCoeff[blockpos];
        level = ((tmplevel + add) >> qBits);
        deltaU[blockpos] = ((tmplevel - (level << qBits)) >> qBits8);
        if (level)
            ++numSig;
        level *= sign;
        qCoef[blockpos] = (int16_t)Clip3(-32768, 32767, level);
    }

    return numSig;
}
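
// Worked example of the forward quantization above (arbitrary illustrative
// values): with coef[blockpos] == -200, quantCoeff[blockpos] == 1000,
// qBits == 14 and add == 1 << 13: tmplevel == 200000, level ==
// (200000 + 8192) >> 14 == 12, so qCoef gets -12 and deltaU records the
// scaled rounding residue (200000 - (12 << 14)) >> 6 == 53, which later
// rate-distortion decisions (e.g. sign-bit hiding) can consult.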

uint32_t nquant_c(int32_t* coef, int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
{
    X265_CHECK((numCoeff % 16) == 0, "numCoeff is not a multiple of 16 (4x4 blocks)\n");
    X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "add must be less than 2 ^ qBits\n");
    X265_CHECK(((intptr_t)quantCoeff & 31) == 0, "quantCoeff buffer not aligned\n");

    uint32_t numSig = 0;

    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
    {
        int level = coef[blockpos];
        int sign = (level < 0 ? -1 : 1);

        int tmplevel = abs(level) * quantCoeff[blockpos];
        level = ((tmplevel + add) >> qBits);
        if (level)
            ++numSig;
        level *= sign;
        qCoef[blockpos] = (int16_t)Clip3(-32768, 32767, level);
    }

    return numSig;
}

int count_nonzero_c(const int16_t *quantCoeff, int numCoeff)
{
    X265_CHECK(((intptr_t)quantCoeff & 15) == 0, "quant buffer not aligned\n");
    X265_CHECK(numCoeff > 0 && (numCoeff & 15) == 0, "numCoeff invalid %d\n", numCoeff);

    int count = 0;

    for (int i = 0; i < numCoeff; i++)
    {
        count += quantCoeff[i] != 0;
    }

    return count;
}

template<int trSize>
uint32_t copy_count(int16_t* coeff, int16_t* residual, intptr_t stride)
{
    uint32_t numSig = 0;

    for (int k = 0; k < trSize; k++)
    {
        for (int j = 0; j < trSize; j++)
        {
            coeff[k * trSize + j] = residual[k * stride + j];
            numSig += (residual[k * stride + j] != 0);
        }
    }

    return numSig;
}
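
// copy_count<> both packs a strided residual block into a dense coefficient
// array and counts the nonzero entries in a single pass; it is instantiated
// per block size in Setup_C_DCTPrimitives below.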

void denoiseDct_c(int32_t* dctCoef, uint32_t* resSum, uint16_t* offset, int numCoeff)
{
    for (int i = 0; i < numCoeff; i++)
    {
        int level = dctCoef[i];
        int sign = level >> 31;
        level = (level + sign) ^ sign;
        resSum[i] += level;
        level -= offset[i];
        dctCoef[i] = level < 0 ? 0 : (level ^ sign) - sign;
    }
}
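
// The bit tricks above implement a branchless abs and sign restore: for
// level == -5, sign == level >> 31 == -1, and (level + sign) ^ sign ==
// (-6) ^ -1 == 5; after the offset is subtracted, (level ^ sign) - sign
// re-applies the original sign, and any coefficient driven below zero by the
// offset is clamped to 0 (denoised).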

} // closing - anonymous file-static namespace

namespace x265 {
// x265 private namespace

void Setup_C_DCTPrimitives(EncoderPrimitives& p)
{
    p.dequant_scaling = dequant_scaling_c;
    p.dequant_normal = dequant_normal_c;
    p.quant = quant_c;
    p.nquant = nquant_c;
    p.dct[DST_4x4] = dst4_c;
    p.dct[DCT_4x4] = dct4_c;
    p.dct[DCT_8x8] = dct8_c;
    p.dct[DCT_16x16] = dct16_c;
    p.dct[DCT_32x32] = dct32_c;
    p.idct[IDST_4x4] = idst4_c;
    p.idct[IDCT_4x4] = idct4_c;
    p.idct[IDCT_8x8] = idct8_c;
    p.idct[IDCT_16x16] = idct16_c;
    p.idct[IDCT_32x32] = idct32_c;
    p.count_nonzero = count_nonzero_c;
    p.denoiseDct = denoiseDct_c;

    p.copy_cnt[BLOCK_4x4] = copy_count<4>;
    p.copy_cnt[BLOCK_8x8] = copy_count<8>;
    p.copy_cnt[BLOCK_16x16] = copy_count<16>;
    p.copy_cnt[BLOCK_32x32] = copy_count<32>;
}
}
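
/* A minimal sketch of how these C primitives are reached (hypothetical
 * caller; the real call sites live elsewhere in the encoder): the function
 * table is populated once, and transforms are then invoked through it so
 * that optimized assembly versions can transparently override the C ones.
 *
 *     EncoderPrimitives prims;
 *     Setup_C_DCTPrimitives(prims);
 *     prims.dct[DCT_8x8](residual, coeffs, stride);
 */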