1/*
2 * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include <math.h>
22#include <stdint.h>
23#include <stdio.h>
24#include <string.h>
25
26#include "libavutil/avutil.h"
27#include "libavutil/bswap.h"
28#include "libavutil/cpu.h"
29#include "libavutil/intreadwrite.h"
30#include "libavutil/mathematics.h"
31#include "libavutil/pixdesc.h"
32#include "libavutil/avassert.h"
33#include "config.h"
34#include "rgb2rgb.h"
35#include "swscale.h"
36#include "swscale_internal.h"
37
38#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
39
40#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
41#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)
42
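/*
 * Shared readers for packed 48/64-bit RGB(A).  input_pixel() loads one
 * 16-bit component respecting the source endianness; the r/b macros swap
 * the first and third components for the BGR-family layouts so a single
 * template serves both channel orders.  With RGB2YUV_SHIFT == 15 the
 * rounding constants also carry the range offsets: 0x2001 << 14 adds a
 * luma offset of 16 << 8 and 0x10001 << 14 centres chroma at 128 << 8,
 * each plus half an LSB for rounding.
 */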
43static av_always_inline void
44rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
45 enum AVPixelFormat origin, int32_t *rgb2yuv)
46{
47 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
48 int i;
49 for (i = 0; i < width; i++) {
50 unsigned int r_b = input_pixel(&src[i*4+0]);
51 unsigned int g = input_pixel(&src[i*4+1]);
52 unsigned int b_r = input_pixel(&src[i*4+2]);
53
54 dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
55 }
56}
57
58static av_always_inline void
59rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
60 const uint16_t *src1, const uint16_t *src2,
61 int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
62{
63 int i;
64 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
65 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
66 av_assert1(src1==src2);
67 for (i = 0; i < width; i++) {
68 int r_b = input_pixel(&src1[i*4+0]);
69 int g = input_pixel(&src1[i*4+1]);
70 int b_r = input_pixel(&src1[i*4+2]);
71
72 dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
73 dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
74 }
75}
76
77static av_always_inline void
78rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
79 const uint16_t *src1, const uint16_t *src2,
80 int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
81{
82 int i;
83 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
84 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
85 av_assert1(src1==src2);
86 for (i = 0; i < width; i++) {
87 int r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
88 int g = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
89 int b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;
90
91 dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
92 dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
93 }
94}
95
96#define rgb64funcs(pattern, BE_LE, origin) \
97static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
98 int width, uint32_t *rgb2yuv) \
99{ \
100 const uint16_t *src = (const uint16_t *) _src; \
101 uint16_t *dst = (uint16_t *) _dst; \
102 rgb64ToY_c_template(dst, src, width, origin, rgb2yuv); \
103} \
104 \
105static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
106 const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
107 int width, uint32_t *rgb2yuv) \
108{ \
109 const uint16_t *src1 = (const uint16_t *) _src1, \
110 *src2 = (const uint16_t *) _src2; \
111 uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
112 rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
113} \
114 \
115static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
116 const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
117 int width, uint32_t *rgb2yuv) \
118{ \
119 const uint16_t *src1 = (const uint16_t *) _src1, \
120 *src2 = (const uint16_t *) _src2; \
121 uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
122 rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
123}
124
125rgb64funcs(rgb, LE, AV_PIX_FMT_RGBA64LE)
126rgb64funcs(rgb, BE, AV_PIX_FMT_RGBA64BE)
127rgb64funcs(bgr, LE, AV_PIX_FMT_BGRA64LE)
128rgb64funcs(bgr, BE, AV_PIX_FMT_BGRA64BE)
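/*
 * Each rgb64funcs() line above expands to three static readers, e.g.
 * rgb64LEToY_c(), rgb64LEToUV_c() and rgb64LEToUV_half_c(), which are
 * installed by ff_sws_init_input_funcs() further down.
 */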
129
130static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
131 const uint16_t *src, int width,
132 enum AVPixelFormat origin,
133 int32_t *rgb2yuv)
134{
135 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
136 int i;
137 for (i = 0; i < width; i++) {
138 unsigned int r_b = input_pixel(&src[i * 3 + 0]);
139 unsigned int g = input_pixel(&src[i * 3 + 1]);
140 unsigned int b_r = input_pixel(&src[i * 3 + 2]);
141
142 dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
143 }
144}
145
146static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
147 uint16_t *dstV,
148 const uint16_t *src1,
149 const uint16_t *src2,
150 int width,
151 enum AVPixelFormat origin,
152 int32_t *rgb2yuv)
153{
154 int i;
155 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
156 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
157 av_assert1(src1 == src2);
158 for (i = 0; i < width; i++) {
159 int r_b = input_pixel(&src1[i * 3 + 0]);
160 int g = input_pixel(&src1[i * 3 + 1]);
161 int b_r = input_pixel(&src1[i * 3 + 2]);
162
163 dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
164 dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
165 }
166}
167
168static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
169 uint16_t *dstV,
170 const uint16_t *src1,
171 const uint16_t *src2,
172 int width,
173 enum AVPixelFormat origin,
174 int32_t *rgb2yuv)
175{
176 int i;
177 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
178 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
179 av_assert1(src1 == src2);
180 for (i = 0; i < width; i++) {
181 int r_b = (input_pixel(&src1[6 * i + 0]) +
182 input_pixel(&src1[6 * i + 3]) + 1) >> 1;
183 int g = (input_pixel(&src1[6 * i + 1]) +
184 input_pixel(&src1[6 * i + 4]) + 1) >> 1;
185 int b_r = (input_pixel(&src1[6 * i + 2]) +
186 input_pixel(&src1[6 * i + 5]) + 1) >> 1;
187
188 dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
189 dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
190 }
191}
192
193#undef r
194#undef b
195#undef input_pixel
196
197#define rgb48funcs(pattern, BE_LE, origin) \
198static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
199 const uint8_t *_src, \
200 const uint8_t *unused0, const uint8_t *unused1,\
201 int width, \
202 uint32_t *rgb2yuv) \
203{ \
204 const uint16_t *src = (const uint16_t *)_src; \
205 uint16_t *dst = (uint16_t *)_dst; \
206 rgb48ToY_c_template(dst, src, width, origin, rgb2yuv); \
207} \
208 \
209static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
210 uint8_t *_dstV, \
211 const uint8_t *unused0, \
212 const uint8_t *_src1, \
213 const uint8_t *_src2, \
214 int width, \
215 uint32_t *rgb2yuv) \
216{ \
217 const uint16_t *src1 = (const uint16_t *)_src1, \
218 *src2 = (const uint16_t *)_src2; \
219 uint16_t *dstU = (uint16_t *)_dstU, \
220 *dstV = (uint16_t *)_dstV; \
221 rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
222} \
223 \
224static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
225 uint8_t *_dstV, \
226 const uint8_t *unused0, \
227 const uint8_t *_src1, \
228 const uint8_t *_src2, \
229 int width, \
230 uint32_t *rgb2yuv) \
231{ \
232 const uint16_t *src1 = (const uint16_t *)_src1, \
233 *src2 = (const uint16_t *)_src2; \
234 uint16_t *dstU = (uint16_t *)_dstU, \
235 *dstV = (uint16_t *)_dstV; \
236 rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
237}
238
239rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
240rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
241rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
242rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)
243
244#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
245 origin == AV_PIX_FMT_BGRA || \
246 origin == AV_PIX_FMT_ARGB || \
247 origin == AV_PIX_FMT_ABGR) \
248 ? AV_RN32A(&src[(i) * 4]) \
249 : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
250 : AV_RL16(&src[(i) * 2])))
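/*
 * For the packed 8-bit-per-component formats (RGBA/BGRA/ARGB/ABGR) a whole
 * 32-bit pixel is loaded as a native aligned word, while the 16/15/12-bit
 * packed formats are read as one 16-bit word in the requested endianness;
 * shp then shifts out any leading alpha/padding bits before the per-format
 * channel masks are applied.
 */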
251
252static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
253 const uint8_t *src,
254 int width,
255 enum AVPixelFormat origin,
256 int shr, int shg,
257 int shb, int shp,
258 int maskr, int maskg,
259 int maskb, int rsh,
260 int gsh, int bsh, int S,
261 int32_t *rgb2yuv)
262{
263 const int ry = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
264 const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
265 int i;
266
267 for (i = 0; i < width; i++) {
268 int px = input_pixel(i) >> shp;
269 int b = (px & maskb) >> shb;
270 int g = (px & maskg) >> shg;
271 int r = (px & maskr) >> shr;
272
273 dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
274 }
275}
276
277static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
278 int16_t *dstV,
279 const uint8_t *src,
280 int width,
281 enum AVPixelFormat origin,
282 int shr, int shg,
283 int shb, int shp,
284 int maskr, int maskg,
285 int maskb, int rsh,
286 int gsh, int bsh, int S,
287 int32_t *rgb2yuv)
288{
289 const int ru = rgb2yuv[RU_IDX] << rsh, gu = rgb2yuv[GU_IDX] << gsh, bu = rgb2yuv[BU_IDX] << bsh,
290 rv = rgb2yuv[RV_IDX] << rsh, gv = rgb2yuv[GV_IDX] << gsh, bv = rgb2yuv[BV_IDX] << bsh;
291 const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
292 int i;
293
294 for (i = 0; i < width; i++) {
295 int px = input_pixel(i) >> shp;
296 int b = (px & maskb) >> shb;
297 int g = (px & maskg) >> shg;
298 int r = (px & maskr) >> shr;
299
300 dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
301 dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
302 }
303}
304
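/*
 * Horizontally downsampling variant: two neighbouring pixels are summed
 * before conversion.  maskgx keeps everything outside the red and blue
 * fields, so g accumulates the green component of both pixels and
 * rb = px0 + px1 - g holds the summed red/blue fields; the channel masks
 * are widened by one bit (maskr |= maskr << 1, ...) to keep the carry of
 * the addition, and the final shift is one larger than in the full
 * resolution template so the pair sum is divided by two.
 */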
305static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
306 int16_t *dstV,
307 const uint8_t *src,
308 int width,
309 enum AVPixelFormat origin,
310 int shr, int shg,
311 int shb, int shp,
312 int maskr, int maskg,
313 int maskb, int rsh,
314 int gsh, int bsh, int S,
315 int32_t *rgb2yuv)
316{
317 const int ru = rgb2yuv[RU_IDX] << rsh, gu = rgb2yuv[GU_IDX] << gsh, bu = rgb2yuv[BU_IDX] << bsh,
318 rv = rgb2yuv[RV_IDX] << rsh, gv = rgb2yuv[GV_IDX] << gsh, bv = rgb2yuv[BV_IDX] << bsh,
319 maskgx = ~(maskr | maskb);
320 const unsigned rnd = (256U<<(S)) + (1<<(S-6));
321 int i;
322
323 maskr |= maskr << 1;
324 maskb |= maskb << 1;
325 maskg |= maskg << 1;
326 for (i = 0; i < width; i++) {
327 unsigned px0 = input_pixel(2 * i + 0) >> shp;
328 unsigned px1 = input_pixel(2 * i + 1) >> shp;
329 int b, r, g = (px0 & maskgx) + (px1 & maskgx);
330 int rb = px0 + px1 - g;
331
332 b = (rb & maskb) >> shb;
333 if (shp ||
334 origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
335 origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
336 g >>= shg;
337 } else {
338 g = (g & maskg) >> shg;
339 }
340 r = (rb & maskr) >> shr;
341
342 dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
343 dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
344 }
345}
346
347#undef input_pixel
348
349#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
350 maskg, maskb, rsh, gsh, bsh, S) \
351static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
352 int width, uint32_t *tab) \
353{ \
354 rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
355 maskr, maskg, maskb, rsh, gsh, bsh, S, tab); \
356} \
357 \
358static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
359 const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
360 int width, uint32_t *tab) \
361{ \
362 rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
363 shr, shg, shb, shp, \
364 maskr, maskg, maskb, rsh, gsh, bsh, S, tab);\
365} \
366 \
367static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
368 const uint8_t *unused0, const uint8_t *src, \
369 const uint8_t *dummy, \
370 int width, uint32_t *tab) \
371{ \
372 rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
373 shr, shg, shb, shp, \
374 maskr, maskg, maskb, \
375 rsh, gsh, bsh, S, tab); \
376}
377
378rgb16_32_wrapper(AV_PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
379rgb16_32_wrapper(AV_PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
380rgb16_32_wrapper(AV_PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
381rgb16_32_wrapper(AV_PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
382rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
383rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
384rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
385rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
386rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
387rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
388rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
389rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
390rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
391rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
392rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
393rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
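/*
 * One wrapper triple (ToY / ToUV / ToUV_half) per packed RGB/BGR layout;
 * the arguments give the per-channel masks, the shifts needed to extract
 * each field and the scale S used for rounding and the final shift.
 */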
394
395static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
396 const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
397 int width, uint32_t *rgb2yuv)
398{
399 uint16_t *dstU = (uint16_t *)_dstU;
400 uint16_t *dstV = (uint16_t *)_dstV;
401 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
402 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
403
404 int i;
405 for (i = 0; i < width; i++) {
406 unsigned int g = gsrc[2*i] + gsrc[2*i+1];
407 unsigned int b = bsrc[2*i] + bsrc[2*i+1];
408 unsigned int r = rsrc[2*i] + rsrc[2*i+1];
409
410 dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
411 dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
412 }
413}
414
415static void rgba64ToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
416 const uint8_t *unused2, int width, uint32_t *unused)
417{
418 int16_t *dst = (int16_t *)_dst;
419 const uint16_t *src = (const uint16_t *)_src;
420 int i;
421 for (i = 0; i < width; i++)
422 dst[i] = src[4 * i + 3];
423}
424
425static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
426{
427 int16_t *dst = (int16_t *)_dst;
428 int i;
429 for (i=0; i<width; i++) {
430 dst[i]= src[4*i]<<6;
431 }
432}
433
434static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
435{
436 int16_t *dst = (int16_t *)_dst;
437 int i;
438 for (i=0; i<width; i++) {
439 dst[i]= src[4*i+3]<<6;
440 }
441}
442
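/*
 * Palette readers: the palette handed in is expected to already hold YUVA
 * values, so each 32-bit entry is read as Y in the low byte, U in bits
 * 8-15, V in bits 16-23 and alpha in the top byte; results are scaled by
 * << 6 to the scaler's 14-bit intermediate range like the other 8-bit
 * readers.
 */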
443static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
444{
445 int16_t *dst = (int16_t *)_dst;
446 int i;
447 for (i=0; i<width; i++) {
448 int d= src[i];
449
450 dst[i]= (pal[d] >> 24)<<6;
451 }
452}
453
454static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
455{
456 int16_t *dst = (int16_t *)_dst;
457 int i;
458 for (i = 0; i < width; i++) {
459 int d = src[i];
460
461 dst[i] = (pal[d] & 0xFF)<<6;
462 }
463}
464
465static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
466 const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
467 int width, uint32_t *pal)
468{
469 uint16_t *dstU = (uint16_t *)_dstU;
470 int16_t *dstV = (int16_t *)_dstV;
471 int i;
472 av_assert1(src1 == src2);
473 for (i = 0; i < width; i++) {
474 int p = pal[src1[i]];
475
476 dstU[i] = (uint8_t)(p>> 8)<<6;
477 dstV[i] = (uint8_t)(p>>16)<<6;
478 }
479}
480
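/*
 * 1 bpp readers: every source byte carries eight pixels, most significant
 * bit first.  MONOWHITE is inverted (a set bit means black), MONOBLACK is
 * used as-is, and each bit expands to 0 or 16383 in the intermediate
 * plane.
 */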
481static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
482{
483 int16_t *dst = (int16_t *)_dst;
484 int i, j;
485 width = (width + 7) >> 3;
486 for (i = 0; i < width; i++) {
487 int d = ~src[i];
488 for (j = 0; j < 8; j++)
489 dst[8*i+j]= ((d>>(7-j))&1) * 16383;
490 }
491 if(width&7){
492 int d= ~src[i];
493 for (j = 0; j < (width&7); j++)
494 dst[8*i+j]= ((d>>(7-j))&1) * 16383;
495 }
496}
497
498static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
499{
500 int16_t *dst = (int16_t *)_dst;
501 int i, j;
502 width = (width + 7) >> 3;
503 for (i = 0; i < width; i++) {
504 int d = src[i];
505 for (j = 0; j < 8; j++)
506 dst[8*i+j]= ((d>>(7-j))&1) * 16383;
507 }
508 if(width&7){
509 int d = src[i];
510 for (j = 0; j < (width&7); j++)
511 dst[8*i+j] = ((d>>(7-j))&1) * 16383;
512 }
513}
514
515static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
516 uint32_t *unused)
517{
518 int i;
519 for (i = 0; i < width; i++)
520 dst[i] = src[2 * i];
521}
522
523static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
524 const uint8_t *src2, int width, uint32_t *unused)
525{
526 int i;
527 for (i = 0; i < width; i++) {
528 dstU[i] = src1[4 * i + 1];
529 dstV[i] = src1[4 * i + 3];
530 }
531 av_assert1(src1 == src2);
532}
533
534static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
535 const uint8_t *src2, int width, uint32_t *unused)
536{
537 int i;
538 for (i = 0; i < width; i++) {
539 dstV[i] = src1[4 * i + 1];
540 dstU[i] = src1[4 * i + 3];
541 }
542 av_assert1(src1 == src2);
543}
544
545static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
546 uint32_t *unused)
547{
548 int i;
549 const uint16_t *src = (const uint16_t *)_src;
550 uint16_t *dst = (uint16_t *)_dst;
551 for (i = 0; i < width; i++)
552 dst[i] = av_bswap16(src[i]);
553}
554
555static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
556 const uint8_t *_src2, int width, uint32_t *unused)
557{
558 int i;
559 const uint16_t *src1 = (const uint16_t *)_src1,
560 *src2 = (const uint16_t *)_src2;
561 uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
562 for (i = 0; i < width; i++) {
563 dstU[i] = av_bswap16(src1[i]);
564 dstV[i] = av_bswap16(src2[i]);
565 }
566}
567
568static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
569 uint32_t *unused)
570{
571 int i;
572 for (i = 0; i < width; i++)
573 AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
574}
575
576static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
577 uint32_t *unused)
578{
579 int i;
580 for (i = 0; i < width; i++)
581 AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
582}
583
584static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
585 uint32_t *unused)
586{
587 int i;
588 for (i = 0; i < width; i++)
589 AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
590}
591
592static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
593 uint32_t *unused)
594{
595 int i;
596 for (i = 0; i < width; i++)
597 AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
598}
599
600/* This is almost identical to the previous, and exists only because
601 * yuy2ToY/UV_c(dst, src + 1, ...) would have 100% unaligned accesses. */
602static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
603 uint32_t *unused)
604{
605 int i;
606 for (i = 0; i < width; i++)
607 dst[i] = src[2 * i + 1];
608}
609
610static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
611 const uint8_t *src2, int width, uint32_t *unused)
612{
613 int i;
614 for (i = 0; i < width; i++) {
615 dstU[i] = src1[4 * i + 0];
616 dstV[i] = src1[4 * i + 2];
617 }
618 av_assert1(src1 == src2);
619}
620
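/* NV12/NV21 store interleaved chroma; the two wrappers below only differ
 * in which output plane receives the first byte of every pair. */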
621static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
622 const uint8_t *src, int width)
623{
624 int i;
625 for (i = 0; i < width; i++) {
626 dst1[i] = src[2 * i + 0];
627 dst2[i] = src[2 * i + 1];
628 }
629}
630
631static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
632 const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
633 int width, uint32_t *unused)
634{
635 nvXXtoUV_c(dstU, dstV, src1, width);
636}
637
638static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
639 const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
640 int width, uint32_t *unused)
641{
642 nvXXtoUV_c(dstV, dstU, src1, width);
643}
644
645#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
646
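/*
 * 8-bit packed RGB/BGR readers.  Output goes to the 14-bit intermediate
 * planes: the >> (RGB2YUV_SHIFT - 6) rescales, and the added constants
 * supply the 16 << 6 luma / 128 << 6 chroma offsets plus half an LSB of
 * rounding (doubled in the *_half variants, which sum two pixels).
 */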
647static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
648 int width, uint32_t *rgb2yuv)
649{
650 int16_t *dst = (int16_t *)_dst;
651 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
652 int i;
653 for (i = 0; i < width; i++) {
654 int b = src[i * 3 + 0];
655 int g = src[i * 3 + 1];
656 int r = src[i * 3 + 2];
657
658 dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
659 }
660}
661
662static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
663 const uint8_t *src2, int width, uint32_t *rgb2yuv)
664{
665 int16_t *dstU = (int16_t *)_dstU;
666 int16_t *dstV = (int16_t *)_dstV;
667 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
668 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
669 int i;
670 for (i = 0; i < width; i++) {
671 int b = src1[3 * i + 0];
672 int g = src1[3 * i + 1];
673 int r = src1[3 * i + 2];
674
675 dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
676 dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
677 }
678 av_assert1(src1 == src2);
679}
680
681static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
682 const uint8_t *src2, int width, uint32_t *rgb2yuv)
683{
684 int16_t *dstU = (int16_t *)_dstU;
685 int16_t *dstV = (int16_t *)_dstV;
686 int i;
687 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
688 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
689 for (i = 0; i < width; i++) {
690 int b = src1[6 * i + 0] + src1[6 * i + 3];
691 int g = src1[6 * i + 1] + src1[6 * i + 4];
692 int r = src1[6 * i + 2] + src1[6 * i + 5];
693
694 dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
695 dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
696 }
697 av_assert1(src1 == src2);
698}
699
700static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
701 uint32_t *rgb2yuv)
702{
703 int16_t *dst = (int16_t *)_dst;
704 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
705 int i;
706 for (i = 0; i < width; i++) {
707 int r = src[i * 3 + 0];
708 int g = src[i * 3 + 1];
709 int b = src[i * 3 + 2];
710
711 dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
712 }
713}
714
715static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
716 const uint8_t *src2, int width, uint32_t *rgb2yuv)
717{
718 int16_t *dstU = (int16_t *)_dstU;
719 int16_t *dstV = (int16_t *)_dstV;
720 int i;
721 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
722 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
723 av_assert1(src1 == src2);
724 for (i = 0; i < width; i++) {
725 int r = src1[3 * i + 0];
726 int g = src1[3 * i + 1];
727 int b = src1[3 * i + 2];
728
729 dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
730 dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
731 }
732}
733
734static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
735 const uint8_t *src2, int width, uint32_t *rgb2yuv)
736{
737 int16_t *dstU = (int16_t *)_dstU;
738 int16_t *dstV = (int16_t *)_dstV;
739 int i;
740 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
741 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
742 av_assert1(src1 == src2);
743 for (i = 0; i < width; i++) {
744 int r = src1[6 * i + 0] + src1[6 * i + 3];
745 int g = src1[6 * i + 1] + src1[6 * i + 4];
746 int b = src1[6 * i + 2] + src1[6 * i + 5];
747
748 dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
749 dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
750 }
751}
752
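/*
 * Planar GBR readers: the planes arrive in G, B, R order (src[0] = G,
 * src[1] = B, src[2] = R, src[3] = A), matching the GBRP/GBRAP pixel
 * formats.
 */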
753static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv)
754{
755 uint16_t *dst = (uint16_t *)_dst;
756 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
757 int i;
758 for (i = 0; i < width; i++) {
759 int g = src[0][i];
760 int b = src[1][i];
761 int r = src[2][i];
762
763 dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
764 }
765}
766
767static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused)
768{
769 uint16_t *dst = (uint16_t *)_dst;
770 int i;
771 for (i = 0; i < width; i++)
772 dst[i] = src[3][i] << 6;
773}
774
775static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv)
776{
777 uint16_t *dstU = (uint16_t *)_dstU;
778 uint16_t *dstV = (uint16_t *)_dstV;
779 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
780 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
781 int i;
782 for (i = 0; i < width; i++) {
783 int g = src[0][i];
784 int b = src[1][i];
785 int r = src[2][i];
786
787 dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
788 dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
789 }
790}
791
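/*
 * Same plane order for the 9-16 bit depths: rdpx() fetches one 16-bit
 * sample in the requested endianness, and the rgb9plus_planar_funcs()
 * macros below instantiate a little- and big-endian reader pair per bit
 * depth.
 */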
792#define rdpx(src) \
793 is_be ? AV_RB16(src) : AV_RL16(src)
794static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
795 int width, int bpc, int is_be, int32_t *rgb2yuv)
796{
797 int i;
798 const uint16_t **src = (const uint16_t **)_src;
799 uint16_t *dst = (uint16_t *)_dst;
800 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
801 int shift = bpc < 16 ? bpc : 14;
802 for (i = 0; i < width; i++) {
803 int g = rdpx(src[0] + i);
804 int b = rdpx(src[1] + i);
805 int r = rdpx(src[2] + i);
806
807 dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
808 }
809}
810
811static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
812 const uint8_t *_src[4], int width,
813 int bpc, int is_be, int32_t *rgb2yuv)
814{
815 int i;
816 const uint16_t **src = (const uint16_t **)_src;
817 uint16_t *dstU = (uint16_t *)_dstU;
818 uint16_t *dstV = (uint16_t *)_dstV;
819 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
820 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
821 int shift = bpc < 16 ? bpc : 14;
822 for (i = 0; i < width; i++) {
823 int g = rdpx(src[0] + i);
824 int b = rdpx(src[1] + i);
825 int r = rdpx(src[2] + i);
826
827 dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
828 dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
829 }
830}
831#undef rdpx
832
833#define rgb9plus_planar_funcs_endian(nbits, endian_name, endian) \
834static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
835 int w, int32_t *rgb2yuv) \
836{ \
837 planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv); \
838} \
839static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
840 const uint8_t *src[4], int w, int32_t *rgb2yuv) \
841{ \
842 planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv); \
843} \
844
845#define rgb9plus_planar_funcs(nbits) \
846 rgb9plus_planar_funcs_endian(nbits, le, 0) \
847 rgb9plus_planar_funcs_endian(nbits, be, 1)
848
849rgb9plus_planar_funcs(9)
850rgb9plus_planar_funcs(10)
851rgb9plus_planar_funcs(12)
852rgb9plus_planar_funcs(14)
853rgb9plus_planar_funcs(16)
854
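/*
 * Pick the per-line input readers for c->srcFormat.  Packed YUV, palette
 * and packed RGB sources get chrToYV12 / lumToYV12 / alpToYV12 callbacks;
 * planar GBR sources use readChrPlanar / readLumPlanar / readAlpPlanar;
 * high-bit-depth planar YUV whose endianness differs from the host goes
 * through the bswap16 readers.  When chroma is horizontally subsampled
 * (c->chrSrcHSubSample) the *ToUV_half_c variants, which average two
 * neighbouring pixels, are installed instead of the full-resolution ones.
 */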
855av_cold void ff_sws_init_input_funcs(SwsContext *c)
856{
857 enum AVPixelFormat srcFormat = c->srcFormat;
858
859 c->chrToYV12 = NULL;
860 switch (srcFormat) {
861 case AV_PIX_FMT_YUYV422:
862 c->chrToYV12 = yuy2ToUV_c;
863 break;
864 case AV_PIX_FMT_YVYU422:
865 c->chrToYV12 = yvy2ToUV_c;
866 break;
867 case AV_PIX_FMT_UYVY422:
868 c->chrToYV12 = uyvyToUV_c;
869 break;
870 case AV_PIX_FMT_NV12:
871 c->chrToYV12 = nv12ToUV_c;
872 break;
873 case AV_PIX_FMT_NV21:
874 c->chrToYV12 = nv21ToUV_c;
875 break;
876 case AV_PIX_FMT_RGB8:
877 case AV_PIX_FMT_BGR8:
878 case AV_PIX_FMT_PAL8:
879 case AV_PIX_FMT_BGR4_BYTE:
880 case AV_PIX_FMT_RGB4_BYTE:
881 c->chrToYV12 = palToUV_c;
882 break;
883 case AV_PIX_FMT_GBRP9LE:
884 c->readChrPlanar = planar_rgb9le_to_uv;
885 break;
886 case AV_PIX_FMT_GBRP10LE:
887 c->readChrPlanar = planar_rgb10le_to_uv;
888 break;
889 case AV_PIX_FMT_GBRP12LE:
890 c->readChrPlanar = planar_rgb12le_to_uv;
891 break;
892 case AV_PIX_FMT_GBRP14LE:
893 c->readChrPlanar = planar_rgb14le_to_uv;
894 break;
895 case AV_PIX_FMT_GBRAP16LE:
896 case AV_PIX_FMT_GBRP16LE:
897 c->readChrPlanar = planar_rgb16le_to_uv;
898 break;
899 case AV_PIX_FMT_GBRP9BE:
900 c->readChrPlanar = planar_rgb9be_to_uv;
901 break;
902 case AV_PIX_FMT_GBRP10BE:
903 c->readChrPlanar = planar_rgb10be_to_uv;
904 break;
905 case AV_PIX_FMT_GBRP12BE:
906 c->readChrPlanar = planar_rgb12be_to_uv;
907 break;
908 case AV_PIX_FMT_GBRP14BE:
909 c->readChrPlanar = planar_rgb14be_to_uv;
910 break;
911 case AV_PIX_FMT_GBRAP16BE:
912 case AV_PIX_FMT_GBRP16BE:
913 c->readChrPlanar = planar_rgb16be_to_uv;
914 break;
915 case AV_PIX_FMT_GBRAP:
916 case AV_PIX_FMT_GBRP:
917 c->readChrPlanar = planar_rgb_to_uv;
918 break;
919#if HAVE_BIGENDIAN
920 case AV_PIX_FMT_YUV444P9LE:
921 case AV_PIX_FMT_YUV422P9LE:
922 case AV_PIX_FMT_YUV420P9LE:
923 case AV_PIX_FMT_YUV422P10LE:
924 case AV_PIX_FMT_YUV444P10LE:
925 case AV_PIX_FMT_YUV420P10LE:
926 case AV_PIX_FMT_YUV422P12LE:
927 case AV_PIX_FMT_YUV444P12LE:
928 case AV_PIX_FMT_YUV420P12LE:
929 case AV_PIX_FMT_YUV422P14LE:
930 case AV_PIX_FMT_YUV444P14LE:
931 case AV_PIX_FMT_YUV420P14LE:
932 case AV_PIX_FMT_YUV420P16LE:
933 case AV_PIX_FMT_YUV422P16LE:
934 case AV_PIX_FMT_YUV444P16LE:
935
936 case AV_PIX_FMT_YUVA444P9LE:
937 case AV_PIX_FMT_YUVA422P9LE:
938 case AV_PIX_FMT_YUVA420P9LE:
939 case AV_PIX_FMT_YUVA444P10LE:
940 case AV_PIX_FMT_YUVA422P10LE:
941 case AV_PIX_FMT_YUVA420P10LE:
942 case AV_PIX_FMT_YUVA420P16LE:
943 case AV_PIX_FMT_YUVA422P16LE:
944 case AV_PIX_FMT_YUVA444P16LE:
945 c->chrToYV12 = bswap16UV_c;
946 break;
947#else
948 case AV_PIX_FMT_YUV444P9BE:
949 case AV_PIX_FMT_YUV422P9BE:
950 case AV_PIX_FMT_YUV420P9BE:
951 case AV_PIX_FMT_YUV444P10BE:
952 case AV_PIX_FMT_YUV422P10BE:
953 case AV_PIX_FMT_YUV420P10BE:
954 case AV_PIX_FMT_YUV444P12BE:
955 case AV_PIX_FMT_YUV422P12BE:
956 case AV_PIX_FMT_YUV420P12BE:
957 case AV_PIX_FMT_YUV444P14BE:
958 case AV_PIX_FMT_YUV422P14BE:
959 case AV_PIX_FMT_YUV420P14BE:
960 case AV_PIX_FMT_YUV420P16BE:
961 case AV_PIX_FMT_YUV422P16BE:
962 case AV_PIX_FMT_YUV444P16BE:
963
964 case AV_PIX_FMT_YUVA444P9BE:
965 case AV_PIX_FMT_YUVA422P9BE:
966 case AV_PIX_FMT_YUVA420P9BE:
967 case AV_PIX_FMT_YUVA444P10BE:
968 case AV_PIX_FMT_YUVA422P10BE:
969 case AV_PIX_FMT_YUVA420P10BE:
970 case AV_PIX_FMT_YUVA420P16BE:
971 case AV_PIX_FMT_YUVA422P16BE:
972 case AV_PIX_FMT_YUVA444P16BE:
973 c->chrToYV12 = bswap16UV_c;
974 break;
975#endif
976 }
977 if (c->chrSrcHSubSample) {
978 switch (srcFormat) {
979 case AV_PIX_FMT_RGBA64BE:
980 c->chrToYV12 = rgb64BEToUV_half_c;
981 break;
982 case AV_PIX_FMT_RGBA64LE:
983 c->chrToYV12 = rgb64LEToUV_half_c;
984 break;
985 case AV_PIX_FMT_BGRA64BE:
986 c->chrToYV12 = bgr64BEToUV_half_c;
987 break;
988 case AV_PIX_FMT_BGRA64LE:
989 c->chrToYV12 = bgr64LEToUV_half_c;
990 break;
991 case AV_PIX_FMT_RGB48BE:
992 c->chrToYV12 = rgb48BEToUV_half_c;
993 break;
994 case AV_PIX_FMT_RGB48LE:
995 c->chrToYV12 = rgb48LEToUV_half_c;
996 break;
997 case AV_PIX_FMT_BGR48BE:
998 c->chrToYV12 = bgr48BEToUV_half_c;
999 break;
1000 case AV_PIX_FMT_BGR48LE:
1001 c->chrToYV12 = bgr48LEToUV_half_c;
1002 break;
1003 case AV_PIX_FMT_RGB32:
1004 c->chrToYV12 = bgr32ToUV_half_c;
1005 break;
1006 case AV_PIX_FMT_RGB32_1:
1007 c->chrToYV12 = bgr321ToUV_half_c;
1008 break;
1009 case AV_PIX_FMT_BGR24:
1010 c->chrToYV12 = bgr24ToUV_half_c;
1011 break;
1012 case AV_PIX_FMT_BGR565LE:
1013 c->chrToYV12 = bgr16leToUV_half_c;
1014 break;
1015 case AV_PIX_FMT_BGR565BE:
1016 c->chrToYV12 = bgr16beToUV_half_c;
1017 break;
1018 case AV_PIX_FMT_BGR555LE:
1019 c->chrToYV12 = bgr15leToUV_half_c;
1020 break;
1021 case AV_PIX_FMT_BGR555BE:
1022 c->chrToYV12 = bgr15beToUV_half_c;
1023 break;
1024 case AV_PIX_FMT_GBRAP:
1025 case AV_PIX_FMT_GBRP:
1026 c->chrToYV12 = gbr24pToUV_half_c;
1027 break;
1028 case AV_PIX_FMT_BGR444LE:
1029 c->chrToYV12 = bgr12leToUV_half_c;
1030 break;
1031 case AV_PIX_FMT_BGR444BE:
1032 c->chrToYV12 = bgr12beToUV_half_c;
1033 break;
1034 case AV_PIX_FMT_BGR32:
1035 c->chrToYV12 = rgb32ToUV_half_c;
1036 break;
1037 case AV_PIX_FMT_BGR32_1:
1038 c->chrToYV12 = rgb321ToUV_half_c;
1039 break;
1040 case AV_PIX_FMT_RGB24:
1041 c->chrToYV12 = rgb24ToUV_half_c;
1042 break;
1043 case AV_PIX_FMT_RGB565LE:
1044 c->chrToYV12 = rgb16leToUV_half_c;
1045 break;
1046 case AV_PIX_FMT_RGB565BE:
1047 c->chrToYV12 = rgb16beToUV_half_c;
1048 break;
1049 case AV_PIX_FMT_RGB555LE:
1050 c->chrToYV12 = rgb15leToUV_half_c;
1051 break;
1052 case AV_PIX_FMT_RGB555BE:
1053 c->chrToYV12 = rgb15beToUV_half_c;
1054 break;
1055 case AV_PIX_FMT_RGB444LE:
1056 c->chrToYV12 = rgb12leToUV_half_c;
1057 break;
1058 case AV_PIX_FMT_RGB444BE:
1059 c->chrToYV12 = rgb12beToUV_half_c;
1060 break;
1061 }
1062 } else {
1063 switch (srcFormat) {
1064 case AV_PIX_FMT_RGBA64BE:
1065 c->chrToYV12 = rgb64BEToUV_c;
1066 break;
1067 case AV_PIX_FMT_RGBA64LE:
1068 c->chrToYV12 = rgb64LEToUV_c;
1069 break;
1070 case AV_PIX_FMT_BGRA64BE:
1071 c->chrToYV12 = bgr64BEToUV_c;
1072 break;
1073 case AV_PIX_FMT_BGRA64LE:
1074 c->chrToYV12 = bgr64LEToUV_c;
1075 break;
1076 case AV_PIX_FMT_RGB48BE:
1077 c->chrToYV12 = rgb48BEToUV_c;
1078 break;
1079 case AV_PIX_FMT_RGB48LE:
1080 c->chrToYV12 = rgb48LEToUV_c;
1081 break;
1082 case AV_PIX_FMT_BGR48BE:
1083 c->chrToYV12 = bgr48BEToUV_c;
1084 break;
1085 case AV_PIX_FMT_BGR48LE:
1086 c->chrToYV12 = bgr48LEToUV_c;
1087 break;
1088 case AV_PIX_FMT_RGB32:
1089 c->chrToYV12 = bgr32ToUV_c;
1090 break;
1091 case AV_PIX_FMT_RGB32_1:
1092 c->chrToYV12 = bgr321ToUV_c;
1093 break;
1094 case AV_PIX_FMT_BGR24:
1095 c->chrToYV12 = bgr24ToUV_c;
1096 break;
1097 case AV_PIX_FMT_BGR565LE:
1098 c->chrToYV12 = bgr16leToUV_c;
1099 break;
1100 case AV_PIX_FMT_BGR565BE:
1101 c->chrToYV12 = bgr16beToUV_c;
1102 break;
1103 case AV_PIX_FMT_BGR555LE:
1104 c->chrToYV12 = bgr15leToUV_c;
1105 break;
1106 case AV_PIX_FMT_BGR555BE:
1107 c->chrToYV12 = bgr15beToUV_c;
1108 break;
1109 case AV_PIX_FMT_BGR444LE:
1110 c->chrToYV12 = bgr12leToUV_c;
1111 break;
1112 case AV_PIX_FMT_BGR444BE:
1113 c->chrToYV12 = bgr12beToUV_c;
1114 break;
1115 case AV_PIX_FMT_BGR32:
1116 c->chrToYV12 = rgb32ToUV_c;
1117 break;
1118 case AV_PIX_FMT_BGR32_1:
1119 c->chrToYV12 = rgb321ToUV_c;
1120 break;
1121 case AV_PIX_FMT_RGB24:
1122 c->chrToYV12 = rgb24ToUV_c;
1123 break;
1124 case AV_PIX_FMT_RGB565LE:
1125 c->chrToYV12 = rgb16leToUV_c;
1126 break;
1127 case AV_PIX_FMT_RGB565BE:
1128 c->chrToYV12 = rgb16beToUV_c;
1129 break;
1130 case AV_PIX_FMT_RGB555LE:
1131 c->chrToYV12 = rgb15leToUV_c;
1132 break;
1133 case AV_PIX_FMT_RGB555BE:
1134 c->chrToYV12 = rgb15beToUV_c;
1135 break;
1136 case AV_PIX_FMT_RGB444LE:
1137 c->chrToYV12 = rgb12leToUV_c;
1138 break;
1139 case AV_PIX_FMT_RGB444BE:
1140 c->chrToYV12 = rgb12beToUV_c;
1141 break;
1142 }
1143 }
1144
1145 c->lumToYV12 = NULL;
1146 c->alpToYV12 = NULL;
1147 switch (srcFormat) {
1148 case AV_PIX_FMT_GBRP9LE:
1149 c->readLumPlanar = planar_rgb9le_to_y;
1150 break;
1151 case AV_PIX_FMT_GBRP10LE:
1152 c->readLumPlanar = planar_rgb10le_to_y;
1153 break;
1154 case AV_PIX_FMT_GBRP12LE:
1155 c->readLumPlanar = planar_rgb12le_to_y;
1156 break;
1157 case AV_PIX_FMT_GBRP14LE:
1158 c->readLumPlanar = planar_rgb14le_to_y;
1159 break;
1160 case AV_PIX_FMT_GBRAP16LE:
1161 case AV_PIX_FMT_GBRP16LE:
1162 c->readLumPlanar = planar_rgb16le_to_y;
1163 break;
1164 case AV_PIX_FMT_GBRP9BE:
1165 c->readLumPlanar = planar_rgb9be_to_y;
1166 break;
1167 case AV_PIX_FMT_GBRP10BE:
1168 c->readLumPlanar = planar_rgb10be_to_y;
1169 break;
1170 case AV_PIX_FMT_GBRP12BE:
1171 c->readLumPlanar = planar_rgb12be_to_y;
1172 break;
1173 case AV_PIX_FMT_GBRP14BE:
1174 c->readLumPlanar = planar_rgb14be_to_y;
1175 break;
1176 case AV_PIX_FMT_GBRAP16BE:
1177 case AV_PIX_FMT_GBRP16BE:
1178 c->readLumPlanar = planar_rgb16be_to_y;
1179 break;
1180 case AV_PIX_FMT_GBRAP:
1181 c->readAlpPlanar = planar_rgb_to_a;
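        /* fall through: GBRAP shares the luma reader with GBRP */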
1182 case AV_PIX_FMT_GBRP:
1183 c->readLumPlanar = planar_rgb_to_y;
1184 break;
1185#if HAVE_BIGENDIAN
1186 case AV_PIX_FMT_YUV444P9LE:
1187 case AV_PIX_FMT_YUV422P9LE:
1188 case AV_PIX_FMT_YUV420P9LE:
1189 case AV_PIX_FMT_YUV444P10LE:
1190 case AV_PIX_FMT_YUV422P10LE:
1191 case AV_PIX_FMT_YUV420P10LE:
1192 case AV_PIX_FMT_YUV444P12LE:
1193 case AV_PIX_FMT_YUV422P12LE:
1194 case AV_PIX_FMT_YUV420P12LE:
1195 case AV_PIX_FMT_YUV444P14LE:
1196 case AV_PIX_FMT_YUV422P14LE:
1197 case AV_PIX_FMT_YUV420P14LE:
1198 case AV_PIX_FMT_YUV420P16LE:
1199 case AV_PIX_FMT_YUV422P16LE:
1200 case AV_PIX_FMT_YUV444P16LE:
1201
1202 case AV_PIX_FMT_GRAY16LE:
1203 c->lumToYV12 = bswap16Y_c;
1204 break;
1205 case AV_PIX_FMT_YUVA444P9LE:
1206 case AV_PIX_FMT_YUVA422P9LE:
1207 case AV_PIX_FMT_YUVA420P9LE:
1208 case AV_PIX_FMT_YUVA444P10LE:
1209 case AV_PIX_FMT_YUVA422P10LE:
1210 case AV_PIX_FMT_YUVA420P10LE:
1211 case AV_PIX_FMT_YUVA420P16LE:
1212 case AV_PIX_FMT_YUVA422P16LE:
1213 case AV_PIX_FMT_YUVA444P16LE:
1214 c->lumToYV12 = bswap16Y_c;
1215 c->alpToYV12 = bswap16Y_c;
1216 break;
1217#else
1218 case AV_PIX_FMT_YUV444P9BE:
1219 case AV_PIX_FMT_YUV422P9BE:
1220 case AV_PIX_FMT_YUV420P9BE:
1221 case AV_PIX_FMT_YUV444P10BE:
1222 case AV_PIX_FMT_YUV422P10BE:
1223 case AV_PIX_FMT_YUV420P10BE:
1224 case AV_PIX_FMT_YUV444P12BE:
1225 case AV_PIX_FMT_YUV422P12BE:
1226 case AV_PIX_FMT_YUV420P12BE:
1227 case AV_PIX_FMT_YUV444P14BE:
1228 case AV_PIX_FMT_YUV422P14BE:
1229 case AV_PIX_FMT_YUV420P14BE:
1230 case AV_PIX_FMT_YUV420P16BE:
1231 case AV_PIX_FMT_YUV422P16BE:
1232 case AV_PIX_FMT_YUV444P16BE:
1233
1234 case AV_PIX_FMT_GRAY16BE:
1235 c->lumToYV12 = bswap16Y_c;
1236 break;
1237 case AV_PIX_FMT_YUVA444P9BE:
1238 case AV_PIX_FMT_YUVA422P9BE:
1239 case AV_PIX_FMT_YUVA420P9BE:
1240 case AV_PIX_FMT_YUVA444P10BE:
1241 case AV_PIX_FMT_YUVA422P10BE:
1242 case AV_PIX_FMT_YUVA420P10BE:
1243 case AV_PIX_FMT_YUVA420P16BE:
1244 case AV_PIX_FMT_YUVA422P16BE:
1245 case AV_PIX_FMT_YUVA444P16BE:
1246 c->lumToYV12 = bswap16Y_c;
1247 c->alpToYV12 = bswap16Y_c;
1248 break;
1249#endif
1250 case AV_PIX_FMT_YA16LE:
1251 c->lumToYV12 = read_ya16le_gray_c;
1252 c->alpToYV12 = read_ya16le_alpha_c;
1253 break;
1254 case AV_PIX_FMT_YA16BE:
1255 c->lumToYV12 = read_ya16be_gray_c;
1256 c->alpToYV12 = read_ya16be_alpha_c;
1257 break;
1258 case AV_PIX_FMT_YUYV422:
1259 case AV_PIX_FMT_YVYU422:
1260 case AV_PIX_FMT_YA8:
1261 c->lumToYV12 = yuy2ToY_c;
1262 break;
1263 case AV_PIX_FMT_UYVY422:
1264 c->lumToYV12 = uyvyToY_c;
1265 break;
1266 case AV_PIX_FMT_BGR24:
1267 c->lumToYV12 = bgr24ToY_c;
1268 break;
1269 case AV_PIX_FMT_BGR565LE:
1270 c->lumToYV12 = bgr16leToY_c;
1271 break;
1272 case AV_PIX_FMT_BGR565BE:
1273 c->lumToYV12 = bgr16beToY_c;
1274 break;
1275 case AV_PIX_FMT_BGR555LE:
1276 c->lumToYV12 = bgr15leToY_c;
1277 break;
1278 case AV_PIX_FMT_BGR555BE:
1279 c->lumToYV12 = bgr15beToY_c;
1280 break;
1281 case AV_PIX_FMT_BGR444LE:
1282 c->lumToYV12 = bgr12leToY_c;
1283 break;
1284 case AV_PIX_FMT_BGR444BE:
1285 c->lumToYV12 = bgr12beToY_c;
1286 break;
1287 case AV_PIX_FMT_RGB24:
1288 c->lumToYV12 = rgb24ToY_c;
1289 break;
1290 case AV_PIX_FMT_RGB565LE:
1291 c->lumToYV12 = rgb16leToY_c;
1292 break;
1293 case AV_PIX_FMT_RGB565BE:
1294 c->lumToYV12 = rgb16beToY_c;
1295 break;
1296 case AV_PIX_FMT_RGB555LE:
1297 c->lumToYV12 = rgb15leToY_c;
1298 break;
1299 case AV_PIX_FMT_RGB555BE:
1300 c->lumToYV12 = rgb15beToY_c;
1301 break;
1302 case AV_PIX_FMT_RGB444LE:
1303 c->lumToYV12 = rgb12leToY_c;
1304 break;
1305 case AV_PIX_FMT_RGB444BE:
1306 c->lumToYV12 = rgb12beToY_c;
1307 break;
1308 case AV_PIX_FMT_RGB8:
1309 case AV_PIX_FMT_BGR8:
1310 case AV_PIX_FMT_PAL8:
1311 case AV_PIX_FMT_BGR4_BYTE:
1312 case AV_PIX_FMT_RGB4_BYTE:
1313 c->lumToYV12 = palToY_c;
1314 break;
1315 case AV_PIX_FMT_MONOBLACK:
1316 c->lumToYV12 = monoblack2Y_c;
1317 break;
1318 case AV_PIX_FMT_MONOWHITE:
1319 c->lumToYV12 = monowhite2Y_c;
1320 break;
1321 case AV_PIX_FMT_RGB32:
1322 c->lumToYV12 = bgr32ToY_c;
1323 break;
1324 case AV_PIX_FMT_RGB32_1:
1325 c->lumToYV12 = bgr321ToY_c;
1326 break;
1327 case AV_PIX_FMT_BGR32:
1328 c->lumToYV12 = rgb32ToY_c;
1329 break;
1330 case AV_PIX_FMT_BGR32_1:
1331 c->lumToYV12 = rgb321ToY_c;
1332 break;
1333 case AV_PIX_FMT_RGB48BE:
1334 c->lumToYV12 = rgb48BEToY_c;
1335 break;
1336 case AV_PIX_FMT_RGB48LE:
1337 c->lumToYV12 = rgb48LEToY_c;
1338 break;
1339 case AV_PIX_FMT_BGR48BE:
1340 c->lumToYV12 = bgr48BEToY_c;
1341 break;
1342 case AV_PIX_FMT_BGR48LE:
1343 c->lumToYV12 = bgr48LEToY_c;
1344 break;
1345 case AV_PIX_FMT_RGBA64BE:
1346 c->lumToYV12 = rgb64BEToY_c;
1347 break;
1348 case AV_PIX_FMT_RGBA64LE:
1349 c->lumToYV12 = rgb64LEToY_c;
1350 break;
1351 case AV_PIX_FMT_BGRA64BE:
1352 c->lumToYV12 = bgr64BEToY_c;
1353 break;
1354 case AV_PIX_FMT_BGRA64LE:
1355 c->lumToYV12 = bgr64LEToY_c;
1356 }
1357 if (c->alpPixBuf) {
1358 if (is16BPS(srcFormat) || isNBPS(srcFormat)) {
1359 if (HAVE_BIGENDIAN == !isBE(srcFormat))
1360 c->alpToYV12 = bswap16Y_c;
1361 }
1362 switch (srcFormat) {
1363 case AV_PIX_FMT_BGRA64LE:
1364 case AV_PIX_FMT_BGRA64BE:
1365 case AV_PIX_FMT_RGBA64LE:
1366 case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64ToA_c; break;
1367 case AV_PIX_FMT_BGRA:
1368 case AV_PIX_FMT_RGBA:
1369 c->alpToYV12 = rgbaToA_c;
1370 break;
1371 case AV_PIX_FMT_ABGR:
1372 case AV_PIX_FMT_ARGB:
1373 c->alpToYV12 = abgrToA_c;
1374 break;
1375 case AV_PIX_FMT_YA8:
1376 c->alpToYV12 = uyvyToY_c;
1377 break;
1378 case AV_PIX_FMT_PAL8 :
1379 c->alpToYV12 = palToA_c;
1380 break;
1381 }
1382 }
1383}