/*
 * Ut Video decoder
 * Copyright (c) 2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Ut Video decoder
 */

#include <inttypes.h>
#include <stdlib.h>

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "get_bits.h"
#include "thread.h"
#include "utvideo.h"

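/*
 * Build a VLC table from the 256 per-symbol code lengths that start a
 * plane. Symbols are sorted into canonical Huffman order and codes are
 * assigned from the longest valid code down to the shortest; a length of
 * 255 marks an unused symbol. If the shortest length is zero, the plane
 * contains only a single symbol: no table is built and that symbol is
 * returned through *fsym so the caller can fill the slices directly.
 */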
static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
{
    int i;
    HuffEntry he[256];
    int last;
    uint32_t codes[256];
    uint8_t bits[256];
    uint8_t syms[256];
    uint32_t code;

    *fsym = -1;
    for (i = 0; i < 256; i++) {
        he[i].sym = i;
        he[i].len = *src++;
    }
    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);

    if (!he[0].len) {
        *fsym = he[0].sym;
        return 0;
    }

    last = 255;
    while (he[last].len == 255 && last)
        last--;

    if (he[last].len > 32)
        return -1;

    code = 1;
    for (i = last; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        bits[i]  = he[i].len;
        syms[i]  = he[i].sym;
        code += 0x80000000u >> (he[i].len - 1);
    }

    return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
                              bits,  sizeof(*bits),  sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms,  sizeof(*syms),  sizeof(*syms), 0);
}

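/*
 * Decode a single plane. The plane data begins with 256 Huffman code
 * lengths, followed by one little-endian 32-bit end offset per slice and
 * the slice bitstreams themselves. Each slice is copied into c->slice_bits
 * and byte-swapped in 32-bit words before bit reading. cmask rounds slice
 * boundaries down to even lines for the luma plane of YUV420 content, and
 * with use_pred set the decoded symbols are treated as left-prediction
 * deltas and accumulated into the output.
 */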
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, int step, int stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;
    const int cmask = ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);

    if (build_huff(src, &vlc, &fsym)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width * step; i += step) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256;

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
               slice_size);
        memset(c->slice_bits + slice_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *) c->slice_bits,
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width * step; i += step) {
                if (get_bits_left(&gb) <= 0) {
                    av_log(c->avctx, AV_LOG_ERROR,
                           "Slice decoding ran out of bits\n");
                    goto fail;
                }
                pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    pix   = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
        }
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}

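/*
 * Undo the RGB decorrelation: within each pixel the components at offsets
 * 0 and 2 are stored as differences from the green component at offset 1,
 * biased by 0x80, so green is added back and the bias removed in place.
 */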
static void restore_rgb_planes(uint8_t *src, int step, int stride, int width,
                               int height)
{
    int i, j;
    uint8_t r, g, b;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width * step; i += step) {
            r = src[i];
            g = src[i + 1];
            b = src[i + 2];
            src[i]     = r + g - 0x80;
            src[i + 2] = b + g - 0x80;
        }
        src += stride;
    }
}

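/*
 * Undo median prediction for a progressive plane, slice by slice. The
 * first line of each slice uses left prediction seeded with 0x80, the
 * first pixel of the second line uses its top neighbour, and every other
 * pixel adds the median of the left, top and (left + top - topleft)
 * predictors. A non-zero rmode rounds slice boundaries down to even lines,
 * matching the luma handling in decode_plane for YUV420 content.
 */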
static void restore_median(uint8_t *src, int step, int stride,
                           int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A        = bsrc[i];
        }
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C        = bsrc[-stride];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B        = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B        = bsrc[i - stride];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C        = B;
                A        = bsrc[i];
            }
            bsrc += stride;
        }
    }
}

/* UtVideo interlaced mode treats every two lines as a single one,
 * so the restoring function should take care of possible padding between
 * the two parts of the same "line".
 */
static void restore_median_il(uint8_t *src, int step, int stride,
                              int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);
    const int stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start    = ((slice * height) / slices) & cmask;
        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
        slice_height >>= 1;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A        = bsrc[i];
        }
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A                 = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C        = bsrc[-stride2];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B        = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        for (i = 0; i < width * step; i += step) {
            B                 = bsrc[i - stride];
            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C                 = B;
            A                 = bsrc[stride + i];
        }
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B        = bsrc[i - stride2];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C        = B;
                A        = bsrc[i];
            }
            for (i = 0; i < width * step; i += step) {
                B                 = bsrc[i - stride];
                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
                C                 = B;
                A                 = bsrc[i + stride];
            }
            bsrc += stride2;
        }
    }
}

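/*
 * Frame layout as parsed here: for every plane, 256 bytes of Huffman code
 * lengths, then one little-endian 32-bit end offset per slice, then the
 * slice data itself. The planes are followed by a 32-bit frame information
 * word whose bits 8-9 select the prediction mode.
 */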
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UtvideoContext *c = avctx->priv_data;
    int i, j;
    const uint8_t *plane_start[5];
    int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
    int ret;
    GetByteContext gb;
    ThreadFrame frame = { .f = data };

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    /* parse plane structure to get frame flags and validate slice offsets */
    bytestream2_init(&gb, buf, buf_size);
    for (i = 0; i < c->planes; i++) {
        plane_start[i] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
            return AVERROR_INVALIDDATA;
        }
        bytestream2_skipu(&gb, 256);
        slice_start = 0;
        slice_end   = 0;
        for (j = 0; j < c->slices; j++) {
            slice_end  = bytestream2_get_le32u(&gb);
            slice_size = slice_end - slice_start;
            if (slice_end < 0 || slice_size < 0 ||
                bytestream2_get_bytes_left(&gb) < slice_end) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                return AVERROR_INVALIDDATA;
            }
            slice_start    = slice_end;
            max_slice_size = FFMAX(max_slice_size, slice_size);
        }
        plane_size = slice_end;
        bytestream2_skipu(&gb, plane_size);
    }
    plane_start[c->planes] = gb.buffer;
    if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
        av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
        return AVERROR_INVALIDDATA;
    }
    c->frame_info = bytestream2_get_le32u(&gb);
    av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
           c->frame_info);

    c->frame_pred = (c->frame_info >> 8) & 3;

    if (c->frame_pred == PRED_GRADIENT) {
        avpriv_request_sample(avctx, "Frame with gradient prediction");
        return AVERROR_PATCHWELCOME;
    }

    av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
                   max_slice_size + FF_INPUT_BUFFER_PADDING_SIZE);

    if (!c->slice_bits) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
        return AVERROR(ENOMEM);
    }

    switch (c->avctx->pix_fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
                               c->planes, frame.f->linesize[0], avctx->width,
                               avctx->height, plane_start[i],
                               c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median(frame.f->data[0] + ff_ut_rgb_order[i],
                                   c->planes, frame.f->linesize[0], avctx->width,
                                   avctx->height, c->slices, 0);
                } else {
                    restore_median_il(frame.f->data[0] + ff_ut_rgb_order[i],
                                      c->planes, frame.f->linesize[0],
                                      avctx->width, avctx->height, c->slices,
                                      0);
                }
            }
        }
        restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
                           avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_YUV420P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height >> !!i,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median(frame.f->data[i], 1, frame.f->linesize[i],
                                   avctx->width >> !!i, avctx->height >> !!i,
                                   c->slices, !i);
                } else {
                    restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
                                      avctx->width >> !!i,
                                      avctx->height >> !!i,
                                      c->slices, !i);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV422P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median(frame.f->data[i], 1, frame.f->linesize[i],
                                   avctx->width >> !!i, avctx->height,
                                   c->slices, 0);
                } else {
                    restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
                                      avctx->width >> !!i, avctx->height,
                                      c->slices, 0);
                }
            }
        }
        break;
    }

    frame.f->key_frame        = 1;
    frame.f->pict_type        = AV_PICTURE_TYPE_I;
    frame.f->interlaced_frame = !!c->interlaced;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}

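/*
 * Extradata layout as read here (at least 16 bytes): bytes 0-3 encoder
 * version, bytes 4-7 the original pixel format FourCC, bytes 8-11 the size
 * of the per-frame information field, bytes 12-15 a flags word. In the
 * flags, the top eight bits hold the slice count minus one, bit 11 signals
 * interlaced content and bit 0 the compression mode.
 */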
static av_cold int decode_init(AVCodecContext *avctx)
{
    UtvideoContext * const c = avctx->priv_data;

    c->avctx = avctx;

    ff_bswapdsp_init(&c->bdsp);

    if (avctx->extradata_size < 16) {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size %d, should be at least 16\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
           avctx->extradata[3], avctx->extradata[2],
           avctx->extradata[1], avctx->extradata[0]);
    av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
           AV_RB32(avctx->extradata + 4));
    c->frame_info_size = AV_RL32(avctx->extradata + 8);
    c->flags           = AV_RL32(avctx->extradata + 12);

    if (c->frame_info_size != 4)
        avpriv_request_sample(avctx, "Frame info not 4 bytes");
    av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
    c->slices      = (c->flags >> 24) + 1;
    c->compression = c->flags & 1;
    c->interlaced  = c->flags & 0x800;

    c->slice_bits_size = 0;

    switch (avctx->codec_tag) {
    case MKTAG('U', 'L', 'R', 'G'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case MKTAG('U', 'L', 'R', 'A'):
        c->planes = 4;
        avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case MKTAG('U', 'L', 'Y', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'Y', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'H', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'L', 'H', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
               avctx->codec_tag);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    UtvideoContext * const c = avctx->priv_data;

    av_freep(&c->slice_bits);

    return 0;
}

AVCodec ff_utvideo_decoder = {
    .name           = "utvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_UTVIDEO,
    .priv_data_size = sizeof(UtvideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};