/*
 * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
 */

/**
 * @file
 * huffyuv encoder
 */

#include "avcodec.h"
#include "huffyuv.h"
#include "huffman.h"
#include "huffyuvencdsp.h"
#include "internal.h"
#include "put_bits.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
                              const uint8_t *src0, const uint8_t *src1, int w)
{
    if (s->bps <= 8) {
        s->hencdsp.diff_bytes(dst, src0, src1, w);
    } else {
        s->llviddsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
    }
}

static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    if (s->bps <= 8) {
        if (w < 32) {
            for (i = 0; i < w; i++) {
                const int temp = src[i];
                dst[i] = temp - left;
                left   = temp;
            }
            return left;
        } else {
            for (i = 0; i < 16; i++) {
                const int temp = src[i];
                dst[i] = temp - left;
                left   = temp;
            }
            s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
            return src[w-1];
        }
    } else {
        const uint16_t *src16 = (const uint16_t *)src;
        uint16_t       *dst16 = (      uint16_t *)dst;
        if (w < 32) {
            for (i = 0; i < w; i++) {
                const int temp = src16[i];
                dst16[i] = temp - left;
                left     = temp;
            }
            return left;
        } else {
            for (i = 0; i < 16; i++) {
                const int temp = src16[i];
                dst16[i] = temp - left;
                left     = temp;
            }
            s->llviddsp.diff_int16(dst16 + 16, src16 + 16, src16 + 15, s->n - 1, w - 16);
            return src16[w-1];
        }
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->hencdsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}

static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->hencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}

static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
{
    if (s->bps <= 8) {
        s->hencdsp.sub_hfyu_median_pred(dst, src1, src2, w , left, left_top);
    } else {
        s->llviddsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
    }
}

static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;
    int n = s->vlc_n;

    for (i = 0; i < n;) {
        int val = len[i];
        int repeat = 0;

        for (; i < n && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}

static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
{
    int i, ret;
    int size = 0;
    int count = 3;

    if (s->version > 2)
        count = 1 + s->alpha + 2*s->chroma;

    for (i = 0; i < count; i++) {
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
            return ret;

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
            return -1;
        }

        size += store_table(s, s->len[i], buf + size);
    }
    return size;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;
    int ret;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    ff_huffyuv_common_init(avctx);
    ff_huffyuvencdsp_init(&s->hencdsp);

    avctx->extradata = av_mallocz(3*MAX_N + 4);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    if (s->flags&CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
    }
    s->version = 2;

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    s->bps = desc->comp[0].depth_minus1 + 1;
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
    s->chroma = desc->nb_components > 2;
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
                                     &s->chroma_h_shift,
                                     &s->chroma_v_shift);

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GRAY8A:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUVA420P16:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA444P16:
        s->version = 3;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    s->n = 1<<s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
    s->predictor = avctx->prediction_method;
    s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    }else s->context= 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return AVERROR(EINVAL);
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
        if (s->version > 2) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: ver>2 is not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
        if (s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
        return AVERROR(EINVAL);
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    if (s->version < 3) {
        ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
        ((uint8_t*)avctx->extradata)[3] = 0;
    } else {
        ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
        if (s->chroma)
            ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
        if (s->alpha)
            ((uint8_t*)avctx->extradata)[2] |= 4;
        ((uint8_t*)avctx->extradata)[3] = 1;
    }
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 4; i++) {
                char *next;

                for (j = 0; j < s->vlc_n; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = 100000000 / (d*d + 1);
            }
    }

    ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
    if (ret < 0)
        return ret;
    s->avctx->extradata_size += ret;

    if (s->context) {
        for (i = 0; i < 4; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);
                s->stats[i][j] = pels/(d*d + 1);
            }
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j]= 0;
    }

    if (ff_huffyuv_alloc_temp(s)) {
        ff_huffyuv_common_end(s);
        return AVERROR(ENOMEM);
    }

    s->picture_number=0;

    return 0;
}
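
/* Entropy-code one row of 4:2:2 subsampled samples from s->temp: two luma
 * values plus one U/V pair per iteration, updating the statistics when a
 * two-pass or adaptive (context) run requires it. */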
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2 * i];\
            int y1 = y[2 * i + 1];\
            int u0 = u[i];\
            int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
{
    int i, count = width/2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOADEND\
            int y0 = s->temp[0][width-1];
#define LOADEND_14\
            int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
            int y0 = s->temp16[0][width-1];
#define STATEND\
            s->stats[plane][y0]++;
#define STATEND_16\
            s->stats[plane][y0>>2]++;
#define WRITEEND\
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);

#define LOAD2\
            int y0 = s->temp[0][2 * i];\
            int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
            int y0 = s->temp16[0][2 * i] & mask;\
            int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
            int y0 = s->temp16[0][2 * i];\
            int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
            s->stats[plane][y0]++;\
            s->stats[plane][y1]++;
#define STAT2_16\
            s->stats[plane][y0>>2]++;\
            s->stats[plane][y1>>2]++;
#define WRITE2\
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
            put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);\
            put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
            put_bits(&s->pb, 2, y1&3);

    if (s->bps <= 8) {
        if (s->flags & CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
            }
        }
        if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                WRITEEND;
            }
        }
    } else if (s->bps <= 14) {
        int mask = s->n - 1;
        if (s->flags & CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
            }
        }
        if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                WRITEEND;
            }
        }
    } else {
        if (s->flags & CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
            }
        }
        if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
                WRITEEND_16;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                WRITEEND_16;
            }
        }
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}

static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
        4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD_GBRA                                                       \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];            \
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA                                                       \
    s->stats[0][b]++;                                                   \
    s->stats[1][g]++;                                                   \
    s->stats[2][r]++;                                                   \
    if (planes == 4)                                                    \
        s->stats[2][a]++;

#define WRITE_GBRA                                                      \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);                      \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);                      \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);                      \
    if (planes == 4)                                                    \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}

static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    if (s->context) {
        size = store_huffman_tables(s, pkt->data);
        if (size < 0)
            return size;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->hencdsp.sub_hfyu_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->hencdsp.sub_hfyu_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->hencdsp.sub_hfyu_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->hencdsp.sub_hfyu_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->hencdsp.sub_hfyu_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->hencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->hencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width-1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                      width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else if (s->version > 2) {
        int plane;
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
            int left, y;
            int w = width;
            int h = height;
            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {
                w >>= s->chroma_h_shift;
                h >>= s->chroma_v_shift;
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
            }

            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);

            encode_plane_bitstream(s, w, plane);

            if (s->predictor==MEDIAN) {
                int lefttop;
                y = 1;
                if (s->interlaced) {
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);

                    encode_plane_bitstream(s, w, plane);
                    y++;
                }

                lefttop = p->data[plane][0];

                for (; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);

                    encode_plane_bitstream(s, w, plane);
                }
            } else {
                for (y = 1; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);

                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
                    } else {
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
                    }

                    encode_plane_bitstream(s, w, plane);
                }
            }
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
            if (end <= p)
                return AVERROR(ENOMEM);
        }
    } else if (avctx->stats_out)
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    av_frame_free(&avctx->coded_frame);

    return 0;
}

static const AVOption options[] = {
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism",
      offsetof(HYuvContext, non_determ), AV_OPT_TYPE_INT, { .i64 = 1 },
      0, 1, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .capabilities   = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
    .priv_class     = &normal_class,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
};

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .capabilities   = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
    .priv_class     = &ff_class,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_GRAY8A,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
};
#endif