1 | /* |
2 | * utils for libavcodec | |
3 | * Copyright (c) 2001 Fabrice Bellard | |
4 | * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> | |
5 | * | |
6 | * This file is part of FFmpeg. | |
7 | * | |
8 | * FFmpeg is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License as published by the Free Software Foundation; either | |
11 | * version 2.1 of the License, or (at your option) any later version. | |
12 | * | |
13 | * FFmpeg is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * Lesser General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU Lesser General Public | |
19 | * License along with FFmpeg; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
21 | */ | |
22 | ||
23 | /** | |
24 | * @file | |
25 | * Common utility functions for libavcodec (buffer and frame management, codec registration, open/close helpers). |
26 | */ | |
27 | ||
28 | #include "config.h" | |
29 | #include "libavutil/atomic.h" | |
30 | #include "libavutil/attributes.h" | |
31 | #include "libavutil/avassert.h" | |
32 | #include "libavutil/avstring.h" | |
33 | #include "libavutil/bprint.h" | |
34 | #include "libavutil/channel_layout.h" | |
35 | #include "libavutil/crc.h" | |
36 | #include "libavutil/frame.h" | |
37 | #include "libavutil/internal.h" | |
38 | #include "libavutil/mathematics.h" | |
39 | #include "libavutil/pixdesc.h" | |
40 | #include "libavutil/imgutils.h" | |
41 | #include "libavutil/samplefmt.h" | |
42 | #include "libavutil/dict.h" | |
43 | #include "avcodec.h" | |
44 | #include "libavutil/opt.h" | |
45 | #include "me_cmp.h" | |
46 | #include "mpegvideo.h" | |
47 | #include "thread.h" | |
48 | #include "frame_thread_encoder.h" | |
49 | #include "internal.h" | |
50 | #include "raw.h" | |
51 | #include "bytestream.h" | |
52 | #include "version.h" | |
53 | #include <stdlib.h> | |
54 | #include <stdarg.h> | |
55 | #include <limits.h> | |
56 | #include <float.h> | |
57 | #if CONFIG_ICONV | |
58 | # include <iconv.h> | |
59 | #endif | |
60 | ||
61 | #if HAVE_PTHREADS | |
62 | #include <pthread.h> | |
63 | #elif HAVE_W32THREADS | |
64 | #include "compat/w32pthreads.h" | |
65 | #elif HAVE_OS2THREADS | |
66 | #include "compat/os2threads.h" | |
67 | #endif | |
68 | ||
69 | #include "libavutil/ffversion.h" |
70 | const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION; | |
71 | ||
72 | #if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS |
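/* Default lock manager used when threading is available: the mutex backing
 * each lock is created lazily on the first AV_LOCK_OBTAIN. A compare-and-swap
 * publishes the new mutex; if another thread won the race, the freshly
 * created temporary mutex is destroyed and the winner's mutex is used. */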
73 | static int default_lockmgr_cb(void **arg, enum AVLockOp op) | |
74 | { | |
75 | void * volatile * mutex = arg; | |
76 | int err; | |
77 | ||
78 | switch (op) { | |
79 | case AV_LOCK_CREATE: | |
80 | return 0; | |
81 | case AV_LOCK_OBTAIN: | |
82 | if (!*mutex) { | |
83 | pthread_mutex_t *tmp = av_malloc(sizeof(pthread_mutex_t)); | |
84 | if (!tmp) | |
85 | return AVERROR(ENOMEM); | |
86 | if ((err = pthread_mutex_init(tmp, NULL))) { | |
87 | av_free(tmp); | |
88 | return AVERROR(err); | |
89 | } | |
90 | if (avpriv_atomic_ptr_cas(mutex, NULL, tmp)) { | |
91 | pthread_mutex_destroy(tmp); | |
92 | av_free(tmp); | |
93 | } | |
94 | } | |
95 | ||
96 | if ((err = pthread_mutex_lock(*mutex))) | |
97 | return AVERROR(err); | |
98 | ||
99 | return 0; | |
100 | case AV_LOCK_RELEASE: | |
101 | if ((err = pthread_mutex_unlock(*mutex))) | |
102 | return AVERROR(err); | |
103 | ||
104 | return 0; | |
105 | case AV_LOCK_DESTROY: | |
106 | if (*mutex) | |
107 | pthread_mutex_destroy(*mutex); | |
108 | av_free(*mutex); | |
109 | avpriv_atomic_ptr_cas(mutex, *mutex, NULL); | |
110 | return 0; | |
111 | } | |
112 | return 1; | |
113 | } | |
114 | static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = default_lockmgr_cb; | |
115 | #else | |
116 | static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = NULL; | |
117 | #endif | |
118 | ||
119 | ||
120 | volatile int ff_avcodec_locked; | |
121 | static int volatile entangled_thread_counter = 0; | |
122 | static void *codec_mutex; | |
123 | static void *avformat_mutex; | |
124 | ||
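/* Grow the buffer pointed to by *ptr to at least min_size bytes. Returns 0 if
 * the existing buffer was already large enough, 1 if a new buffer was
 * allocated (the old contents are discarded, not copied). The size is padded
 * by roughly 1/16 plus 32 bytes so repeated small growth requests do not
 * reallocate every time. */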
125 | static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc) | |
126 | { | |
127 | void **p = ptr; | |
128 | if (min_size <= *size && *p) |
129 | return 0; |
130 | min_size = FFMAX(17 * min_size / 16 + 32, min_size); | |
131 | av_free(*p); | |
132 | *p = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size); | |
133 | if (!*p) | |
134 | min_size = 0; | |
135 | *size = min_size; | |
136 | return 1; | |
137 | } | |
138 | ||
139 | void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size) | |
140 | { | |
141 | uint8_t **p = ptr; | |
142 | if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { | |
143 | av_freep(p); | |
144 | *size = 0; | |
145 | return; | |
146 | } | |
147 | if (!ff_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE, 1)) | |
148 | memset(*p + min_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); | |
149 | } | |
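/* Illustrative usage sketch (names are assumed, not from this file): decoders
 * typically keep one reusable padded buffer per context and grow it per packet:
 *
 *     av_fast_padded_malloc(&s->buffer, &s->buffer_size, pkt->size);
 *     if (!s->buffer)
 *         return AVERROR(ENOMEM);
 *     memcpy(s->buffer, pkt->data, pkt->size);
 *
 * av_fast_padded_mallocz() below additionally zeroes the whole buffer on every
 * call, whereas av_fast_padded_malloc() only guarantees that the padding is
 * zeroed. */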
150 | ||
151 | void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size) | |
152 | { | |
153 | uint8_t **p = ptr; | |
154 | if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { | |
155 | av_freep(p); | |
156 | *size = 0; | |
157 | return; | |
158 | } | |
159 | if (!ff_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE, 1)) | |
160 | memset(*p, 0, min_size + FF_INPUT_BUFFER_PADDING_SIZE); | |
161 | } | |
162 | ||
163 | /* encoder management */ | |
164 | static AVCodec *first_avcodec = NULL; | |
165 | static AVCodec **last_avcodec = &first_avcodec; | |
166 | ||
167 | AVCodec *av_codec_next(const AVCodec *c) | |
168 | { | |
169 | if (c) | |
170 | return c->next; | |
171 | else | |
172 | return first_avcodec; | |
173 | } | |
174 | ||
175 | static av_cold void avcodec_init(void) | |
176 | { | |
177 | static int initialized = 0; | |
178 | ||
179 | if (initialized != 0) | |
180 | return; | |
181 | initialized = 1; | |
182 | ||
183 | if (CONFIG_ME_CMP) | |
184 | ff_me_cmp_init_static(); | |
185 | } | |
186 | ||
187 | int av_codec_is_encoder(const AVCodec *codec) | |
188 | { | |
189 | return codec && (codec->encode_sub || codec->encode2); | |
190 | } | |
191 | ||
192 | int av_codec_is_decoder(const AVCodec *codec) | |
193 | { | |
194 | return codec && codec->decode; | |
195 | } | |
196 | ||
197 | av_cold void avcodec_register(AVCodec *codec) | |
198 | { | |
199 | AVCodec **p; | |
200 | avcodec_init(); | |
201 | p = last_avcodec; | |
202 | codec->next = NULL; | |
203 | ||
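/* Append to the end of the list with a compare-and-swap so concurrent
 * registrations are safe: if another thread appended first, the CAS fails
 * and we keep walking to the new tail. */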
204 | while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, codec)) | |
205 | p = &(*p)->next; | |
206 | last_avcodec = &codec->next; | |
207 | ||
208 | if (codec->init_static_data) | |
209 | codec->init_static_data(codec); | |
210 | } | |
211 | ||
212 | #if FF_API_EMU_EDGE | |
213 | unsigned avcodec_get_edge_width(void) | |
214 | { | |
215 | return EDGE_WIDTH; | |
216 | } | |
217 | #endif | |
218 | ||
219 | #if FF_API_SET_DIMENSIONS | |
220 | void avcodec_set_dimensions(AVCodecContext *s, int width, int height) | |
221 | { | |
222 | int ret = ff_set_dimensions(s, width, height); | |
223 | if (ret < 0) { | |
224 | av_log(s, AV_LOG_WARNING, "Failed to set dimensions %d %d\n", width, height); | |
225 | } | |
226 | } | |
227 | #endif | |
228 | ||
229 | int ff_set_dimensions(AVCodecContext *s, int width, int height) | |
230 | { | |
231 | int ret = av_image_check_size(width, height, 0, s); | |
232 | ||
233 | if (ret < 0) | |
234 | width = height = 0; | |
235 | ||
236 | s->coded_width = width; | |
237 | s->coded_height = height; | |
238 | s->width = FF_CEIL_RSHIFT(width, s->lowres); | |
239 | s->height = FF_CEIL_RSHIFT(height, s->lowres); | |
240 | ||
241 | return ret; | |
242 | } | |
243 | ||
244 | int ff_set_sar(AVCodecContext *avctx, AVRational sar) | |
245 | { | |
246 | int ret = av_image_check_sar(avctx->width, avctx->height, sar); | |
247 | ||
248 | if (ret < 0) { | |
249 | av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", | |
250 | sar.num, sar.den); | |
251 | avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; | |
252 | return ret; | |
253 | } else { | |
254 | avctx->sample_aspect_ratio = sar; | |
255 | } | |
256 | return 0; | |
257 | } | |
258 | ||
259 | int ff_side_data_update_matrix_encoding(AVFrame *frame, | |
260 | enum AVMatrixEncoding matrix_encoding) | |
261 | { | |
262 | AVFrameSideData *side_data; | |
263 | enum AVMatrixEncoding *data; | |
264 | ||
265 | side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING); | |
266 | if (!side_data) | |
267 | side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING, | |
268 | sizeof(enum AVMatrixEncoding)); | |
269 | ||
270 | if (!side_data) | |
271 | return AVERROR(ENOMEM); | |
272 | ||
273 | data = (enum AVMatrixEncoding*)side_data->data; | |
274 | *data = matrix_encoding; | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
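/* Compute the width/height alignment a decoder needs for its internal
 * buffers: at least the chroma subsampling of the pixel format, widened to
 * codec-specific macroblock requirements below, plus two extra lines for
 * H.264 and lowres decoding where optimized chroma MC may over-read. */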
279 | void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, | |
280 | int linesize_align[AV_NUM_DATA_POINTERS]) | |
281 | { | |
282 | int i; | |
283 | int w_align = 1; | |
284 | int h_align = 1; | |
285 | AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt); |
286 | |
287 | if (desc) { | |
288 | w_align = 1 << desc->log2_chroma_w; | |
289 | h_align = 1 << desc->log2_chroma_h; | |
290 | } | |
291 | ||
292 | switch (s->pix_fmt) { | |
293 | case AV_PIX_FMT_YUV420P: | |
294 | case AV_PIX_FMT_YUYV422: | |
295 | case AV_PIX_FMT_YVYU422: | |
296 | case AV_PIX_FMT_UYVY422: | |
297 | case AV_PIX_FMT_YUV422P: | |
298 | case AV_PIX_FMT_YUV440P: | |
299 | case AV_PIX_FMT_YUV444P: | |
300 | case AV_PIX_FMT_GBRAP: | |
301 | case AV_PIX_FMT_GBRP: | |
302 | case AV_PIX_FMT_GRAY8: | |
303 | case AV_PIX_FMT_GRAY16BE: | |
304 | case AV_PIX_FMT_GRAY16LE: | |
305 | case AV_PIX_FMT_YUVJ420P: | |
306 | case AV_PIX_FMT_YUVJ422P: | |
307 | case AV_PIX_FMT_YUVJ440P: | |
308 | case AV_PIX_FMT_YUVJ444P: | |
309 | case AV_PIX_FMT_YUVA420P: | |
310 | case AV_PIX_FMT_YUVA422P: | |
311 | case AV_PIX_FMT_YUVA444P: | |
312 | case AV_PIX_FMT_YUV420P9LE: | |
313 | case AV_PIX_FMT_YUV420P9BE: | |
314 | case AV_PIX_FMT_YUV420P10LE: | |
315 | case AV_PIX_FMT_YUV420P10BE: | |
316 | case AV_PIX_FMT_YUV420P12LE: | |
317 | case AV_PIX_FMT_YUV420P12BE: | |
318 | case AV_PIX_FMT_YUV420P14LE: | |
319 | case AV_PIX_FMT_YUV420P14BE: | |
320 | case AV_PIX_FMT_YUV420P16LE: | |
321 | case AV_PIX_FMT_YUV420P16BE: | |
322 | case AV_PIX_FMT_YUVA420P9LE: | |
323 | case AV_PIX_FMT_YUVA420P9BE: | |
324 | case AV_PIX_FMT_YUVA420P10LE: | |
325 | case AV_PIX_FMT_YUVA420P10BE: | |
326 | case AV_PIX_FMT_YUVA420P16LE: | |
327 | case AV_PIX_FMT_YUVA420P16BE: | |
328 | case AV_PIX_FMT_YUV422P9LE: | |
329 | case AV_PIX_FMT_YUV422P9BE: | |
330 | case AV_PIX_FMT_YUV422P10LE: | |
331 | case AV_PIX_FMT_YUV422P10BE: | |
332 | case AV_PIX_FMT_YUV422P12LE: | |
333 | case AV_PIX_FMT_YUV422P12BE: | |
334 | case AV_PIX_FMT_YUV422P14LE: | |
335 | case AV_PIX_FMT_YUV422P14BE: | |
336 | case AV_PIX_FMT_YUV422P16LE: | |
337 | case AV_PIX_FMT_YUV422P16BE: | |
338 | case AV_PIX_FMT_YUVA422P9LE: | |
339 | case AV_PIX_FMT_YUVA422P9BE: | |
340 | case AV_PIX_FMT_YUVA422P10LE: | |
341 | case AV_PIX_FMT_YUVA422P10BE: | |
342 | case AV_PIX_FMT_YUVA422P16LE: | |
343 | case AV_PIX_FMT_YUVA422P16BE: | |
344 | case AV_PIX_FMT_YUV444P9LE: | |
345 | case AV_PIX_FMT_YUV444P9BE: | |
346 | case AV_PIX_FMT_YUV444P10LE: | |
347 | case AV_PIX_FMT_YUV444P10BE: | |
348 | case AV_PIX_FMT_YUV444P12LE: | |
349 | case AV_PIX_FMT_YUV444P12BE: | |
350 | case AV_PIX_FMT_YUV444P14LE: | |
351 | case AV_PIX_FMT_YUV444P14BE: | |
352 | case AV_PIX_FMT_YUV444P16LE: | |
353 | case AV_PIX_FMT_YUV444P16BE: | |
354 | case AV_PIX_FMT_YUVA444P9LE: | |
355 | case AV_PIX_FMT_YUVA444P9BE: | |
356 | case AV_PIX_FMT_YUVA444P10LE: | |
357 | case AV_PIX_FMT_YUVA444P10BE: | |
358 | case AV_PIX_FMT_YUVA444P16LE: | |
359 | case AV_PIX_FMT_YUVA444P16BE: | |
360 | case AV_PIX_FMT_GBRP9LE: | |
361 | case AV_PIX_FMT_GBRP9BE: | |
362 | case AV_PIX_FMT_GBRP10LE: | |
363 | case AV_PIX_FMT_GBRP10BE: | |
364 | case AV_PIX_FMT_GBRP12LE: | |
365 | case AV_PIX_FMT_GBRP12BE: | |
366 | case AV_PIX_FMT_GBRP14LE: | |
367 | case AV_PIX_FMT_GBRP14BE: | |
368 | case AV_PIX_FMT_GBRP16LE: | |
369 | case AV_PIX_FMT_GBRP16BE: | |
370 | w_align = 16; //FIXME assume 16 pixels per macroblock |
371 | h_align = 16 * 2; // interlaced needs 2 macroblocks height | |
372 | break; | |
373 | case AV_PIX_FMT_YUV411P: | |
374 | case AV_PIX_FMT_YUVJ411P: | |
375 | case AV_PIX_FMT_UYYVYY411: | |
376 | w_align = 32; | |
377 | h_align = 8; | |
378 | break; | |
379 | case AV_PIX_FMT_YUV410P: | |
380 | if (s->codec_id == AV_CODEC_ID_SVQ1) { | |
381 | w_align = 64; | |
382 | h_align = 64; | |
383 | } | |
384 | break; | |
385 | case AV_PIX_FMT_RGB555: | |
386 | if (s->codec_id == AV_CODEC_ID_RPZA) { | |
387 | w_align = 4; | |
388 | h_align = 4; | |
389 | } | |
390 | break; | |
391 | case AV_PIX_FMT_PAL8: | |
392 | case AV_PIX_FMT_BGR8: | |
393 | case AV_PIX_FMT_RGB8: | |
394 | if (s->codec_id == AV_CODEC_ID_SMC || | |
395 | s->codec_id == AV_CODEC_ID_CINEPAK) { | |
396 | w_align = 4; | |
397 | h_align = 4; | |
398 | } | |
399 | if (s->codec_id == AV_CODEC_ID_JV) { | |
400 | w_align = 8; | |
401 | h_align = 8; | |
402 | } | |
403 | break; | |
404 | case AV_PIX_FMT_BGR24: | |
405 | if ((s->codec_id == AV_CODEC_ID_MSZH) || | |
406 | (s->codec_id == AV_CODEC_ID_ZLIB)) { | |
407 | w_align = 4; | |
408 | h_align = 4; | |
409 | } | |
410 | break; | |
411 | case AV_PIX_FMT_RGB24: | |
412 | if (s->codec_id == AV_CODEC_ID_CINEPAK) { | |
413 | w_align = 4; | |
414 | h_align = 4; | |
415 | } | |
416 | break; | |
417 | default: | |
418 | break; | |
419 | } | |
420 | ||
421 | if (s->codec_id == AV_CODEC_ID_IFF_ILBM || s->codec_id == AV_CODEC_ID_IFF_BYTERUN1) { | |
422 | w_align = FFMAX(w_align, 8); | |
423 | } | |
424 | ||
425 | *width = FFALIGN(*width, w_align); | |
426 | *height = FFALIGN(*height, h_align); | |
427 | if (s->codec_id == AV_CODEC_ID_H264 || s->lowres) | |
428 | // some of the optimized chroma MC reads one line too much | |
429 | // which is also done in mpeg decoders with lowres > 0 | |
430 | *height += 2; | |
431 | ||
432 | for (i = 0; i < 4; i++) | |
433 | linesize_align[i] = STRIDE_ALIGN; | |
434 | } | |
435 | ||
436 | void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height) | |
437 | { | |
438 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt); | |
439 | int chroma_shift = desc->log2_chroma_w; | |
440 | int linesize_align[AV_NUM_DATA_POINTERS]; | |
441 | int align; | |
442 | ||
443 | avcodec_align_dimensions2(s, width, height, linesize_align); | |
444 | align = FFMAX(linesize_align[0], linesize_align[3]); | |
445 | linesize_align[1] <<= chroma_shift; | |
446 | linesize_align[2] <<= chroma_shift; | |
447 | align = FFMAX3(align, linesize_align[1], linesize_align[2]); | |
448 | *width = FFALIGN(*width, align); | |
449 | } | |
450 | ||
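/* Convert an AVChromaLocation into x/y chroma sample positions expressed in
 * 1/256ths of a luma sample (0 = left/top aligned, 128 = centered, 256 =
 * bottom). For example, AVCHROMA_LOC_LEFT maps to (0,128),
 * AVCHROMA_LOC_CENTER to (128,128) and AVCHROMA_LOC_TOPLEFT to (0,0). */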
451 | int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos) | |
452 | { | |
453 | if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB) | |
454 | return AVERROR(EINVAL); | |
455 | pos--; | |
456 | ||
457 | *xpos = (pos&1) * 128; | |
458 | *ypos = ((pos>>1)^(pos<4)) * 128; | |
459 | ||
460 | return 0; | |
461 | } | |
462 | ||
463 | enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos) | |
464 | { | |
465 | int pos, xout, yout; | |
466 | ||
467 | for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) { | |
468 | if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos) | |
469 | return pos; | |
470 | } | |
471 | return AVCHROMA_LOC_UNSPECIFIED; | |
472 | } | |
473 | ||
474 | int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, | |
475 | enum AVSampleFormat sample_fmt, const uint8_t *buf, | |
476 | int buf_size, int align) | |
477 | { | |
478 | int ch, planar, needed_size, ret = 0; | |
479 | ||
480 | needed_size = av_samples_get_buffer_size(NULL, nb_channels, | |
481 | frame->nb_samples, sample_fmt, | |
482 | align); | |
483 | if (buf_size < needed_size) | |
484 | return AVERROR(EINVAL); | |
485 | ||
486 | planar = av_sample_fmt_is_planar(sample_fmt); | |
487 | if (planar && nb_channels > AV_NUM_DATA_POINTERS) { | |
488 | if (!(frame->extended_data = av_mallocz_array(nb_channels, | |
489 | sizeof(*frame->extended_data)))) | |
490 | return AVERROR(ENOMEM); | |
491 | } else { | |
492 | frame->extended_data = frame->data; | |
493 | } | |
494 | ||
495 | if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0], | |
496 | (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples, | |
497 | sample_fmt, align)) < 0) { | |
498 | if (frame->extended_data != frame->data) | |
499 | av_freep(&frame->extended_data); | |
500 | return ret; | |
501 | } | |
502 | if (frame->extended_data != frame->data) { | |
503 | for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++) | |
504 | frame->data[ch] = frame->extended_data[ch]; | |
505 | } | |
506 | ||
507 | return ret; | |
508 | } | |
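/* Illustrative usage sketch (variable names assumed): wrap an existing
 * interleaved PCM buffer in an AVFrame without copying. frame->nb_samples
 * must already be set, since the required buffer size is derived from it:
 *
 *     frame->nb_samples = nb_samples;
 *     ret = avcodec_fill_audio_frame(frame, channels, AV_SAMPLE_FMT_S16,
 *                                    buf, buf_size, 0);
 */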
509 | ||
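/* (Re)build the per-context buffer pools when the frame geometry or format
 * changes: one pool per plane for video, sized from the computed plane sizes,
 * and a single pool holding one plane's worth of samples for audio. */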
510 | static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) | |
511 | { | |
512 | FramePool *pool = avctx->internal->pool; | |
513 | int i, ret; | |
514 | ||
515 | switch (avctx->codec_type) { | |
516 | case AVMEDIA_TYPE_VIDEO: { | |
517 | AVPicture picture; | |
518 | int size[4] = { 0 }; | |
519 | int w = frame->width; | |
520 | int h = frame->height; | |
521 | int tmpsize, unaligned; | |
522 | ||
523 | if (pool->format == frame->format && | |
524 | pool->width == frame->width && pool->height == frame->height) | |
525 | return 0; | |
526 | ||
527 | avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align); | |
528 | ||
529 | do { | |
530 | // NOTE: do not align linesizes individually, this breaks e.g. assumptions | |
531 | // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2 | |
532 | av_image_fill_linesizes(picture.linesize, avctx->pix_fmt, w); | |
533 | // increase alignment of w for next try (rhs gives the lowest bit set in w) | |
534 | w += w & ~(w - 1); | |
535 | ||
536 | unaligned = 0; | |
537 | for (i = 0; i < 4; i++) | |
538 | unaligned |= picture.linesize[i] % pool->stride_align[i]; | |
539 | } while (unaligned); | |
540 | ||
541 | tmpsize = av_image_fill_pointers(picture.data, avctx->pix_fmt, h, | |
542 | NULL, picture.linesize); | |
543 | if (tmpsize < 0) | |
544 | return -1; | |
545 | ||
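/* Derive each plane's size from the distance between consecutive plane
 * pointers returned by av_image_fill_pointers(); the last plane gets
 * whatever remains of the total image size. */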
546 | for (i = 0; i < 3 && picture.data[i + 1]; i++) | |
547 | size[i] = picture.data[i + 1] - picture.data[i]; | |
548 | size[i] = tmpsize - (picture.data[i] - picture.data[0]); | |
549 | ||
550 | for (i = 0; i < 4; i++) { | |
551 | av_buffer_pool_uninit(&pool->pools[i]); | |
552 | pool->linesize[i] = picture.linesize[i]; | |
553 | if (size[i]) { | |
554 | pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1, | |
555 | CONFIG_MEMORY_POISONING ? | |
556 | NULL : | |
557 | av_buffer_allocz); | |
558 | if (!pool->pools[i]) { | |
559 | ret = AVERROR(ENOMEM); | |
560 | goto fail; | |
561 | } | |
562 | } | |
563 | } | |
564 | pool->format = frame->format; | |
565 | pool->width = frame->width; | |
566 | pool->height = frame->height; | |
567 | ||
568 | break; | |
569 | } | |
570 | case AVMEDIA_TYPE_AUDIO: { | |
571 | int ch = av_frame_get_channels(frame); //av_get_channel_layout_nb_channels(frame->channel_layout); | |
572 | int planar = av_sample_fmt_is_planar(frame->format); | |
573 | int planes = planar ? ch : 1; | |
574 | ||
575 | if (pool->format == frame->format && pool->planes == planes && | |
576 | pool->channels == ch && frame->nb_samples == pool->samples) | |
577 | return 0; | |
578 | ||
579 | av_buffer_pool_uninit(&pool->pools[0]); | |
580 | ret = av_samples_get_buffer_size(&pool->linesize[0], ch, | |
581 | frame->nb_samples, frame->format, 0); | |
582 | if (ret < 0) | |
583 | goto fail; | |
584 | ||
585 | pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL); | |
586 | if (!pool->pools[0]) { | |
587 | ret = AVERROR(ENOMEM); | |
588 | goto fail; | |
589 | } | |
590 | ||
591 | pool->format = frame->format; | |
592 | pool->planes = planes; | |
593 | pool->channels = ch; | |
594 | pool->samples = frame->nb_samples; | |
595 | break; | |
596 | } | |
597 | default: av_assert0(0); | |
598 | } | |
599 | return 0; | |
600 | fail: | |
601 | for (i = 0; i < 4; i++) | |
602 | av_buffer_pool_uninit(&pool->pools[i]); | |
603 | pool->format = -1; | |
604 | pool->planes = pool->channels = pool->samples = 0; | |
605 | pool->width = pool->height = 0; | |
606 | return ret; | |
607 | } | |
608 | ||
609 | static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) | |
610 | { | |
611 | FramePool *pool = avctx->internal->pool; | |
612 | int planes = pool->planes; | |
613 | int i; | |
614 | ||
615 | frame->linesize[0] = pool->linesize[0]; | |
616 | ||
617 | if (planes > AV_NUM_DATA_POINTERS) { | |
618 | frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data)); | |
619 | frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS; | |
620 | frame->extended_buf = av_mallocz_array(frame->nb_extended_buf, | |
621 | sizeof(*frame->extended_buf)); | |
622 | if (!frame->extended_data || !frame->extended_buf) { | |
623 | av_freep(&frame->extended_data); | |
624 | av_freep(&frame->extended_buf); | |
625 | return AVERROR(ENOMEM); | |
626 | } | |
627 | } else { | |
628 | frame->extended_data = frame->data; | |
629 | av_assert0(frame->nb_extended_buf == 0); | |
630 | } | |
631 | ||
632 | for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) { | |
633 | frame->buf[i] = av_buffer_pool_get(pool->pools[0]); | |
634 | if (!frame->buf[i]) | |
635 | goto fail; | |
636 | frame->extended_data[i] = frame->data[i] = frame->buf[i]->data; | |
637 | } | |
638 | for (i = 0; i < frame->nb_extended_buf; i++) { | |
639 | frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]); | |
640 | if (!frame->extended_buf[i]) | |
641 | goto fail; | |
642 | frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data; | |
643 | } | |
644 | ||
645 | if (avctx->debug & FF_DEBUG_BUFFERS) | |
646 | av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p\n", frame); |
647 | ||
648 | return 0; | |
649 | fail: | |
650 | av_frame_unref(frame); | |
651 | return AVERROR(ENOMEM); | |
652 | } | |
653 | ||
654 | static int video_get_buffer(AVCodecContext *s, AVFrame *pic) | |
655 | { | |
656 | FramePool *pool = s->internal->pool; | |
657 | int i; | |
658 | ||
659 | if (pic->data[0]) { | |
660 | av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n"); | |
661 | return -1; | |
662 | } | |
663 | ||
664 | memset(pic->data, 0, sizeof(pic->data)); | |
665 | pic->extended_data = pic->data; | |
666 | ||
667 | for (i = 0; i < 4 && pool->pools[i]; i++) { | |
668 | pic->linesize[i] = pool->linesize[i]; | |
669 | ||
670 | pic->buf[i] = av_buffer_pool_get(pool->pools[i]); | |
671 | if (!pic->buf[i]) | |
672 | goto fail; | |
673 | ||
674 | pic->data[i] = pic->buf[i]->data; | |
675 | } | |
676 | for (; i < AV_NUM_DATA_POINTERS; i++) { | |
677 | pic->data[i] = NULL; | |
678 | pic->linesize[i] = 0; | |
679 | } | |
680 | if (pic->data[1] && !pic->data[2]) | |
681 | avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt); | |
682 | ||
683 | if (s->debug & FF_DEBUG_BUFFERS) | |
684 | av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic); | |
685 | ||
686 | return 0; | |
687 | fail: | |
688 | av_frame_unref(pic); | |
689 | return AVERROR(ENOMEM); | |
690 | } | |
691 | ||
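/* Fill every plane of a planar frame with the per-plane value c[p], using
 * 16-bit stores for formats deeper than 8 bits per component and plain
 * memset() otherwise. Chroma planes are filled at their subsampled size. */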
692 | void avpriv_color_frame(AVFrame *frame, const int c[4]) | |
693 | { | |
694 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); | |
695 | int p, y, x; | |
696 | ||
697 | av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR); | |
698 | ||
699 | for (p = 0; p<desc->nb_components; p++) { | |
700 | uint8_t *dst = frame->data[p]; | |
701 | int is_chroma = p == 1 || p == 2; | |
702 | int bytes = is_chroma ? FF_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width; | |
703 | int height = is_chroma ? FF_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height; | |
704 | for (y = 0; y < height; y++) { | |
705 | if (desc->comp[0].depth_minus1 >= 8) { | |
706 | for (x = 0; x<bytes; x++) | |
707 | ((uint16_t*)dst)[x] = c[p]; | |
708 | }else | |
709 | memset(dst, c[p], bytes); | |
710 | dst += frame->linesize[p]; | |
711 | } | |
712 | } | |
713 | } | |
714 | ||
715 | int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags) | |
716 | { | |
717 | int ret; | |
718 | ||
719 | if ((ret = update_frame_pool(avctx, frame)) < 0) | |
720 | return ret; | |
721 | ||
722 | #if FF_API_GET_BUFFER | |
723 | FF_DISABLE_DEPRECATION_WARNINGS | |
724 | frame->type = FF_BUFFER_TYPE_INTERNAL; | |
725 | FF_ENABLE_DEPRECATION_WARNINGS | |
726 | #endif | |
727 | ||
728 | switch (avctx->codec_type) { | |
729 | case AVMEDIA_TYPE_VIDEO: | |
730 | return video_get_buffer(avctx, frame); | |
731 | case AVMEDIA_TYPE_AUDIO: | |
732 | return audio_get_buffer(avctx, frame); | |
733 | default: | |
734 | return -1; | |
735 | } | |
736 | } | |
737 | ||
738 | int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame) | |
739 | { | |
740 | AVPacket *pkt = avctx->internal->pkt; | |
741 | int i; |
742 | static const struct { | |
743 | enum AVPacketSideDataType packet; | |
744 | enum AVFrameSideDataType frame; | |
745 | } sd[] = { | |
746 | { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN }, | |
747 | { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX }, | |
748 | { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D }, | |
749 | }; | |
750 | |
751 | if (pkt) { | |
752 | frame->pkt_pts = pkt->pts; |
753 | av_frame_set_pkt_pos (frame, pkt->pos); | |
754 | av_frame_set_pkt_duration(frame, pkt->duration); | |
755 | av_frame_set_pkt_size (frame, pkt->size); | |
756 | ||
757 | for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) { |
758 | int size; | |
759 | uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size); | |
760 | if (packet_sd) { | |
761 | AVFrameSideData *frame_sd = av_frame_new_side_data(frame, | |
762 | sd[i].frame, | |
763 | size); | |
764 | if (!frame_sd) | |
765 | return AVERROR(ENOMEM); | |
766 | ||
767 | memcpy(frame_sd->data, packet_sd, size); | |
768 | } | |
769 | } |
770 | } else { | |
771 | frame->pkt_pts = AV_NOPTS_VALUE; | |
772 | av_frame_set_pkt_pos (frame, -1); | |
773 | av_frame_set_pkt_duration(frame, 0); | |
774 | av_frame_set_pkt_size (frame, -1); | |
775 | } | |
776 | frame->reordered_opaque = avctx->reordered_opaque; | |
777 | ||
778 | if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED) | |
779 | frame->color_primaries = avctx->color_primaries; | |
780 | if (frame->color_trc == AVCOL_TRC_UNSPECIFIED) | |
781 | frame->color_trc = avctx->color_trc; | |
782 | if (av_frame_get_colorspace(frame) == AVCOL_SPC_UNSPECIFIED) | |
783 | av_frame_set_colorspace(frame, avctx->colorspace); | |
784 | if (av_frame_get_color_range(frame) == AVCOL_RANGE_UNSPECIFIED) | |
785 | av_frame_set_color_range(frame, avctx->color_range); | |
786 | if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED) | |
787 | frame->chroma_location = avctx->chroma_sample_location; | |
788 | ||
789 | switch (avctx->codec->type) { | |
790 | case AVMEDIA_TYPE_VIDEO: | |
791 | frame->format = avctx->pix_fmt; | |
792 | if (!frame->sample_aspect_ratio.num) | |
793 | frame->sample_aspect_ratio = avctx->sample_aspect_ratio; | |
794 | ||
795 | if (frame->width && frame->height && | |
796 | av_image_check_sar(frame->width, frame->height, | |
797 | frame->sample_aspect_ratio) < 0) { | |
798 | av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", | |
799 | frame->sample_aspect_ratio.num, | |
800 | frame->sample_aspect_ratio.den); | |
801 | frame->sample_aspect_ratio = (AVRational){ 0, 1 }; | |
802 | } | |
803 | ||
804 | break; | |
805 | case AVMEDIA_TYPE_AUDIO: | |
806 | if (!frame->sample_rate) | |
807 | frame->sample_rate = avctx->sample_rate; | |
808 | if (frame->format < 0) | |
809 | frame->format = avctx->sample_fmt; | |
810 | if (!frame->channel_layout) { | |
811 | if (avctx->channel_layout) { | |
812 | if (av_get_channel_layout_nb_channels(avctx->channel_layout) != | |
813 | avctx->channels) { | |
814 | av_log(avctx, AV_LOG_ERROR, "Inconsistent channel " | |
815 | "configuration.\n"); | |
816 | return AVERROR(EINVAL); | |
817 | } | |
818 | ||
819 | frame->channel_layout = avctx->channel_layout; | |
820 | } else { | |
821 | if (avctx->channels > FF_SANE_NB_CHANNELS) { | |
822 | av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n", | |
823 | avctx->channels); | |
824 | return AVERROR(ENOSYS); | |
825 | } | |
826 | } | |
827 | } | |
828 | av_frame_set_channels(frame, avctx->channels); | |
829 | break; | |
830 | } | |
831 | return 0; | |
832 | } | |
833 | ||
834 | #if FF_API_GET_BUFFER | |
835 | FF_DISABLE_DEPRECATION_WARNINGS | |
836 | int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame) | |
837 | { | |
838 | return avcodec_default_get_buffer2(avctx, frame, 0); | |
839 | } | |
840 | ||
841 | typedef struct CompatReleaseBufPriv { | |
842 | AVCodecContext avctx; | |
843 | AVFrame frame; | |
844 | uint8_t avframe_padding[1024]; // hack to allow linking to an avutil with a larger AVFrame |
845 | } CompatReleaseBufPriv; | |
846 | ||
847 | static void compat_free_buffer(void *opaque, uint8_t *data) | |
848 | { | |
849 | CompatReleaseBufPriv *priv = opaque; | |
850 | if (priv->avctx.release_buffer) | |
851 | priv->avctx.release_buffer(&priv->avctx, &priv->frame); | |
852 | av_freep(&priv); | |
853 | } | |
854 | ||
855 | static void compat_release_buffer(void *opaque, uint8_t *data) | |
856 | { | |
857 | AVBufferRef *buf = opaque; | |
858 | av_buffer_unref(&buf); | |
859 | } | |
860 | FF_ENABLE_DEPRECATION_WARNINGS | |
861 | #endif | |
862 | ||
863 | int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) | |
864 | { | |
865 | return ff_init_buffer_info(avctx, frame); | |
866 | } | |
867 | ||
868 | static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) | |
869 | { | |
870 | const AVHWAccel *hwaccel = avctx->hwaccel; | |
871 | int override_dimensions = 1; | |
872 | int ret; | |
873 | ||
874 | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { | |
875 | if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0 || avctx->pix_fmt<0) { | |
876 | av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n"); | |
877 | return AVERROR(EINVAL); | |
878 | } | |
879 | } | |
880 | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { | |
881 | if (frame->width <= 0 || frame->height <= 0) { | |
882 | frame->width = FFMAX(avctx->width, FF_CEIL_RSHIFT(avctx->coded_width, avctx->lowres)); | |
883 | frame->height = FFMAX(avctx->height, FF_CEIL_RSHIFT(avctx->coded_height, avctx->lowres)); | |
884 | override_dimensions = 0; | |
885 | } | |
886 | } | |
887 | ret = ff_decode_frame_props(avctx, frame); | |
888 | if (ret < 0) | |
889 | return ret; | |
890 | if ((ret = ff_init_buffer_info(avctx, frame)) < 0) | |
891 | return ret; | |
892 | ||
893 | if (hwaccel && hwaccel->alloc_frame) { | |
894 | ret = hwaccel->alloc_frame(avctx, frame); | |
895 | goto end; | |
896 | } | |
897 | ||
898 | #if FF_API_GET_BUFFER | |
899 | FF_DISABLE_DEPRECATION_WARNINGS | |
900 | /* | |
901 | * Wrap an old get_buffer()-allocated buffer in a bunch of AVBuffers. | |
902 | * We wrap each plane in its own AVBuffer. Each of those has a reference to | |
903 | * a dummy AVBuffer as its private data, unreffing it on free. | |
904 | * When all the planes are freed, the dummy buffer's free callback calls | |
905 | * release_buffer(). | |
906 | */ | |
907 | if (avctx->get_buffer) { | |
908 | CompatReleaseBufPriv *priv = NULL; | |
909 | AVBufferRef *dummy_buf = NULL; | |
910 | int planes, i, ret; | |
911 | ||
912 | if (flags & AV_GET_BUFFER_FLAG_REF) | |
913 | frame->reference = 1; | |
914 | ||
915 | ret = avctx->get_buffer(avctx, frame); | |
916 | if (ret < 0) | |
917 | return ret; | |
918 | ||
919 | /* return if the buffers are already set up | |
920 | * this would happen e.g. when a custom get_buffer() calls | |
921 | * avcodec_default_get_buffer | |
922 | */ | |
923 | if (frame->buf[0]) | |
924 | goto end0; | |
925 | ||
926 | priv = av_mallocz(sizeof(*priv)); | |
927 | if (!priv) { | |
928 | ret = AVERROR(ENOMEM); | |
929 | goto fail; | |
930 | } | |
931 | priv->avctx = *avctx; | |
932 | priv->frame = *frame; | |
933 | ||
934 | dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, priv, 0); | |
935 | if (!dummy_buf) { | |
936 | ret = AVERROR(ENOMEM); | |
937 | goto fail; | |
938 | } | |
939 | ||
940 | #define WRAP_PLANE(ref_out, data, data_size) \ | |
941 | do { \ | |
942 | AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ | |
943 | if (!dummy_ref) { \ | |
944 | ret = AVERROR(ENOMEM); \ | |
945 | goto fail; \ | |
946 | } \ | |
947 | ref_out = av_buffer_create(data, data_size, compat_release_buffer, \ | |
948 | dummy_ref, 0); \ | |
949 | if (!ref_out) { \ | |
950 | av_frame_unref(frame); \ | |
951 | ret = AVERROR(ENOMEM); \ | |
952 | goto fail; \ | |
953 | } \ | |
954 | } while (0) | |
955 | ||
956 | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { | |
957 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); | |
958 | ||
959 | planes = av_pix_fmt_count_planes(frame->format); | |
960 | /* workaround for an AVHWAccel plane count of 0: buf[0] is used as the |
961 | check for allocated buffers, so make libavcodec happy */ |
962 | if (desc && desc->flags & AV_PIX_FMT_FLAG_HWACCEL) | |
963 | planes = 1; | |
964 | if (!desc || planes <= 0) { | |
965 | ret = AVERROR(EINVAL); | |
966 | goto fail; | |
967 | } | |
968 | ||
969 | for (i = 0; i < planes; i++) { | |
970 | int v_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; | |
971 | int plane_size = (frame->height >> v_shift) * frame->linesize[i]; | |
972 | ||
973 | WRAP_PLANE(frame->buf[i], frame->data[i], plane_size); | |
974 | } | |
975 | } else { | |
976 | int planar = av_sample_fmt_is_planar(frame->format); | |
977 | planes = planar ? avctx->channels : 1; | |
978 | ||
979 | if (planes > FF_ARRAY_ELEMS(frame->buf)) { | |
980 | frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf); | |
981 | frame->extended_buf = av_malloc_array(sizeof(*frame->extended_buf), | |
982 | frame->nb_extended_buf); | |
983 | if (!frame->extended_buf) { | |
984 | ret = AVERROR(ENOMEM); | |
985 | goto fail; | |
986 | } | |
987 | } | |
988 | ||
989 | for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++) | |
990 | WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]); | |
991 | ||
992 | for (i = 0; i < frame->nb_extended_buf; i++) | |
993 | WRAP_PLANE(frame->extended_buf[i], | |
994 | frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)], | |
995 | frame->linesize[0]); | |
996 | } | |
997 | ||
998 | av_buffer_unref(&dummy_buf); | |
999 | ||
1000 | end0: | |
1001 | frame->width = avctx->width; | |
1002 | frame->height = avctx->height; | |
1003 | ||
1004 | return 0; | |
1005 | ||
1006 | fail: | |
1007 | avctx->release_buffer(avctx, frame); | |
1008 | av_freep(&priv); | |
1009 | av_buffer_unref(&dummy_buf); | |
1010 | return ret; | |
1011 | } | |
1012 | FF_ENABLE_DEPRECATION_WARNINGS | |
1013 | #endif | |
1014 | ||
1015 | ret = avctx->get_buffer2(avctx, frame, flags); | |
1016 | ||
1017 | end: | |
1018 | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions) { | |
1019 | frame->width = avctx->width; | |
1020 | frame->height = avctx->height; | |
1021 | } | |
1022 | ||
1023 | return ret; | |
1024 | } | |
1025 | ||
1026 | int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) | |
1027 | { | |
1028 | int ret = get_buffer_internal(avctx, frame, flags); | |
1029 | if (ret < 0) | |
1030 | av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); | |
1031 | return ret; | |
1032 | } | |
1033 | ||
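/* Reuse a previously returned frame if possible: drop it if the dimensions
 * or pixel format changed, hand out a fresh buffer if none is attached,
 * refresh only the metadata if the frame is still writable, and otherwise
 * allocate a new buffer and copy the old contents into it (for codecs that
 * update the previous picture in place). */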
1034 | static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame) | |
1035 | { | |
1036 | AVFrame *tmp; | |
1037 | int ret; | |
1038 | ||
1039 | av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO); | |
1040 | ||
1041 | if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) { | |
1042 | av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n", | |
1043 | frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt)); | |
1044 | av_frame_unref(frame); | |
1045 | } | |
1046 | ||
1047 | ff_init_buffer_info(avctx, frame); | |
1048 | ||
1049 | if (!frame->data[0]) | |
1050 | return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF); | |
1051 | ||
1052 | if (av_frame_is_writable(frame)) | |
1053 | return ff_decode_frame_props(avctx, frame); | |
1054 | ||
1055 | tmp = av_frame_alloc(); | |
1056 | if (!tmp) | |
1057 | return AVERROR(ENOMEM); | |
1058 | ||
1059 | av_frame_move_ref(tmp, frame); | |
1060 | ||
1061 | ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF); | |
1062 | if (ret < 0) { | |
1063 | av_frame_free(&tmp); | |
1064 | return ret; | |
1065 | } | |
1066 | ||
1067 | av_frame_copy(frame, tmp); | |
1068 | av_frame_free(&tmp); | |
1069 | ||
1070 | return 0; | |
1071 | } | |
1072 | ||
1073 | int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame) | |
1074 | { | |
1075 | int ret = reget_buffer_internal(avctx, frame); | |
1076 | if (ret < 0) | |
1077 | av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); | |
1078 | return ret; | |
1079 | } | |
1080 | ||
1081 | #if FF_API_GET_BUFFER | |
1082 | void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic) | |
1083 | { | |
1084 | av_assert0(s->codec_type == AVMEDIA_TYPE_VIDEO); | |
1085 | ||
1086 | av_frame_unref(pic); | |
1087 | } | |
1088 | ||
1089 | int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic) | |
1090 | { | |
1091 | av_assert0(0); | |
1092 | return AVERROR_BUG; | |
1093 | } | |
1094 | #endif | |
1095 | ||
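/* Serial fallbacks for AVCodecContext.execute()/execute2(): run every job in
 * the calling thread and record the per-job return codes in ret[] when
 * provided. */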
1096 | int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size) | |
1097 | { | |
1098 | int i; | |
1099 | ||
1100 | for (i = 0; i < count; i++) { | |
1101 | int r = func(c, (char *)arg + i * size); | |
1102 | if (ret) | |
1103 | ret[i] = r; | |
1104 | } | |
1105 | return 0; | |
1106 | } | |
1107 | ||
1108 | int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count) | |
1109 | { | |
1110 | int i; | |
1111 | ||
1112 | for (i = 0; i < count; i++) { | |
1113 | int r = func(c, arg, i, 0); | |
1114 | if (ret) | |
1115 | ret[i] = r; | |
1116 | } | |
1117 | return 0; | |
1118 | } | |
1119 | ||
1120 | enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, | |
1121 | unsigned int fourcc) | |
1122 | { | |
1123 | while (tags->pix_fmt >= 0) { | |
1124 | if (tags->fourcc == fourcc) | |
1125 | return tags->pix_fmt; | |
1126 | tags++; | |
1127 | } | |
1128 | return AV_PIX_FMT_NONE; | |
1129 | } | |
1130 | ||
1131 | static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt) | |
1132 | { | |
1133 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); | |
1134 | return desc->flags & AV_PIX_FMT_FLAG_HWACCEL; | |
1135 | } | |
1136 | ||
1137 | enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt) | |
1138 | { | |
1139 | while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt)) | |
1140 | ++fmt; | |
1141 | return fmt[0]; | |
1142 | } | |
1143 | ||
1144 | static AVHWAccel *find_hwaccel(enum AVCodecID codec_id, | |
1145 | enum AVPixelFormat pix_fmt) | |
1146 | { | |
1147 | AVHWAccel *hwaccel = NULL; | |
1148 | ||
1149 | while ((hwaccel = av_hwaccel_next(hwaccel))) | |
1150 | if (hwaccel->id == codec_id | |
1151 | && hwaccel->pix_fmt == pix_fmt) | |
1152 | return hwaccel; | |
1153 | return NULL; | |
1154 | } | |
1155 | ||
1156 | static int setup_hwaccel(AVCodecContext *avctx, |
1157 | const enum AVPixelFormat fmt, | |
1158 | const char *name) | |
1159 | { | |
1160 | AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt); | |
1161 | int ret = 0; | |
1162 | ||
1163 | if (!hwa) { | |
1164 | av_log(avctx, AV_LOG_ERROR, | |
1165 | "Could not find an AVHWAccel for the pixel format: %s", | |
1166 | name); | |
1167 | return AVERROR(ENOENT); | |
1168 | } | |
1169 | ||
1170 | if (hwa->priv_data_size) { | |
1171 | avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size); | |
1172 | if (!avctx->internal->hwaccel_priv_data) | |
1173 | return AVERROR(ENOMEM); | |
1174 | } | |
1175 | ||
1176 | if (hwa->init) { | |
1177 | ret = hwa->init(avctx); | |
1178 | if (ret < 0) { | |
1179 | av_freep(&avctx->internal->hwaccel_priv_data); | |
1180 | return ret; | |
1181 | } | |
1182 | } | |
1183 | ||
1184 | avctx->hwaccel = hwa; | |
1185 | ||
1186 | return 0; | |
1187 | } | |
1188 | |
1189 | int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) | |
1190 | { | |
1191 | const AVPixFmtDescriptor *desc; | |
1192 | enum AVPixelFormat *choices; |
1193 | enum AVPixelFormat ret; | |
1194 | unsigned n = 0; | |
1195 | ||
1196 | while (fmt[n] != AV_PIX_FMT_NONE) | |
1197 | ++n; | |
1198 |
1199 | choices = av_malloc_array(n + 1, sizeof(*choices)); |
1200 | if (!choices) | |
1201 | return AV_PIX_FMT_NONE; |
1202 | ||
1203 | memcpy(choices, fmt, (n + 1) * sizeof(*choices)); |
1204 |
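/* Repeatedly ask the caller's get_format() callback to pick a format from the
 * remaining candidates. Non-hwaccel formats (and any format for
 * VDPAU-capable codecs) are accepted immediately; otherwise we try to set up
 * the matching AVHWAccel and, if that fails, drop the format from the
 * candidate list and ask again. */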
1205 | for (;;) { |
1206 | if (avctx->hwaccel && avctx->hwaccel->uninit) | |
1207 | avctx->hwaccel->uninit(avctx); | |
1208 | av_freep(&avctx->internal->hwaccel_priv_data); | |
1209 | avctx->hwaccel = NULL; | |
1210 |
1211 | ret = avctx->get_format(avctx, choices); |
1212 |
1213 | desc = av_pix_fmt_desc_get(ret); |
1214 | if (!desc) { | |
1215 | ret = AV_PIX_FMT_NONE; | |
1216 | break; | |
1217 | } |
1218 | ||
1219 | if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) |
1220 | break; | |
1221 | if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) | |
1222 | break; | |
1223 | ||
1224 | if (!setup_hwaccel(avctx, ret, desc->name)) | |
1225 | break; | |
1226 | ||
1227 | /* Remove failed hwaccel from choices */ | |
1228 | for (n = 0; choices[n] != ret; n++) | |
1229 | av_assert0(choices[n] != AV_PIX_FMT_NONE); | |
1230 | ||
1231 | do | |
1232 | choices[n] = choices[n + 1]; | |
1233 | while (choices[n++] != AV_PIX_FMT_NONE); | |
1234 | } |
1235 | ||
1236 | av_freep(&choices); |
1237 | return ret; |
1238 | } | |
1239 | ||
1240 | #if FF_API_AVFRAME_LAVC | |
1241 | void avcodec_get_frame_defaults(AVFrame *frame) | |
1242 | { | |
1243 | #if LIBAVCODEC_VERSION_MAJOR >= 55 | |
1244 | // extended_data should be freed explicitly when needed; this code is currently unsafe |
1245 | // and is also not compatible with the <55 ABI/API |
1246 | if (frame->extended_data != frame->data && 0) | |
1247 | av_freep(&frame->extended_data); | |
1248 | #endif | |
1249 | ||
1250 | memset(frame, 0, sizeof(AVFrame)); | |
1251 | av_frame_unref(frame); | |
1252 | } | |
1253 | ||
1254 | AVFrame *avcodec_alloc_frame(void) | |
1255 | { | |
1256 | return av_frame_alloc(); | |
1257 | } | |
1258 | ||
1259 | void avcodec_free_frame(AVFrame **frame) | |
1260 | { | |
1261 | av_frame_free(frame); | |
1262 | } | |
1263 | #endif | |
1264 | ||
1265 | MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase) | |
1266 | MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor) | |
1267 | MAKE_ACCESSORS(AVCodecContext, codec, int, lowres) | |
1268 | MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll) | |
1269 | MAKE_ACCESSORS(AVCodecContext, codec, uint16_t*, chroma_intra_matrix) | |
1270 | ||
1271 | int av_codec_get_max_lowres(const AVCodec *codec) | |
1272 | { | |
1273 | return codec->max_lowres; | |
1274 | } | |
1275 | ||
1276 | static void get_subtitle_defaults(AVSubtitle *sub) |
1277 | { |
1278 | memset(sub, 0, sizeof(*sub)); | |
1279 | sub->pts = AV_NOPTS_VALUE; | |
1280 | } | |
1281 | ||
1282 | static int get_bit_rate(AVCodecContext *ctx) | |
1283 | { | |
1284 | int bit_rate; | |
1285 | int bits_per_sample; | |
1286 | ||
1287 | switch (ctx->codec_type) { | |
1288 | case AVMEDIA_TYPE_VIDEO: | |
1289 | case AVMEDIA_TYPE_DATA: | |
1290 | case AVMEDIA_TYPE_SUBTITLE: | |
1291 | case AVMEDIA_TYPE_ATTACHMENT: | |
1292 | bit_rate = ctx->bit_rate; | |
1293 | break; | |
1294 | case AVMEDIA_TYPE_AUDIO: | |
1295 | bits_per_sample = av_get_bits_per_sample(ctx->codec_id); | |
1296 | bit_rate = bits_per_sample ? ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate; | |
1297 | break; | |
1298 | default: | |
1299 | bit_rate = 0; | |
1300 | break; | |
1301 | } | |
1302 | return bit_rate; | |
1303 | } | |
1304 | ||
1305 | int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) | |
1306 | { | |
1307 | int ret = 0; | |
1308 | ||
1309 | ff_unlock_avcodec(); | |
1310 | ||
1311 | ret = avcodec_open2(avctx, codec, options); | |
1312 | ||
1313 | ff_lock_avcodec(avctx); | |
1314 | return ret; | |
1315 | } | |
1316 | ||
1317 | int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) | |
1318 | { | |
1319 | int ret = 0; | |
1320 | AVDictionary *tmp = NULL; | |
1321 | ||
1322 | if (avcodec_is_open(avctx)) | |
1323 | return 0; | |
1324 | ||
1325 | if ((!codec && !avctx->codec)) { | |
1326 | av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n"); | |
1327 | return AVERROR(EINVAL); | |
1328 | } | |
1329 | if ((codec && avctx->codec && codec != avctx->codec)) { | |
1330 | av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " | |
1331 | "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name); | |
1332 | return AVERROR(EINVAL); | |
1333 | } | |
1334 | if (!codec) | |
1335 | codec = avctx->codec; | |
1336 | ||
1337 | if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE) | |
1338 | return AVERROR(EINVAL); | |
1339 | ||
1340 | if (options) | |
1341 | av_dict_copy(&tmp, *options, 0); | |
1342 | ||
1343 | ret = ff_lock_avcodec(avctx); | |
1344 | if (ret < 0) | |
1345 | return ret; | |
1346 | ||
1347 | avctx->internal = av_mallocz(sizeof(AVCodecInternal)); | |
1348 | if (!avctx->internal) { | |
1349 | ret = AVERROR(ENOMEM); | |
1350 | goto end; | |
1351 | } | |
1352 | ||
1353 | avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); | |
1354 | if (!avctx->internal->pool) { | |
1355 | ret = AVERROR(ENOMEM); | |
1356 | goto free_and_end; | |
1357 | } | |
1358 | ||
1359 | avctx->internal->to_free = av_frame_alloc(); | |
1360 | if (!avctx->internal->to_free) { | |
1361 | ret = AVERROR(ENOMEM); | |
1362 | goto free_and_end; | |
1363 | } | |
1364 | ||
1365 | if (codec->priv_data_size > 0) { | |
1366 | if (!avctx->priv_data) { | |
1367 | avctx->priv_data = av_mallocz(codec->priv_data_size); | |
1368 | if (!avctx->priv_data) { | |
1369 | ret = AVERROR(ENOMEM); | |
1370 | goto end; | |
1371 | } | |
1372 | if (codec->priv_class) { | |
1373 | *(const AVClass **)avctx->priv_data = codec->priv_class; | |
1374 | av_opt_set_defaults(avctx->priv_data); | |
1375 | } | |
1376 | } | |
1377 | if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) | |
1378 | goto free_and_end; | |
1379 | } else { | |
1380 | avctx->priv_data = NULL; | |
1381 | } | |
1382 | if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) | |
1383 | goto free_and_end; | |
1384 | ||
1385 | if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) { |
1386 | av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist\n", codec->name); | |
1387 | ret = AVERROR(EINVAL); | |
1388 | goto free_and_end; | |
1389 | } | |
1390 | ||
1391 | // only call ff_set_dimensions() for non-H.264/VP6F codecs so as not to overwrite previously set up dimensions |
1392 | if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height && | |
1393 | (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F))) { | |
1394 | if (avctx->coded_width && avctx->coded_height) | |
1395 | ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); | |
1396 | else if (avctx->width && avctx->height) | |
1397 | ret = ff_set_dimensions(avctx, avctx->width, avctx->height); | |
1398 | if (ret < 0) | |
1399 | goto free_and_end; | |
1400 | } | |
1401 | ||
1402 | if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) | |
1403 | && ( av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0 | |
1404 | || av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) { | |
1405 | av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n"); | |
1406 | ff_set_dimensions(avctx, 0, 0); | |
1407 | } | |
1408 | ||
1409 | if (avctx->width > 0 && avctx->height > 0) { | |
1410 | if (av_image_check_sar(avctx->width, avctx->height, | |
1411 | avctx->sample_aspect_ratio) < 0) { | |
1412 | av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", | |
1413 | avctx->sample_aspect_ratio.num, | |
1414 | avctx->sample_aspect_ratio.den); | |
1415 | avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; | |
1416 | } | |
1417 | } | |
1418 | ||
1419 | /* if the decoder init function was already called previously, | |
1420 | * free the already allocated subtitle_header before overwriting it */ | |
1421 | if (av_codec_is_decoder(codec)) | |
1422 | av_freep(&avctx->subtitle_header); | |
1423 | ||
1424 | if (avctx->channels > FF_SANE_NB_CHANNELS) { | |
1425 | ret = AVERROR(EINVAL); | |
1426 | goto free_and_end; | |
1427 | } | |
1428 | ||
1429 | avctx->codec = codec; | |
1430 | if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && | |
1431 | avctx->codec_id == AV_CODEC_ID_NONE) { | |
1432 | avctx->codec_type = codec->type; | |
1433 | avctx->codec_id = codec->id; | |
1434 | } | |
1435 | if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type | |
1436 | && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { | |
1437 | av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n"); | |
1438 | ret = AVERROR(EINVAL); | |
1439 | goto free_and_end; | |
1440 | } | |
1441 | avctx->frame_number = 0; | |
1442 | avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id); | |
1443 | ||
1444 | if (avctx->codec->capabilities & CODEC_CAP_EXPERIMENTAL && | |
1445 | avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { | |
1446 | const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; | |
1447 | AVCodec *codec2; | |
1448 | av_log(avctx, AV_LOG_ERROR, | |
1449 | "The %s '%s' is experimental but experimental codecs are not enabled, " | |
1450 | "add '-strict %d' if you want to use it.\n", | |
1451 | codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); | |
1452 | codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); | |
1453 | if (!(codec2->capabilities & CODEC_CAP_EXPERIMENTAL)) | |
1454 | av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", | |
1455 | codec_string, codec2->name); | |
1456 | ret = AVERROR_EXPERIMENTAL; | |
1457 | goto free_and_end; | |
1458 | } | |
1459 | ||
1460 | if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && | |
1461 | (!avctx->time_base.num || !avctx->time_base.den)) { | |
1462 | avctx->time_base.num = 1; | |
1463 | avctx->time_base.den = avctx->sample_rate; | |
1464 | } | |
1465 | ||
1466 | if (!HAVE_THREADS) | |
1467 | av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n"); | |
1468 | ||
1469 | if (CONFIG_FRAME_THREAD_ENCODER) { | |
1470 | ff_unlock_avcodec(); // we will instantiate a few encoders, so kick the counter to prevent false detection of a problem |
1471 | ret = ff_frame_thread_encoder_init(avctx, options ? *options : NULL); | |
1472 | ff_lock_avcodec(avctx); | |
1473 | if (ret < 0) | |
1474 | goto free_and_end; | |
1475 | } | |
1476 | ||
1477 | if (HAVE_THREADS | |
1478 | && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { | |
1479 | ret = ff_thread_init(avctx); | |
1480 | if (ret < 0) { | |
1481 | goto free_and_end; | |
1482 | } | |
1483 | } | |
1484 | if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS)) | |
1485 | avctx->thread_count = 1; | |
1486 | ||
1487 | if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { | |
1488 | av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n", | |
1489 | avctx->codec->max_lowres); | |
1490 | ret = AVERROR(EINVAL); | |
1491 | goto free_and_end; | |
1492 | } | |
1493 | ||
1494 | #if FF_API_VISMV | |
1495 | if (avctx->debug_mv) | |
1496 | av_log(avctx, AV_LOG_WARNING, "The 'vismv' option is deprecated, " | |
1497 | "see the codecview filter instead.\n"); | |
1498 | #endif | |
1499 | ||
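/* For encoders, check the requested parameters against what the codec
 * declares it supports: sample format (allowing a packed/planar variant of
 * the same base format for mono), pixel format, sample rate, channel layout
 * and channel count, and basic video dimensions. */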
1500 | if (av_codec_is_encoder(avctx->codec)) { | |
1501 | int i; | |
1502 | if (avctx->codec->sample_fmts) { | |
1503 | for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { | |
1504 | if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) | |
1505 | break; | |
1506 | if (avctx->channels == 1 && | |
1507 | av_get_planar_sample_fmt(avctx->sample_fmt) == | |
1508 | av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) { | |
1509 | avctx->sample_fmt = avctx->codec->sample_fmts[i]; | |
1510 | break; | |
1511 | } | |
1512 | } | |
1513 | if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { | |
1514 | char buf[128]; | |
1515 | snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt); | |
1516 | av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n", | |
1517 | (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf)); | |
1518 | ret = AVERROR(EINVAL); | |
1519 | goto free_and_end; | |
1520 | } | |
1521 | } | |
1522 | if (avctx->codec->pix_fmts) { | |
1523 | for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) | |
1524 | if (avctx->pix_fmt == avctx->codec->pix_fmts[i]) | |
1525 | break; | |
1526 | if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE | |
1527 | && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG) | |
1528 | && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) { | |
1529 | char buf[128]; | |
1530 | snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt); | |
1531 | av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n", | |
1532 | (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf)); | |
1533 | ret = AVERROR(EINVAL); | |
1534 | goto free_and_end; | |
1535 | } | |
1536 | if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P || |
1537 | avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P || | |
1538 | avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P || | |
1539 | avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P || | |
1540 | avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P) | |
1541 | avctx->color_range = AVCOL_RANGE_JPEG; | |
1542 | } |
1543 | if (avctx->codec->supported_samplerates) { | |
1544 | for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++) | |
1545 | if (avctx->sample_rate == avctx->codec->supported_samplerates[i]) | |
1546 | break; | |
1547 | if (avctx->codec->supported_samplerates[i] == 0) { | |
1548 | av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", | |
1549 | avctx->sample_rate); | |
1550 | ret = AVERROR(EINVAL); | |
1551 | goto free_and_end; | |
1552 | } | |
1553 | } | |
1554 | if (avctx->codec->channel_layouts) { | |
1555 | if (!avctx->channel_layout) { | |
1556 | av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n"); | |
1557 | } else { | |
1558 | for (i = 0; avctx->codec->channel_layouts[i] != 0; i++) | |
1559 | if (avctx->channel_layout == avctx->codec->channel_layouts[i]) | |
1560 | break; | |
1561 | if (avctx->codec->channel_layouts[i] == 0) { | |
1562 | char buf[512]; | |
1563 | av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); | |
1564 | av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf); | |
1565 | ret = AVERROR(EINVAL); | |
1566 | goto free_and_end; | |
1567 | } | |
1568 | } | |
1569 | } | |
1570 | if (avctx->channel_layout && avctx->channels) { | |
1571 | int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); | |
1572 | if (channels != avctx->channels) { | |
1573 | char buf[512]; | |
1574 | av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); | |
1575 | av_log(avctx, AV_LOG_ERROR, | |
1576 | "Channel layout '%s' with %d channels does not match number of specified channels %d\n", | |
1577 | buf, channels, avctx->channels); | |
1578 | ret = AVERROR(EINVAL); | |
1579 | goto free_and_end; | |
1580 | } | |
1581 | } else if (avctx->channel_layout) { | |
1582 | avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout); | |
1583 | } | |
1584 | if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) { | |
1585 | if (avctx->width <= 0 || avctx->height <= 0) { | |
1586 | av_log(avctx, AV_LOG_ERROR, "dimensions not set\n"); | |
1587 | ret = AVERROR(EINVAL); | |
1588 | goto free_and_end; | |
1589 | } | |
1590 | } | |
1591 | if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) | |
1592 | && avctx->bit_rate>0 && avctx->bit_rate<1000) { | |
1593 | av_log(avctx, AV_LOG_WARNING, "Bitrate %d is extremely low, maybe you meant %dk\n", avctx->bit_rate, avctx->bit_rate); | |
1594 | } | |
1595 | ||
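/* If the caller did not say how full the rate-control (VBV) buffer is at
 * the start of encoding, assume it starts three quarters full. */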
1596 | if (!avctx->rc_initial_buffer_occupancy) | |
1597 | avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4; | |
1598 | } | |
1599 | ||
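/* Reset the timestamp-correction state consumed by guess_correct_pts()
 * further down in this file. */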
1600 | avctx->pts_correction_num_faulty_pts = | |
1601 | avctx->pts_correction_num_faulty_dts = 0; | |
1602 | avctx->pts_correction_last_pts = | |
1603 | avctx->pts_correction_last_dts = INT64_MIN; | |
1604 | ||
1605 | if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) | |
1606 | || avctx->internal->frame_thread_encoder)) { | |
1607 | ret = avctx->codec->init(avctx); | |
1608 | if (ret < 0) { | |
1609 | goto free_and_end; | |
1610 | } | |
1611 | } | |
1612 | ||
1613 | ret=0; | |
1614 | ||
f6fa7814 DM |
1615 | #if FF_API_AUDIOENC_DELAY |
1616 | if (av_codec_is_encoder(avctx->codec)) | |
1617 | avctx->delay = avctx->initial_padding; | |
1618 | #endif | |
1619 | ||
2ba45a60 DM |
1620 | if (av_codec_is_decoder(avctx->codec)) { |
1621 | if (!avctx->bit_rate) | |
1622 | avctx->bit_rate = get_bit_rate(avctx); | |
1623 | /* validate channel layout from the decoder */ | |
1624 | if (avctx->channel_layout) { | |
1625 | int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); | |
1626 | if (!avctx->channels) | |
1627 | avctx->channels = channels; | |
1628 | else if (channels != avctx->channels) { | |
1629 | char buf[512]; | |
1630 | av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); | |
1631 | av_log(avctx, AV_LOG_WARNING, | |
1632 | "Channel layout '%s' with %d channels does not match specified number of channels %d: " | |
1633 | "ignoring specified channel layout\n", | |
1634 | buf, channels, avctx->channels); | |
1635 | avctx->channel_layout = 0; | |
1636 | } | |
1637 | } | |
1638 | if (avctx->channels < 0 || | |
1639 | avctx->channels > FF_SANE_NB_CHANNELS) { | |
1640 | ret = AVERROR(EINVAL); | |
1641 | goto free_and_end; | |
1642 | } | |
1643 | if (avctx->sub_charenc) { | |
1644 | if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { | |
1645 | av_log(avctx, AV_LOG_ERROR, "Character encoding is only " | |
1646 | "supported with subtitles codecs\n"); | |
1647 | ret = AVERROR(EINVAL); | |
1648 | goto free_and_end; | |
1649 | } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { | |
1650 | av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " | |
1651 | "subtitles character encoding will be ignored\n", | |
1652 | avctx->codec_descriptor->name); | |
1653 | avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; | |
1654 | } else { | |
1655 | /* input character encoding is set for a text based subtitle | |
1656 | * codec at this point */ | |
1657 | if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) | |
1658 | avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; | |
1659 | ||
1660 | if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { | |
1661 | #if CONFIG_ICONV | |
1662 | iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); | |
1663 | if (cd == (iconv_t)-1) { | |
f6fa7814 | 1664 | ret = AVERROR(errno); |
2ba45a60 DM |
1665 | av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " |
1666 | "with input character encoding \"%s\"\n", avctx->sub_charenc); | |
2ba45a60 DM |
1667 | goto free_and_end; |
1668 | } | |
1669 | iconv_close(cd); | |
1670 | #else | |
1671 | av_log(avctx, AV_LOG_ERROR, "Subtitle character encoding " | |
1672 | "conversion for this codec needs a libavcodec built " | |
1673 | "with iconv support\n"); | |
1674 | ret = AVERROR(ENOSYS); | |
1675 | goto free_and_end; | |
1676 | #endif | |
1677 | } | |
1678 | } | |
1679 | } | |
f6fa7814 DM |
1680 | |
1681 | #if FF_API_AVCTX_TIMEBASE | |
1682 | if (avctx->framerate.num > 0 && avctx->framerate.den > 0) | |
1683 | avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); | |
1684 | #endif | |
2ba45a60 DM |
1685 | } |
1686 | end: | |
1687 | ff_unlock_avcodec(); | |
1688 | if (options) { | |
1689 | av_dict_free(options); | |
1690 | *options = tmp; | |
1691 | } | |
1692 | ||
1693 | return ret; | |
1694 | free_and_end: | |
1695 | av_dict_free(&tmp); | |
f6fa7814 DM |
1696 | if (codec->priv_class && codec->priv_data_size) |
1697 | av_opt_free(avctx->priv_data); | |
2ba45a60 DM |
1698 | av_freep(&avctx->priv_data); |
1699 | if (avctx->internal) { | |
1700 | av_frame_free(&avctx->internal->to_free); | |
1701 | av_freep(&avctx->internal->pool); | |
1702 | } | |
1703 | av_freep(&avctx->internal); | |
1704 | avctx->codec = NULL; | |
1705 | goto end; | |
1706 | } | |
1707 | ||
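/* Make sure avpkt can hold at least `size` bytes: a caller-supplied buffer
 * must already be large enough, otherwise the packet is backed either by
 * the context's reusable byte_buffer scratch space or by a freshly
 * allocated buffer from av_new_packet(). */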
1708 | int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size) | |
1709 | { | |
1710 | if (avpkt->size < 0) { | |
1711 | av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size); | |
1712 | return AVERROR(EINVAL); | |
1713 | } | |
1714 | if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { | |
1715 | av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n", | |
1716 | size, INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE); | |
1717 | return AVERROR(EINVAL); | |
1718 | } | |
1719 | ||
1720 | if (avctx) { | |
1721 | av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer); | |
1722 | if (!avpkt->data || avpkt->size < size) { | |
1723 | av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size); | |
1724 | avpkt->data = avctx->internal->byte_buffer; | |
1725 | avpkt->size = avctx->internal->byte_buffer_size; | |
1726 | #if FF_API_DESTRUCT_PACKET | |
1727 | FF_DISABLE_DEPRECATION_WARNINGS | |
1728 | avpkt->destruct = NULL; | |
1729 | FF_ENABLE_DEPRECATION_WARNINGS | |
1730 | #endif | |
1731 | } | |
1732 | } | |
1733 | ||
1734 | if (avpkt->data) { | |
1735 | AVBufferRef *buf = avpkt->buf; | |
1736 | #if FF_API_DESTRUCT_PACKET | |
1737 | FF_DISABLE_DEPRECATION_WARNINGS | |
1738 | void *destruct = avpkt->destruct; | |
1739 | FF_ENABLE_DEPRECATION_WARNINGS | |
1740 | #endif | |
1741 | ||
1742 | if (avpkt->size < size) { | |
1743 | av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size); | |
1744 | return AVERROR(EINVAL); | |
1745 | } | |
1746 | ||
1747 | av_init_packet(avpkt); | |
1748 | #if FF_API_DESTRUCT_PACKET | |
1749 | FF_DISABLE_DEPRECATION_WARNINGS | |
1750 | avpkt->destruct = destruct; | |
1751 | FF_ENABLE_DEPRECATION_WARNINGS | |
1752 | #endif | |
1753 | avpkt->buf = buf; | |
1754 | avpkt->size = size; | |
1755 | return 0; | |
1756 | } else { | |
1757 | int ret = av_new_packet(avpkt, size); | |
1758 | if (ret < 0) | |
1759 | av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size); | |
1760 | return ret; | |
1761 | } | |
1762 | } | |
1763 | ||
1764 | int ff_alloc_packet(AVPacket *avpkt, int size) | |
1765 | { | |
1766 | return ff_alloc_packet2(NULL, avpkt, size); | |
1767 | } | |
1768 | ||
1769 | /** | |
1770 | * Pad last frame with silence. | |
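 * Used when the last input frame is shorter than frame_size and the encoder
 * does not advertise CODEC_CAP_VARIABLE_FRAME_SIZE: the samples are copied
 * into a full-sized frame and the remainder is filled with silence.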
1771 | */ | |
1772 | static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src) | |
1773 | { | |
1774 | AVFrame *frame = NULL; | |
1775 | int ret; | |
1776 | ||
1777 | if (!(frame = av_frame_alloc())) | |
1778 | return AVERROR(ENOMEM); | |
1779 | ||
1780 | frame->format = src->format; | |
1781 | frame->channel_layout = src->channel_layout; | |
1782 | av_frame_set_channels(frame, av_frame_get_channels(src)); | |
1783 | frame->nb_samples = s->frame_size; | |
1784 | ret = av_frame_get_buffer(frame, 32); | |
1785 | if (ret < 0) | |
1786 | goto fail; | |
1787 | ||
1788 | ret = av_frame_copy_props(frame, src); | |
1789 | if (ret < 0) | |
1790 | goto fail; | |
1791 | ||
1792 | if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0, | |
1793 | src->nb_samples, s->channels, s->sample_fmt)) < 0) | |
1794 | goto fail; | |
1795 | if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples, | |
1796 | frame->nb_samples - src->nb_samples, | |
1797 | s->channels, s->sample_fmt)) < 0) | |
1798 | goto fail; | |
1799 | ||
1800 | *dst = frame; | |
1801 | ||
1802 | return 0; | |
1803 | ||
1804 | fail: | |
1805 | av_frame_free(&frame); | |
1806 | return ret; | |
1807 | } | |
1808 | ||
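/* The caller either provides avpkt->data (which must then be large enough
 * for the encoded packet) or passes an empty packet and lets the encoder
 * allocate one. A minimal call sequence might look like the following
 * sketch (error handling omitted; `avctx` is assumed to be an already
 * opened audio encoder and `frame` a filled input AVFrame):
 *
 *     AVPacket pkt;
 *     int got_packet = 0;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL;  // let the encoder allocate the buffer
 *     pkt.size = 0;
 *     if (!avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) && got_packet) {
 *         // write out pkt.data / pkt.size, then av_free_packet(&pkt)
 *     }
 */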
1809 | int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, | |
1810 | AVPacket *avpkt, | |
1811 | const AVFrame *frame, | |
1812 | int *got_packet_ptr) | |
1813 | { | |
1814 | AVFrame *extended_frame = NULL; | |
1815 | AVFrame *padded_frame = NULL; | |
1816 | int ret; | |
1817 | AVPacket user_pkt = *avpkt; | |
1818 | int needs_realloc = !user_pkt.data; | |
1819 | ||
1820 | *got_packet_ptr = 0; | |
1821 | ||
1822 | if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { | |
1823 | av_free_packet(avpkt); | |
1824 | av_init_packet(avpkt); | |
1825 | return 0; | |
1826 | } | |
1827 | ||
1828 | /* ensure that extended_data is properly set */ | |
1829 | if (frame && !frame->extended_data) { | |
1830 | if (av_sample_fmt_is_planar(avctx->sample_fmt) && | |
1831 | avctx->channels > AV_NUM_DATA_POINTERS) { | |
1832 | av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, " | |
1833 | "with more than %d channels, but extended_data is not set.\n", | |
1834 | AV_NUM_DATA_POINTERS); | |
1835 | return AVERROR(EINVAL); | |
1836 | } | |
1837 | av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n"); | |
1838 | ||
1839 | extended_frame = av_frame_alloc(); | |
1840 | if (!extended_frame) | |
1841 | return AVERROR(ENOMEM); | |
1842 | ||
1843 | memcpy(extended_frame, frame, sizeof(AVFrame)); | |
1844 | extended_frame->extended_data = extended_frame->data; | |
1845 | frame = extended_frame; | |
1846 | } | |
1847 | ||
1848 | /* check for valid frame size */ | |
1849 | if (frame) { | |
1850 | if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { | |
1851 | if (frame->nb_samples > avctx->frame_size) { | |
1852 | av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n"); | |
1853 | ret = AVERROR(EINVAL); | |
1854 | goto end; | |
1855 | } | |
1856 | } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) { | |
1857 | if (frame->nb_samples < avctx->frame_size && | |
1858 | !avctx->internal->last_audio_frame) { | |
1859 | ret = pad_last_frame(avctx, &padded_frame, frame); | |
1860 | if (ret < 0) | |
1861 | goto end; | |
1862 | ||
1863 | frame = padded_frame; | |
1864 | avctx->internal->last_audio_frame = 1; | |
1865 | } | |
1866 | ||
1867 | if (frame->nb_samples != avctx->frame_size) { | |
1868 | av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size); | |
1869 | ret = AVERROR(EINVAL); | |
1870 | goto end; | |
1871 | } | |
1872 | } | |
1873 | } | |
1874 | ||
1875 | ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); | |
1876 | if (!ret) { | |
1877 | if (*got_packet_ptr) { | |
1878 | if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) { | |
1879 | if (avpkt->pts == AV_NOPTS_VALUE) | |
1880 | avpkt->pts = frame->pts; | |
1881 | if (!avpkt->duration) | |
1882 | avpkt->duration = ff_samples_to_time_base(avctx, | |
1883 | frame->nb_samples); | |
1884 | } | |
1885 | avpkt->dts = avpkt->pts; | |
1886 | } else { | |
1887 | avpkt->size = 0; | |
1888 | } | |
1889 | } | |
1890 | if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) { | |
1891 | needs_realloc = 0; | |
1892 | if (user_pkt.data) { | |
1893 | if (user_pkt.size >= avpkt->size) { | |
1894 | memcpy(user_pkt.data, avpkt->data, avpkt->size); | |
1895 | } else { | |
1896 | av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size); | |
1897 | avpkt->size = user_pkt.size; | |
1898 | ret = -1; | |
1899 | } | |
1900 | avpkt->buf = user_pkt.buf; | |
1901 | avpkt->data = user_pkt.data; | |
1902 | #if FF_API_DESTRUCT_PACKET | |
1903 | FF_DISABLE_DEPRECATION_WARNINGS | |
1904 | avpkt->destruct = user_pkt.destruct; | |
1905 | FF_ENABLE_DEPRECATION_WARNINGS | |
1906 | #endif | |
1907 | } else { | |
1908 | if (av_dup_packet(avpkt) < 0) { | |
1909 | ret = AVERROR(ENOMEM); | |
1910 | } | |
1911 | } | |
1912 | } | |
1913 | ||
1914 | if (!ret) { | |
1915 | if (needs_realloc && avpkt->data) { | |
1916 | ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); | |
1917 | if (ret >= 0) | |
1918 | avpkt->data = avpkt->buf->data; | |
1919 | } | |
1920 | ||
1921 | avctx->frame_number++; | |
1922 | } | |
1923 | ||
1924 | if (ret < 0 || !*got_packet_ptr) { | |
1925 | av_free_packet(avpkt); | |
1926 | av_init_packet(avpkt); | |
1927 | goto end; | |
1928 | } | |
1929 | ||
1930 | /* NOTE: if we add any audio encoders which output non-keyframe packets, | |
1931 | * this needs to be moved to the encoders, but for now we can do it | |
1932 | * here to simplify things */ | |
1933 | avpkt->flags |= AV_PKT_FLAG_KEY; | |
1934 | ||
1935 | end: | |
1936 | av_frame_free(&padded_frame); | |
1937 | av_free(extended_frame); | |
1938 | ||
f6fa7814 DM |
1939 | #if FF_API_AUDIOENC_DELAY |
1940 | avctx->delay = avctx->initial_padding; | |
1941 | #endif | |
1942 | ||
2ba45a60 DM |
1943 | return ret; |
1944 | } | |
1945 | ||
1946 | #if FF_API_OLD_ENCODE_AUDIO | |
1947 | int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, | |
1948 | uint8_t *buf, int buf_size, | |
1949 | const short *samples) | |
1950 | { | |
1951 | AVPacket pkt; | |
1952 | AVFrame *frame; | |
1953 | int ret, samples_size, got_packet; | |
1954 | ||
1955 | av_init_packet(&pkt); | |
1956 | pkt.data = buf; | |
1957 | pkt.size = buf_size; | |
1958 | ||
1959 | if (samples) { | |
1960 | frame = av_frame_alloc(); | |
1961 | if (!frame) | |
1962 | return AVERROR(ENOMEM); | |
1963 | ||
1964 | if (avctx->frame_size) { | |
1965 | frame->nb_samples = avctx->frame_size; | |
1966 | } else { | |
1967 | /* if frame_size is not set, the number of samples must be | |
1968 | * calculated from the buffer size */ | |
1969 | int64_t nb_samples; | |
1970 | if (!av_get_bits_per_sample(avctx->codec_id)) { | |
1971 | av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not " | |
1972 | "support this codec\n"); | |
1973 | av_frame_free(&frame); | |
1974 | return AVERROR(EINVAL); | |
1975 | } | |
1976 | nb_samples = (int64_t)buf_size * 8 / | |
1977 | (av_get_bits_per_sample(avctx->codec_id) * | |
1978 | avctx->channels); | |
1979 | if (nb_samples >= INT_MAX) { | |
1980 | av_frame_free(&frame); | |
1981 | return AVERROR(EINVAL); | |
1982 | } | |
1983 | frame->nb_samples = nb_samples; | |
1984 | } | |
1985 | ||
1986 | /* it is assumed that the samples buffer is large enough based on the | |
1987 | * relevant parameters */ | |
1988 | samples_size = av_samples_get_buffer_size(NULL, avctx->channels, | |
1989 | frame->nb_samples, | |
1990 | avctx->sample_fmt, 1); | |
1991 | if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, | |
1992 | avctx->sample_fmt, | |
1993 | (const uint8_t *)samples, | |
1994 | samples_size, 1)) < 0) { | |
1995 | av_frame_free(&frame); | |
1996 | return ret; | |
1997 | } | |
1998 | ||
1999 | /* fabricate frame pts from sample count. | |
2000 | * this is needed because the avcodec_encode_audio() API does not have | |
2001 | * a way for the user to provide pts */ | |
2002 | if (avctx->sample_rate && avctx->time_base.num) | |
2003 | frame->pts = ff_samples_to_time_base(avctx, | |
2004 | avctx->internal->sample_count); | |
2005 | else | |
2006 | frame->pts = AV_NOPTS_VALUE; | |
2007 | avctx->internal->sample_count += frame->nb_samples; | |
2008 | } else { | |
2009 | frame = NULL; | |
2010 | } | |
2011 | ||
2012 | got_packet = 0; | |
2013 | ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet); | |
2014 | if (!ret && got_packet && avctx->coded_frame) { | |
2015 | avctx->coded_frame->pts = pkt.pts; | |
2016 | avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); | |
2017 | } | |
2018 | /* free any side data since we cannot return it */ | |
2019 | av_packet_free_side_data(&pkt); | |
2020 | ||
2021 | if (frame && frame->extended_data != frame->data) | |
2022 | av_freep(&frame->extended_data); | |
2023 | ||
2024 | av_frame_free(&frame); | |
2025 | return ret ? ret : pkt.size; | |
2026 | } | |
2027 | ||
2028 | #endif | |
2029 | ||
2030 | #if FF_API_OLD_ENCODE_VIDEO | |
2031 | int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, | |
2032 | const AVFrame *pict) | |
2033 | { | |
2034 | AVPacket pkt; | |
2035 | int ret, got_packet = 0; | |
2036 | ||
2037 | if (buf_size < FF_MIN_BUFFER_SIZE) { | |
2038 | av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n"); | |
2039 | return -1; | |
2040 | } | |
2041 | ||
2042 | av_init_packet(&pkt); | |
2043 | pkt.data = buf; | |
2044 | pkt.size = buf_size; | |
2045 | ||
2046 | ret = avcodec_encode_video2(avctx, &pkt, pict, &got_packet); | |
2047 | if (!ret && got_packet && avctx->coded_frame) { | |
2048 | avctx->coded_frame->pts = pkt.pts; | |
2049 | avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); | |
2050 | } | |
2051 | ||
2052 | /* free any side data since we cannot return it */ | |
2053 | if (pkt.side_data_elems > 0) { | |
2054 | int i; | |
2055 | for (i = 0; i < pkt.side_data_elems; i++) | |
2056 | av_free(pkt.side_data[i].data); | |
2057 | av_freep(&pkt.side_data); | |
2058 | pkt.side_data_elems = 0; | |
2059 | } | |
2060 | ||
2061 | return ret ? ret : pkt.size; | |
2062 | } | |
2063 | ||
2064 | #endif | |
2065 | ||
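/* Video counterpart of avcodec_encode_audio2(); when frame threading is
 * enabled for this encoder, the call is handed off to
 * ff_thread_video_encode_frame() instead. */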
2066 | int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, | |
2067 | AVPacket *avpkt, | |
2068 | const AVFrame *frame, | |
2069 | int *got_packet_ptr) | |
2070 | { | |
2071 | int ret; | |
2072 | AVPacket user_pkt = *avpkt; | |
2073 | int needs_realloc = !user_pkt.data; | |
2074 | ||
2075 | *got_packet_ptr = 0; | |
2076 | ||
2077 | if(CONFIG_FRAME_THREAD_ENCODER && | |
2078 | avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME)) | |
2079 | return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr); | |
2080 | ||
2081 | if ((avctx->flags&CODEC_FLAG_PASS1) && avctx->stats_out) | |
2082 | avctx->stats_out[0] = '\0'; | |
2083 | ||
2084 | if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { | |
2085 | av_free_packet(avpkt); | |
2086 | av_init_packet(avpkt); | |
2087 | avpkt->size = 0; | |
2088 | return 0; | |
2089 | } | |
2090 | ||
2091 | if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) | |
2092 | return AVERROR(EINVAL); | |
2093 | ||
2094 | av_assert0(avctx->codec->encode2); | |
2095 | ||
2096 | ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); | |
2097 | av_assert0(ret <= 0); | |
2098 | ||
2099 | if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) { | |
2100 | needs_realloc = 0; | |
2101 | if (user_pkt.data) { | |
2102 | if (user_pkt.size >= avpkt->size) { | |
2103 | memcpy(user_pkt.data, avpkt->data, avpkt->size); | |
2104 | } else { | |
2105 | av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size); | |
2106 | avpkt->size = user_pkt.size; | |
2107 | ret = -1; | |
2108 | } | |
2109 | avpkt->buf = user_pkt.buf; | |
2110 | avpkt->data = user_pkt.data; | |
2111 | #if FF_API_DESTRUCT_PACKET | |
2112 | FF_DISABLE_DEPRECATION_WARNINGS | |
2113 | avpkt->destruct = user_pkt.destruct; | |
2114 | FF_ENABLE_DEPRECATION_WARNINGS | |
2115 | #endif | |
2116 | } else { | |
2117 | if (av_dup_packet(avpkt) < 0) { | |
2118 | ret = AVERROR(ENOMEM); | |
2119 | } | |
2120 | } | |
2121 | } | |
2122 | ||
2123 | if (!ret) { | |
2124 | if (!*got_packet_ptr) | |
2125 | avpkt->size = 0; | |
2126 | else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) | |
2127 | avpkt->pts = avpkt->dts = frame->pts; | |
2128 | ||
2129 | if (needs_realloc && avpkt->data) { | |
2130 | ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); | |
2131 | if (ret >= 0) | |
2132 | avpkt->data = avpkt->buf->data; | |
2133 | } | |
2134 | ||
2135 | avctx->frame_number++; | |
2136 | } | |
2137 | ||
2138 | if (ret < 0 || !*got_packet_ptr) | |
2139 | av_free_packet(avpkt); | |
2140 | else | |
2141 | av_packet_merge_side_data(avpkt); | |
2142 | ||
2143 | emms_c(); | |
2144 | return ret; | |
2145 | } | |
2146 | ||
2147 | int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, | |
2148 | const AVSubtitle *sub) | |
2149 | { | |
2150 | int ret; | |
2151 | if (sub->start_display_time) { | |
2152 | av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n"); | |
2153 | return -1; | |
2154 | } | |
2155 | ||
2156 | ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub); | |
2157 | avctx->frame_number++; | |
2158 | return ret; | |
2159 | } | |
2160 | ||
2161 | /** | |
2162 | * Attempt to guess proper monotonic timestamps for decoded video frames | |
2163 | * which might have incorrect times. Input timestamps may wrap around, in | |
2164 | * which case the output will as well. | |
2165 | * | |
2166 | * @param pts the pts field of the decoded AVPacket, as passed through | |
2167 | * AVFrame.pkt_pts | |
2168 | * @param dts the dts field of the decoded AVPacket | |
2169 | * @return one of the input values, may be AV_NOPTS_VALUE | |
2170 | */ | |
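/* The heuristic below counts how often pts and dts have each failed to
 * increase monotonically and trusts the reordered pts unless it has proven
 * less reliable than dts (or is missing), in which case dts is used. */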
2171 | static int64_t guess_correct_pts(AVCodecContext *ctx, | |
2172 | int64_t reordered_pts, int64_t dts) | |
2173 | { | |
2174 | int64_t pts = AV_NOPTS_VALUE; | |
2175 | ||
2176 | if (dts != AV_NOPTS_VALUE) { | |
2177 | ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts; | |
2178 | ctx->pts_correction_last_dts = dts; | |
2179 | } else if (reordered_pts != AV_NOPTS_VALUE) | |
2180 | ctx->pts_correction_last_dts = reordered_pts; | |
2181 | ||
2182 | if (reordered_pts != AV_NOPTS_VALUE) { | |
2183 | ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts; | |
2184 | ctx->pts_correction_last_pts = reordered_pts; | |
2185 | } else if(dts != AV_NOPTS_VALUE) | |
2186 | ctx->pts_correction_last_pts = dts; | |
2187 | ||
2188 | if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE) | |
2189 | && reordered_pts != AV_NOPTS_VALUE) | |
2190 | pts = reordered_pts; | |
2191 | else | |
2192 | pts = dts; | |
2193 | ||
2194 | return pts; | |
2195 | } | |
2196 | ||
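/* AV_PKT_DATA_PARAM_CHANGE side data layout, as parsed below: a 32-bit
 * little-endian flags field, followed (depending on the flags set) by
 * channel count (u32le), channel layout (u64le), sample rate (u32le) and
 * width/height (two u32le values). */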
2197 | static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) | |
2198 | { | |
2199 | int size = 0, ret; | |
2200 | const uint8_t *data; | |
2201 | uint32_t flags; | |
2202 | ||
2203 | data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); | |
2204 | if (!data) | |
2205 | return 0; | |
2206 | ||
2207 | if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) { | |
2208 | av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter " | |
2209 | "changes, but PARAM_CHANGE side data was sent to it.\n"); | |
2210 | return AVERROR(EINVAL); | |
2211 | } | |
2212 | ||
2213 | if (size < 4) | |
2214 | goto fail; | |
2215 | ||
2216 | flags = bytestream_get_le32(&data); | |
2217 | size -= 4; | |
2218 | ||
2219 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) { | |
2220 | if (size < 4) | |
2221 | goto fail; | |
2222 | avctx->channels = bytestream_get_le32(&data); | |
2223 | size -= 4; | |
2224 | } | |
2225 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) { | |
2226 | if (size < 8) | |
2227 | goto fail; | |
2228 | avctx->channel_layout = bytestream_get_le64(&data); | |
2229 | size -= 8; | |
2230 | } | |
2231 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { | |
2232 | if (size < 4) | |
2233 | goto fail; | |
2234 | avctx->sample_rate = bytestream_get_le32(&data); | |
2235 | size -= 4; | |
2236 | } | |
2237 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { | |
2238 | if (size < 8) | |
2239 | goto fail; | |
2240 | avctx->width = bytestream_get_le32(&data); | |
2241 | avctx->height = bytestream_get_le32(&data); | |
2242 | size -= 8; | |
2243 | ret = ff_set_dimensions(avctx, avctx->width, avctx->height); | |
2244 | if (ret < 0) | |
2245 | return ret; | |
2246 | } | |
2247 | ||
2248 | return 0; | |
2249 | fail: | |
2250 | av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n"); | |
2251 | return AVERROR_INVALIDDATA; | |
2252 | } | |
2253 | ||
2254 | static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame) | |
2255 | { | |
2256 | int size; | |
2257 | const uint8_t *side_metadata; | |
2258 | ||
2259 | AVDictionary **frame_md = avpriv_frame_get_metadatap(frame); | |
2260 | ||
2261 | side_metadata = av_packet_get_side_data(avctx->internal->pkt, | |
2262 | AV_PKT_DATA_STRINGS_METADATA, &size); | |
2263 | return av_packet_unpack_dictionary(side_metadata, size, frame_md); | |
2264 | } | |
2265 | ||
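/* Compatibility path for callers that did not request refcounted frames:
 * move the decoder's refcounted frame into avci->to_free (keeping its
 * buffers alive until the next decode call) and hand the caller a
 * non-refcounted view of the same data. */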
2266 | static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame) | |
2267 | { | |
2268 | int ret; | |
2269 | ||
2270 | /* move the original frame to our backup */ | |
2271 | av_frame_unref(avci->to_free); | |
2272 | av_frame_move_ref(avci->to_free, frame); | |
2273 | ||
2274 | /* now copy everything except the AVBufferRefs back | |
2275 | * note that we make a COPY of the side data, so calling av_frame_free() on | |
2276 | * the caller's frame will work properly */ | |
2277 | ret = av_frame_copy_props(frame, avci->to_free); | |
2278 | if (ret < 0) | |
2279 | return ret; | |
2280 | ||
2281 | memcpy(frame->data, avci->to_free->data, sizeof(frame->data)); | |
2282 | memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize)); | |
2283 | if (avci->to_free->extended_data != avci->to_free->data) { | |
2284 | int planes = av_frame_get_channels(avci->to_free); | |
2285 | int size = planes * sizeof(*frame->extended_data); | |
2286 | ||
2287 | if (!size) { | |
2288 | av_frame_unref(frame); | |
2289 | return AVERROR_BUG; | |
2290 | } | |
2291 | ||
2292 | frame->extended_data = av_malloc(size); | |
2293 | if (!frame->extended_data) { | |
2294 | av_frame_unref(frame); | |
2295 | return AVERROR(ENOMEM); | |
2296 | } | |
2297 | memcpy(frame->extended_data, avci->to_free->extended_data, | |
2298 | size); | |
2299 | } else | |
2300 | frame->extended_data = frame->data; | |
2301 | ||
2302 | frame->format = avci->to_free->format; | |
2303 | frame->width = avci->to_free->width; | |
2304 | frame->height = avci->to_free->height; | |
2305 | frame->channel_layout = avci->to_free->channel_layout; | |
2306 | frame->nb_samples = avci->to_free->nb_samples; | |
2307 | av_frame_set_channels(frame, av_frame_get_channels(avci->to_free)); | |
2308 | ||
2309 | return 0; | |
2310 | } | |
2311 | ||
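/* A typical decode loop built on this function might look roughly like the
 * following sketch (assuming an opened video decoder `avctx` and packets
 * `pkt` obtained from a demuxer; error handling omitted):
 *
 *     AVFrame *picture = av_frame_alloc();
 *     int got_picture  = 0;
 *     int ret = avcodec_decode_video2(avctx, picture, &got_picture, &pkt);
 *     if (ret >= 0 && got_picture) {
 *         // use picture; if refcounted_frames is set, av_frame_unref() it
 *         // when done
 *     }
 */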
2312 | int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, | |
2313 | int *got_picture_ptr, | |
2314 | const AVPacket *avpkt) | |
2315 | { | |
2316 | AVCodecInternal *avci = avctx->internal; | |
2317 | int ret; | |
2318 | // copy to ensure we do not change avpkt | |
2319 | AVPacket tmp = *avpkt; | |
2320 | ||
2321 | if (!avctx->codec) | |
2322 | return AVERROR(EINVAL); | |
2323 | if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) { | |
2324 | av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n"); | |
2325 | return AVERROR(EINVAL); | |
2326 | } | |
2327 | ||
2328 | *got_picture_ptr = 0; | |
2329 | if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) | |
2330 | return AVERROR(EINVAL); | |
2331 | ||
2332 | av_frame_unref(picture); | |
2333 | ||
2334 | if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { | |
2335 | int did_split = av_packet_split_side_data(&tmp); | |
2336 | ret = apply_param_change(avctx, &tmp); | |
2337 | if (ret < 0) { | |
2338 | av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); | |
2339 | if (avctx->err_recognition & AV_EF_EXPLODE) | |
2340 | goto fail; | |
2341 | } | |
2342 | ||
2343 | avctx->internal->pkt = &tmp; | |
2344 | if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) | |
2345 | ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, | |
2346 | &tmp); | |
2347 | else { | |
2348 | ret = avctx->codec->decode(avctx, picture, got_picture_ptr, | |
2349 | &tmp); | |
2350 | picture->pkt_dts = avpkt->dts; | |
2351 | ||
2352 | if(!avctx->has_b_frames){ | |
2353 | av_frame_set_pkt_pos(picture, avpkt->pos); | |
2354 | } | |
2355 | //FIXME these should be under if(!avctx->has_b_frames) | |
2356 | /* get_buffer is supposed to set frame parameters */ | |
2357 | if (!(avctx->codec->capabilities & CODEC_CAP_DR1)) { | |
2358 | if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio; | |
2359 | if (!picture->width) picture->width = avctx->width; | |
2360 | if (!picture->height) picture->height = avctx->height; | |
2361 | if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt; | |
2362 | } | |
2363 | } | |
2364 | add_metadata_from_side_data(avctx, picture); | |
2365 | ||
2366 | fail: | |
2367 | emms_c(); // needed to avoid an emms_c() call before every return | |
2368 | ||
2369 | avctx->internal->pkt = NULL; | |
2370 | if (did_split) { | |
2371 | av_packet_free_side_data(&tmp); | |
2372 | if(ret == tmp.size) | |
2373 | ret = avpkt->size; | |
2374 | } | |
2375 | ||
2376 | if (*got_picture_ptr) { | |
2377 | if (!avctx->refcounted_frames) { | |
2378 | int err = unrefcount_frame(avci, picture); | |
2379 | if (err < 0) | |
2380 | return err; | |
2381 | } | |
2382 | ||
2383 | avctx->frame_number++; | |
2384 | av_frame_set_best_effort_timestamp(picture, | |
2385 | guess_correct_pts(avctx, | |
2386 | picture->pkt_pts, | |
2387 | picture->pkt_dts)); | |
2388 | } else | |
2389 | av_frame_unref(picture); | |
2390 | } else | |
2391 | ret = 0; | |
2392 | ||
2393 | /* many decoders assign whole AVFrames, thus overwriting extended_data; | |
2394 | * make sure it's set correctly */ | |
2395 | av_assert0(!picture->extended_data || picture->extended_data == picture->data); | |
2396 | ||
f6fa7814 DM |
2397 | #if FF_API_AVCTX_TIMEBASE |
2398 | if (avctx->framerate.num > 0 && avctx->framerate.den > 0) | |
2399 | avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); | |
2400 | #endif | |
2401 | ||
2ba45a60 DM |
2402 | return ret; |
2403 | } | |
2404 | ||
2405 | #if FF_API_OLD_DECODE_AUDIO | |
2406 | int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, | |
2407 | int *frame_size_ptr, | |
2408 | AVPacket *avpkt) | |
2409 | { | |
2410 | AVFrame *frame = av_frame_alloc(); | |
2411 | int ret, got_frame = 0; | |
2412 | ||
2413 | if (!frame) | |
2414 | return AVERROR(ENOMEM); | |
2415 | if (avctx->get_buffer != avcodec_default_get_buffer) { | |
2416 | av_log(avctx, AV_LOG_ERROR, "Custom get_buffer() for use with " | |
2417 | "avcodec_decode_audio3() detected. Overriding with avcodec_default_get_buffer\n"); | |
2418 | av_log(avctx, AV_LOG_ERROR, "Please port your application to " | |
2419 | "avcodec_decode_audio4()\n"); | |
2420 | avctx->get_buffer = avcodec_default_get_buffer; | |
2421 | avctx->release_buffer = avcodec_default_release_buffer; | |
2422 | } | |
2423 | ||
2424 | ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt); | |
2425 | ||
2426 | if (ret >= 0 && got_frame) { | |
2427 | int ch, plane_size; | |
2428 | int planar = av_sample_fmt_is_planar(avctx->sample_fmt); | |
2429 | int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels, | |
2430 | frame->nb_samples, | |
2431 | avctx->sample_fmt, 1); | |
2432 | if (*frame_size_ptr < data_size) { | |
2433 | av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for " | |
2434 | "the current frame (%d < %d)\n", *frame_size_ptr, data_size); | |
2435 | av_frame_free(&frame); | |
2436 | return AVERROR(EINVAL); | |
2437 | } | |
2438 | ||
2439 | memcpy(samples, frame->extended_data[0], plane_size); | |
2440 | ||
2441 | if (planar && avctx->channels > 1) { | |
2442 | uint8_t *out = ((uint8_t *)samples) + plane_size; | |
2443 | for (ch = 1; ch < avctx->channels; ch++) { | |
2444 | memcpy(out, frame->extended_data[ch], plane_size); | |
2445 | out += plane_size; | |
2446 | } | |
2447 | } | |
2448 | *frame_size_ptr = data_size; | |
2449 | } else { | |
2450 | *frame_size_ptr = 0; | |
2451 | } | |
2452 | av_frame_free(&frame); | |
2453 | return ret; | |
2454 | } | |
2455 | ||
2456 | #endif | |
2457 | ||
2458 | int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, | |
2459 | AVFrame *frame, | |
2460 | int *got_frame_ptr, | |
2461 | const AVPacket *avpkt) | |
2462 | { | |
2463 | AVCodecInternal *avci = avctx->internal; | |
2464 | int ret = 0; | |
2465 | ||
2466 | *got_frame_ptr = 0; | |
2467 | ||
2468 | if (!avpkt->data && avpkt->size) { | |
2469 | av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); | |
2470 | return AVERROR(EINVAL); | |
2471 | } | |
2472 | if (!avctx->codec) | |
2473 | return AVERROR(EINVAL); | |
2474 | if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) { | |
2475 | av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n"); | |
2476 | return AVERROR(EINVAL); | |
2477 | } | |
2478 | ||
2479 | av_frame_unref(frame); | |
2480 | ||
2481 | if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { | |
2482 | uint8_t *side; | |
2483 | int side_size; | |
2484 | uint32_t discard_padding = 0; | |
f6fa7814 DM |
2485 | uint8_t skip_reason = 0; |
2486 | uint8_t discard_reason = 0; | |
2ba45a60 DM |
2487 | // copy to ensure we do not change avpkt |
2488 | AVPacket tmp = *avpkt; | |
2489 | int did_split = av_packet_split_side_data(&tmp); | |
2490 | ret = apply_param_change(avctx, &tmp); | |
2491 | if (ret < 0) { | |
2492 | av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); | |
2493 | if (avctx->err_recognition & AV_EF_EXPLODE) | |
2494 | goto fail; | |
2495 | } | |
2496 | ||
2497 | avctx->internal->pkt = &tmp; | |
2498 | if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) | |
2499 | ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp); | |
2500 | else { | |
2501 | ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp); | |
2502 | frame->pkt_dts = avpkt->dts; | |
2503 | } | |
2504 | if (ret >= 0 && *got_frame_ptr) { | |
2505 | add_metadata_from_side_data(avctx, frame); | |
2506 | avctx->frame_number++; | |
2507 | av_frame_set_best_effort_timestamp(frame, | |
2508 | guess_correct_pts(avctx, | |
2509 | frame->pkt_pts, | |
2510 | frame->pkt_dts)); | |
2511 | if (frame->format == AV_SAMPLE_FMT_NONE) | |
2512 | frame->format = avctx->sample_fmt; | |
2513 | if (!frame->channel_layout) | |
2514 | frame->channel_layout = avctx->channel_layout; | |
2515 | if (!av_frame_get_channels(frame)) | |
2516 | av_frame_set_channels(frame, avctx->channels); | |
2517 | if (!frame->sample_rate) | |
2518 | frame->sample_rate = avctx->sample_rate; | |
2519 | } | |
2520 | ||
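/* AV_PKT_DATA_SKIP_SAMPLES side data: u32le number of samples to skip from
 * the start of the decoded frame, u32le number of samples to discard from
 * the end, one byte of skip reason and one byte of discard reason. */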
2521 | side= av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size); | |
2522 | if(side && side_size>=10) { | |
2523 | avctx->internal->skip_samples = AV_RL32(side); | |
2524 | av_log(avctx, AV_LOG_DEBUG, "skip %d samples due to side data\n", | |
2525 | avctx->internal->skip_samples); | |
2526 | discard_padding = AV_RL32(side + 4); | |
f6fa7814 DM |
2527 | skip_reason = AV_RL8(side + 8); |
2528 | discard_reason = AV_RL8(side + 9); | |
2ba45a60 | 2529 | } |
f6fa7814 DM |
2530 | if (avctx->internal->skip_samples && *got_frame_ptr && |
2531 | !(avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL)) { | |
2ba45a60 DM |
2532 | if(frame->nb_samples <= avctx->internal->skip_samples){ |
2533 | *got_frame_ptr = 0; | |
2534 | avctx->internal->skip_samples -= frame->nb_samples; | |
2535 | av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n", | |
2536 | avctx->internal->skip_samples); | |
2537 | } else { | |
2538 | av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples, | |
2539 | frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format); | |
2540 | if(avctx->pkt_timebase.num && avctx->sample_rate) { | |
2541 | int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples, | |
2542 | (AVRational){1, avctx->sample_rate}, | |
2543 | avctx->pkt_timebase); | |
2544 | if(frame->pkt_pts!=AV_NOPTS_VALUE) | |
2545 | frame->pkt_pts += diff_ts; | |
2546 | if(frame->pkt_dts!=AV_NOPTS_VALUE) | |
2547 | frame->pkt_dts += diff_ts; | |
2548 | if (av_frame_get_pkt_duration(frame) >= diff_ts) | |
2549 | av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts); | |
2550 | } else { | |
2551 | av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n"); | |
2552 | } | |
2553 | av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n", | |
2554 | avctx->internal->skip_samples, frame->nb_samples); | |
2555 | frame->nb_samples -= avctx->internal->skip_samples; | |
2556 | avctx->internal->skip_samples = 0; | |
2557 | } | |
2558 | } | |
2559 | ||
f6fa7814 DM |
2560 | if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr && |
2561 | !(avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL)) { | |
2ba45a60 DM |
2562 | if (discard_padding == frame->nb_samples) { |
2563 | *got_frame_ptr = 0; | |
2564 | } else { | |
2565 | if(avctx->pkt_timebase.num && avctx->sample_rate) { | |
2566 | int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding, | |
2567 | (AVRational){1, avctx->sample_rate}, | |
2568 | avctx->pkt_timebase); | |
2569 | if (av_frame_get_pkt_duration(frame) >= diff_ts) | |
2570 | av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts); | |
2571 | } else { | |
2572 | av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n"); | |
2573 | } | |
2574 | av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n", | |
2575 | discard_padding, frame->nb_samples); | |
2576 | frame->nb_samples -= discard_padding; | |
2577 | } | |
2578 | } | |
f6fa7814 DM |
2579 | |
2580 | if ((avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) { | |
2581 | AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10); | |
2582 | if (fside) { | |
2583 | AV_WL32(fside->data, avctx->internal->skip_samples); | |
2584 | AV_WL32(fside->data + 4, discard_padding); | |
2585 | AV_WL8(fside->data + 8, skip_reason); | |
2586 | AV_WL8(fside->data + 9, discard_reason); | |
2587 | avctx->internal->skip_samples = 0; | |
2588 | } | |
2589 | } | |
2ba45a60 DM |
2590 | fail: |
2591 | avctx->internal->pkt = NULL; | |
2592 | if (did_split) { | |
2593 | av_packet_free_side_data(&tmp); | |
2594 | if(ret == tmp.size) | |
2595 | ret = avpkt->size; | |
2596 | } | |
2597 | ||
2598 | if (ret >= 0 && *got_frame_ptr) { | |
2599 | if (!avctx->refcounted_frames) { | |
2600 | int err = unrefcount_frame(avci, frame); | |
2601 | if (err < 0) | |
2602 | return err; | |
2603 | } | |
2604 | } else | |
2605 | av_frame_unref(frame); | |
2606 | } | |
2607 | ||
2608 | return ret; | |
2609 | } | |
2610 | ||
2611 | #define UTF8_MAX_BYTES 4 /* 5- and 6-byte sequences should not be used */ | |
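/* Convert the subtitle packet payload from avctx->sub_charenc to UTF-8 with
 * iconv, writing the result into a newly allocated packet that is at most
 * UTF8_MAX_BYTES times the input size. Without iconv support this is
 * reported as an error. */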
2612 | static int recode_subtitle(AVCodecContext *avctx, | |
2613 | AVPacket *outpkt, const AVPacket *inpkt) | |
2614 | { | |
2615 | #if CONFIG_ICONV | |
2616 | iconv_t cd = (iconv_t)-1; | |
2617 | int ret = 0; | |
2618 | char *inb, *outb; | |
2619 | size_t inl, outl; | |
2620 | AVPacket tmp; | |
2621 | #endif | |
2622 | ||
2623 | if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) | |
2624 | return 0; | |
2625 | ||
2626 | #if CONFIG_ICONV | |
2627 | cd = iconv_open("UTF-8", avctx->sub_charenc); | |
2628 | av_assert0(cd != (iconv_t)-1); | |
2629 | ||
2630 | inb = inpkt->data; | |
2631 | inl = inpkt->size; | |
2632 | ||
2633 | if (inl >= INT_MAX / UTF8_MAX_BYTES - FF_INPUT_BUFFER_PADDING_SIZE) { | |
2634 | av_log(avctx, AV_LOG_ERROR, "Subtitle packet is too big for recoding\n"); | |
2635 | ret = AVERROR(ENOMEM); | |
2636 | goto end; | |
2637 | } | |
2638 | ||
2639 | ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES); | |
2640 | if (ret < 0) | |
2641 | goto end; | |
2642 | outpkt->buf = tmp.buf; | |
2643 | outpkt->data = tmp.data; | |
2644 | outpkt->size = tmp.size; | |
2645 | outb = outpkt->data; | |
2646 | outl = outpkt->size; | |
2647 | ||
2648 | if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 || | |
2649 | iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 || | |
2650 | outl >= outpkt->size || inl != 0) { | |
f6fa7814 | 2651 | ret = FFMIN(AVERROR(errno), -1); |
2ba45a60 DM |
2652 | av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" " |
2653 | "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc); | |
2654 | av_free_packet(&tmp); | |
2ba45a60 DM |
2655 | goto end; |
2656 | } | |
2657 | outpkt->size -= outl; | |
2658 | memset(outpkt->data + outpkt->size, 0, outl); | |
2659 | ||
2660 | end: | |
2661 | if (cd != (iconv_t)-1) | |
2662 | iconv_close(cd); | |
2663 | return ret; | |
2664 | #else | |
2665 | av_log(avctx, AV_LOG_ERROR, "Requesting subtitle recoding without iconv support\n"); | |
2666 | return AVERROR(EINVAL); | |
2667 | #endif | |
2668 | } | |
2669 | ||
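/* Return 1 if str is well-formed UTF-8: overlong encodings, code points
 * above U+10FFFF, UTF-16 surrogates and U+FFFE are all rejected. */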
2670 | static int utf8_check(const uint8_t *str) | |
2671 | { | |
2672 | const uint8_t *byte; | |
2673 | uint32_t codepoint, min; | |
2674 | ||
2675 | while (*str) { | |
2676 | byte = str; | |
2677 | GET_UTF8(codepoint, *(byte++), return 0;); | |
2678 | min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 : | |
2679 | 1 << (5 * (byte - str) - 4); | |
2680 | if (codepoint < min || codepoint >= 0x110000 || | |
2681 | codepoint == 0xFFFE /* noncharacter (byte-swapped BOM) */ || | |
2682 | (codepoint >= 0xD800 && codepoint <= 0xDFFF) /* surrogates */) | |
2683 | return 0; | |
2684 | str = byte; | |
2685 | } | |
2686 | return 1; | |
2687 | } | |
2688 | ||
2689 | int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, | |
2690 | int *got_sub_ptr, | |
2691 | AVPacket *avpkt) | |
2692 | { | |
2693 | int i, ret = 0; | |
2694 | ||
2695 | if (!avpkt->data && avpkt->size) { | |
2696 | av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); | |
2697 | return AVERROR(EINVAL); | |
2698 | } | |
2699 | if (!avctx->codec) | |
2700 | return AVERROR(EINVAL); | |
2701 | if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) { | |
2702 | av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n"); | |
2703 | return AVERROR(EINVAL); | |
2704 | } | |
2705 | ||
2706 | *got_sub_ptr = 0; | |
f6fa7814 | 2707 | get_subtitle_defaults(sub); |
2ba45a60 DM |
2708 | |
2709 | if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { | |
2710 | AVPacket pkt_recoded; | |
2711 | AVPacket tmp = *avpkt; | |
2712 | int did_split = av_packet_split_side_data(&tmp); | |
2713 | //apply_param_change(avctx, &tmp); | |
2714 | ||
2715 | if (did_split) { | |
2716 | /* FFMIN() prevents overflow in case the packet wasn't allocated with | |
2717 | * proper padding. | |
2718 | * If the side data is smaller than the buffer padding size, the | |
2719 | * remaining bytes should have already been filled with zeros by the | |
2720 | * original packet allocation anyway. */ | |
2721 | memset(tmp.data + tmp.size, 0, | |
2722 | FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE)); | |
2723 | } | |
2724 | ||
2725 | pkt_recoded = tmp; | |
2726 | ret = recode_subtitle(avctx, &pkt_recoded, &tmp); | |
2727 | if (ret < 0) { | |
2728 | *got_sub_ptr = 0; | |
2729 | } else { | |
2730 | avctx->internal->pkt = &pkt_recoded; | |
2731 | ||
2732 | if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE) | |
2733 | sub->pts = av_rescale_q(avpkt->pts, | |
2734 | avctx->pkt_timebase, AV_TIME_BASE_Q); | |
2735 | ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded); | |
2736 | av_assert1((ret >= 0) >= !!*got_sub_ptr && | |
2737 | !!*got_sub_ptr >= !!sub->num_rects); | |
2738 | ||
2739 | if (sub->num_rects && !sub->end_display_time && avpkt->duration && | |
2740 | avctx->pkt_timebase.num) { | |
2741 | AVRational ms = { 1, 1000 }; | |
2742 | sub->end_display_time = av_rescale_q(avpkt->duration, | |
2743 | avctx->pkt_timebase, ms); | |
2744 | } | |
2745 | ||
2746 | for (i = 0; i < sub->num_rects; i++) { | |
2747 | if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) { | |
2748 | av_log(avctx, AV_LOG_ERROR, | |
2749 | "Invalid UTF-8 in decoded subtitles text; " | |
2750 | "maybe missing -sub_charenc option\n"); | |
2751 | avsubtitle_free(sub); | |
2752 | return AVERROR_INVALIDDATA; | |
2753 | } | |
2754 | } | |
2755 | ||
2756 | if (tmp.data != pkt_recoded.data) { // did we recode? | |
2757 | /* prevent from destroying side data from original packet */ | |
2758 | pkt_recoded.side_data = NULL; | |
2759 | pkt_recoded.side_data_elems = 0; | |
2760 | ||
2761 | av_free_packet(&pkt_recoded); | |
2762 | } | |
2763 | if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) | |
2764 | sub->format = 0; | |
2765 | else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB) | |
2766 | sub->format = 1; | |
2767 | avctx->internal->pkt = NULL; | |
2768 | } | |
2769 | ||
2770 | if (did_split) { | |
2771 | av_packet_free_side_data(&tmp); | |
2772 | if(ret == tmp.size) | |
2773 | ret = avpkt->size; | |
2774 | } | |
2775 | ||
2776 | if (*got_sub_ptr) | |
2777 | avctx->frame_number++; | |
2778 | } | |
2779 | ||
2780 | return ret; | |
2781 | } | |
2782 | ||
2783 | void avsubtitle_free(AVSubtitle *sub) | |
2784 | { | |
2785 | int i; | |
2786 | ||
2787 | for (i = 0; i < sub->num_rects; i++) { | |
2788 | av_freep(&sub->rects[i]->pict.data[0]); | |
2789 | av_freep(&sub->rects[i]->pict.data[1]); | |
2790 | av_freep(&sub->rects[i]->pict.data[2]); | |
2791 | av_freep(&sub->rects[i]->pict.data[3]); | |
2792 | av_freep(&sub->rects[i]->text); | |
2793 | av_freep(&sub->rects[i]->ass); | |
2794 | av_freep(&sub->rects[i]); | |
2795 | } | |
2796 | ||
2797 | av_freep(&sub->rects); | |
2798 | ||
2799 | memset(sub, 0, sizeof(AVSubtitle)); | |
2800 | } | |
2801 | ||
2802 | av_cold int avcodec_close(AVCodecContext *avctx) | |
2803 | { | |
2804 | if (!avctx) | |
2805 | return 0; | |
2806 | ||
2807 | if (avcodec_is_open(avctx)) { | |
2808 | FramePool *pool = avctx->internal->pool; | |
2809 | int i; | |
2810 | if (CONFIG_FRAME_THREAD_ENCODER && | |
2811 | avctx->internal->frame_thread_encoder && avctx->thread_count > 1) { | |
2812 | ff_frame_thread_encoder_free(avctx); | |
2813 | } | |
2814 | if (HAVE_THREADS && avctx->internal->thread_ctx) | |
2815 | ff_thread_free(avctx); | |
2816 | if (avctx->codec && avctx->codec->close) | |
2817 | avctx->codec->close(avctx); | |
2818 | avctx->coded_frame = NULL; | |
2819 | avctx->internal->byte_buffer_size = 0; | |
2820 | av_freep(&avctx->internal->byte_buffer); | |
2821 | av_frame_free(&avctx->internal->to_free); | |
2822 | for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) | |
2823 | av_buffer_pool_uninit(&pool->pools[i]); | |
2824 | av_freep(&avctx->internal->pool); | |
2825 | ||
2826 | if (avctx->hwaccel && avctx->hwaccel->uninit) | |
2827 | avctx->hwaccel->uninit(avctx); | |
2828 | av_freep(&avctx->internal->hwaccel_priv_data); | |
2829 | ||
2830 | av_freep(&avctx->internal); | |
2831 | } | |
2832 | ||
2833 | if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) | |
2834 | av_opt_free(avctx->priv_data); | |
2835 | av_opt_free(avctx); | |
2836 | av_freep(&avctx->priv_data); | |
2837 | if (av_codec_is_encoder(avctx->codec)) | |
2838 | av_freep(&avctx->extradata); | |
2839 | avctx->codec = NULL; | |
2840 | avctx->active_thread_type = 0; | |
2841 | ||
2842 | return 0; | |
2843 | } | |
2844 | ||
2845 | static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id) | |
2846 | { | |
2847 | switch(id){ | |
2848 | //This is for future deprecated codec ids; it's empty since the | |
2849 | //last major bump but will fill up again over time, please don't remove it | |
2850 | // case AV_CODEC_ID_UTVIDEO_DEPRECATED: return AV_CODEC_ID_UTVIDEO; | |
2851 | case AV_CODEC_ID_BRENDER_PIX_DEPRECATED : return AV_CODEC_ID_BRENDER_PIX; | |
2852 | case AV_CODEC_ID_OPUS_DEPRECATED : return AV_CODEC_ID_OPUS; | |
2853 | case AV_CODEC_ID_TAK_DEPRECATED : return AV_CODEC_ID_TAK; | |
2854 | case AV_CODEC_ID_PAF_AUDIO_DEPRECATED : return AV_CODEC_ID_PAF_AUDIO; | |
2855 | case AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED : return AV_CODEC_ID_PCM_S24LE_PLANAR; | |
2856 | case AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED : return AV_CODEC_ID_PCM_S32LE_PLANAR; | |
2857 | case AV_CODEC_ID_ADPCM_VIMA_DEPRECATED : return AV_CODEC_ID_ADPCM_VIMA; | |
2858 | case AV_CODEC_ID_ESCAPE130_DEPRECATED : return AV_CODEC_ID_ESCAPE130; | |
2859 | case AV_CODEC_ID_EXR_DEPRECATED : return AV_CODEC_ID_EXR; | |
2860 | case AV_CODEC_ID_G2M_DEPRECATED : return AV_CODEC_ID_G2M; | |
2861 | case AV_CODEC_ID_PAF_VIDEO_DEPRECATED : return AV_CODEC_ID_PAF_VIDEO; | |
2862 | case AV_CODEC_ID_WEBP_DEPRECATED : return AV_CODEC_ID_WEBP; | |
2863 | case AV_CODEC_ID_HEVC_DEPRECATED : return AV_CODEC_ID_HEVC; | |
2864 | case AV_CODEC_ID_MVC1_DEPRECATED : return AV_CODEC_ID_MVC1; | |
2865 | case AV_CODEC_ID_MVC2_DEPRECATED : return AV_CODEC_ID_MVC2; | |
2866 | case AV_CODEC_ID_SANM_DEPRECATED : return AV_CODEC_ID_SANM; | |
2867 | case AV_CODEC_ID_SGIRLE_DEPRECATED : return AV_CODEC_ID_SGIRLE; | |
2868 | case AV_CODEC_ID_VP7_DEPRECATED : return AV_CODEC_ID_VP7; | |
2869 | default : return id; | |
2870 | } | |
2871 | } | |
2872 | ||
2873 | static AVCodec *find_encdec(enum AVCodecID id, int encoder) | |
2874 | { | |
2875 | AVCodec *p, *experimental = NULL; | |
2876 | p = first_avcodec; | |
2877 | id= remap_deprecated_codec_id(id); | |
2878 | while (p) { | |
2879 | if ((encoder ? av_codec_is_encoder(p) : av_codec_is_decoder(p)) && | |
2880 | p->id == id) { | |
2881 | if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { | |
2882 | experimental = p; | |
2883 | } else | |
2884 | return p; | |
2885 | } | |
2886 | p = p->next; | |
2887 | } | |
2888 | return experimental; | |
2889 | } | |
2890 | ||
2891 | AVCodec *avcodec_find_encoder(enum AVCodecID id) | |
2892 | { | |
2893 | return find_encdec(id, 1); | |
2894 | } | |
2895 | ||
2896 | AVCodec *avcodec_find_encoder_by_name(const char *name) | |
2897 | { | |
2898 | AVCodec *p; | |
2899 | if (!name) | |
2900 | return NULL; | |
2901 | p = first_avcodec; | |
2902 | while (p) { | |
2903 | if (av_codec_is_encoder(p) && strcmp(name, p->name) == 0) | |
2904 | return p; | |
2905 | p = p->next; | |
2906 | } | |
2907 | return NULL; | |
2908 | } | |
2909 | ||
2910 | AVCodec *avcodec_find_decoder(enum AVCodecID id) | |
2911 | { | |
2912 | return find_encdec(id, 0); | |
2913 | } | |
2914 | ||
2915 | AVCodec *avcodec_find_decoder_by_name(const char *name) | |
2916 | { | |
2917 | AVCodec *p; | |
2918 | if (!name) | |
2919 | return NULL; | |
2920 | p = first_avcodec; | |
2921 | while (p) { | |
2922 | if (av_codec_is_decoder(p) && strcmp(name, p->name) == 0) | |
2923 | return p; | |
2924 | p = p->next; | |
2925 | } | |
2926 | return NULL; | |
2927 | } | |
2928 | ||
2929 | const char *avcodec_get_name(enum AVCodecID id) | |
2930 | { | |
2931 | const AVCodecDescriptor *cd; | |
2932 | AVCodec *codec; | |
2933 | ||
2934 | if (id == AV_CODEC_ID_NONE) | |
2935 | return "none"; | |
2936 | cd = avcodec_descriptor_get(id); | |
2937 | if (cd) | |
2938 | return cd->name; | |
2939 | av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id); | |
2940 | codec = avcodec_find_decoder(id); | |
2941 | if (codec) | |
2942 | return codec->name; | |
2943 | codec = avcodec_find_encoder(id); | |
2944 | if (codec) | |
2945 | return codec->name; | |
2946 | return "unknown_codec"; | |
2947 | } | |
2948 | ||
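/* Write a human-readable form of a fourcc codec tag into buf: each of the
 * four bytes is printed as-is if it is a printable tag character, or as a
 * bracketed decimal value otherwise. The return value is the length that
 * would have been written, snprintf-style. */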
2949 | size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag) | |
2950 | { | |
2951 | int i, len, ret = 0; | |
2952 | ||
2953 | #define TAG_PRINT(x) \ | |
2954 | (((x) >= '0' && (x) <= '9') || \ | |
2955 | ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \ | |
2956 | ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_')) | |
2957 | ||
2958 | for (i = 0; i < 4; i++) { | |
2959 | len = snprintf(buf, buf_size, | |
2960 | TAG_PRINT(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF); | |
2961 | buf += len; | |
2962 | buf_size = buf_size > len ? buf_size - len : 0; | |
2963 | ret += len; | |
2964 | codec_tag >>= 8; | |
2965 | } | |
2966 | return ret; | |
2967 | } | |
2968 | ||
2969 | void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) | |
2970 | { | |
2971 | const char *codec_type; | |
2972 | const char *codec_name; | |
2973 | const char *profile = NULL; | |
2974 | const AVCodec *p; | |
2975 | int bitrate; | |
f6fa7814 | 2976 | int new_line = 0; |
2ba45a60 | 2977 | AVRational display_aspect_ratio; |
f6fa7814 | 2978 | const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", "; |
2ba45a60 DM |
2979 | |
2980 | if (!buf || buf_size <= 0) | |
2981 | return; | |
2982 | codec_type = av_get_media_type_string(enc->codec_type); | |
2983 | codec_name = avcodec_get_name(enc->codec_id); | |
2984 | if (enc->profile != FF_PROFILE_UNKNOWN) { | |
2985 | if (enc->codec) | |
2986 | p = enc->codec; | |
2987 | else | |
2988 | p = encode ? avcodec_find_encoder(enc->codec_id) : | |
2989 | avcodec_find_decoder(enc->codec_id); | |
2990 | if (p) | |
2991 | profile = av_get_profile_name(p, enc->profile); | |
2992 | } | |
2993 | ||
2994 | snprintf(buf, buf_size, "%s: %s", codec_type ? codec_type : "unknown", | |
2995 | codec_name); | |
2996 | buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ | |
2997 | ||
2998 | if (enc->codec && strcmp(enc->codec->name, codec_name)) | |
2999 | snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name); | |
3000 | ||
3001 | if (profile) | |
3002 | snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile); | |
f6fa7814 | 3003 | |
2ba45a60 DM |
3004 | if (enc->codec_tag) { |
3005 | char tag_buf[32]; | |
3006 | av_get_codec_tag_string(tag_buf, sizeof(tag_buf), enc->codec_tag); | |
3007 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3008 | " (%s / 0x%04X)", tag_buf, enc->codec_tag); | |
3009 | } | |
3010 | ||
3011 | switch (enc->codec_type) { | |
3012 | case AVMEDIA_TYPE_VIDEO: | |
f6fa7814 | 3013 | { |
2ba45a60 | 3014 | char detail[256] = "("; |
f6fa7814 DM |
3015 | |
3016 | av_strlcat(buf, separator, buf_size); | |
3017 | ||
2ba45a60 | 3018 | snprintf(buf + strlen(buf), buf_size - strlen(buf), |
f6fa7814 | 3019 | "%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" : |
2ba45a60 | 3020 | av_get_pix_fmt_name(enc->pix_fmt)); |
f6fa7814 | 3021 | if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE && |
2ba45a60 DM |
3022 | enc->bits_per_raw_sample <= av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth_minus1) |
3023 | av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample); | |
3024 | if (enc->color_range != AVCOL_RANGE_UNSPECIFIED) | |
f6fa7814 DM |
3025 | av_strlcatf(detail, sizeof(detail), "%s, ", |
3026 | av_color_range_name(enc->color_range)); | |
3027 | ||
3028 | if (enc->colorspace != AVCOL_SPC_UNSPECIFIED || | |
3029 | enc->color_primaries != AVCOL_PRI_UNSPECIFIED || | |
3030 | enc->color_trc != AVCOL_TRC_UNSPECIFIED) { | |
3031 | if (enc->colorspace != (int)enc->color_primaries || | |
3032 | enc->colorspace != (int)enc->color_trc) { | |
3033 | new_line = 1; | |
3034 | av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ", | |
3035 | av_color_space_name(enc->colorspace), | |
3036 | av_color_primaries_name(enc->color_primaries), | |
3037 | av_color_transfer_name(enc->color_trc)); | |
3038 | } else | |
3039 | av_strlcatf(detail, sizeof(detail), "%s, ", | |
3040 | av_get_colorspace_name(enc->colorspace)); | |
3041 | } | |
2ba45a60 | 3042 | |
f6fa7814 DM |
3043 | if (av_log_get_level() >= AV_LOG_DEBUG && |
3044 | enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) | |
3045 | av_strlcatf(detail, sizeof(detail), "%s, ", | |
3046 | av_chroma_location_name(enc->chroma_sample_location)); | |
2ba45a60 DM |
3047 | |
3048 | if (strlen(detail) > 1) { | |
3049 | detail[strlen(detail) - 2] = 0; | |
3050 | av_strlcatf(buf, buf_size, "%s)", detail); | |
3051 | } | |
3052 | } | |
f6fa7814 | 3053 | |
2ba45a60 | 3054 | if (enc->width) { |
f6fa7814 DM |
3055 | av_strlcat(buf, new_line ? separator : ", ", buf_size); |
3056 | ||
2ba45a60 | 3057 | snprintf(buf + strlen(buf), buf_size - strlen(buf), |
f6fa7814 | 3058 | "%dx%d", |
2ba45a60 | 3059 | enc->width, enc->height); |
f6fa7814 DM |
3060 | |
3061 | if (av_log_get_level() >= AV_LOG_VERBOSE && | |
3062 | (enc->width != enc->coded_width || | |
3063 | enc->height != enc->coded_height)) | |
3064 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3065 | " (%dx%d)", enc->coded_width, enc->coded_height); | |
3066 | ||
2ba45a60 DM |
3067 | if (enc->sample_aspect_ratio.num) { |
3068 | av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, | |
3069 | enc->width * enc->sample_aspect_ratio.num, | |
3070 | enc->height * enc->sample_aspect_ratio.den, | |
3071 | 1024 * 1024); | |
3072 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3073 | " [SAR %d:%d DAR %d:%d]", | |
3074 | enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, | |
3075 | display_aspect_ratio.num, display_aspect_ratio.den); | |
3076 | } | |
3077 | if (av_log_get_level() >= AV_LOG_DEBUG) { | |
3078 | int g = av_gcd(enc->time_base.num, enc->time_base.den); | |
3079 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3080 | ", %d/%d", | |
3081 | enc->time_base.num / g, enc->time_base.den / g); | |
3082 | } | |
3083 | } | |
3084 | if (encode) { | |
3085 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3086 | ", q=%d-%d", enc->qmin, enc->qmax); | |
3087 | } | |
3088 | break; | |
3089 | case AVMEDIA_TYPE_AUDIO: | |
f6fa7814 DM |
3090 | av_strlcat(buf, separator, buf_size); |
3091 | ||
2ba45a60 DM |
3092 | if (enc->sample_rate) { |
3093 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
f6fa7814 | 3094 | "%d Hz, ", enc->sample_rate); |
2ba45a60 | 3095 | } |
2ba45a60 DM |
3096 | av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); |
3097 | if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) { | |
3098 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3099 | ", %s", av_get_sample_fmt_name(enc->sample_fmt)); | |
3100 | } | |
3101 | if ( enc->bits_per_raw_sample > 0 | |
3102 | && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8) | |
3103 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3104 | " (%d bit)", enc->bits_per_raw_sample); | |
3105 | break; | |
3106 | case AVMEDIA_TYPE_DATA: | |
3107 | if (av_log_get_level() >= AV_LOG_DEBUG) { | |
3108 | int g = av_gcd(enc->time_base.num, enc->time_base.den); | |
3109 | if (g) | |
3110 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3111 | ", %d/%d", | |
3112 | enc->time_base.num / g, enc->time_base.den / g); | |
3113 | } | |
3114 | break; | |
3115 | case AVMEDIA_TYPE_SUBTITLE: | |
3116 | if (enc->width) | |
3117 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3118 | ", %dx%d", enc->width, enc->height); | |
3119 | break; | |
3120 | default: | |
3121 | return; | |
3122 | } | |
3123 | if (encode) { | |
3124 | if (enc->flags & CODEC_FLAG_PASS1) | |
3125 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3126 | ", pass 1"); | |
3127 | if (enc->flags & CODEC_FLAG_PASS2) | |
3128 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3129 | ", pass 2"); | |
3130 | } | |
3131 | bitrate = get_bit_rate(enc); | |
3132 | if (bitrate != 0) { | |
3133 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3134 | ", %d kb/s", bitrate / 1000); | |
3135 | } else if (enc->rc_max_rate > 0) { | |
3136 | snprintf(buf + strlen(buf), buf_size - strlen(buf), | |
3137 | ", max. %d kb/s", enc->rc_max_rate / 1000); | |
3138 | } | |
3139 | } | |
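/* Usage sketch (illustrative; dec_ctx is assumed to be an opened decoder
 * context, not something defined in this file):
 *
 *     char desc[256];
 *     avcodec_string(desc, sizeof(desc), dec_ctx, 0);   // 0 = describe as decoder
 *     av_log(NULL, AV_LOG_INFO, "%s\n", desc);
 *
 * For a video stream this yields a line along the lines of
 * "Video: h264 (High) (avc1 / 0x31637661), yuv420p, 1280x720 [SAR 1:1 DAR 16:9]",
 * depending on which context fields are set. */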
3140 | ||
3141 | const char *av_get_profile_name(const AVCodec *codec, int profile) | |
3142 | { | |
3143 | const AVProfile *p; | |
3144 | if (profile == FF_PROFILE_UNKNOWN || !codec->profiles) | |
3145 | return NULL; | |
3146 | ||
3147 | for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) | |
3148 | if (p->profile == profile) | |
3149 | return p->name; | |
3150 | ||
3151 | return NULL; | |
3152 | } | |
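/* Usage sketch (illustrative):
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
 *     const char *name = codec ? av_get_profile_name(codec, FF_PROFILE_H264_HIGH)
 *                              : NULL;
 *     // name points to "High" if the decoder lists that profile, NULL otherwise
 */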
3153 | ||
3154 | unsigned avcodec_version(void) | |
3155 | { | |
3156 | // av_assert0(AV_CODEC_ID_V410==164); | |
3157 | av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563); | |
3158 | av_assert0(AV_CODEC_ID_ADPCM_G722==69660); | |
3159 | // av_assert0(AV_CODEC_ID_BMV_AUDIO==86071); | |
3160 | av_assert0(AV_CODEC_ID_SRT==94216); | |
3161 | av_assert0(LIBAVCODEC_VERSION_MICRO >= 100); | |
3162 | ||
3163 | av_assert0(CODEC_ID_CLLC == AV_CODEC_ID_CLLC); | |
3164 | av_assert0(CODEC_ID_PCM_S8_PLANAR == AV_CODEC_ID_PCM_S8_PLANAR); | |
3165 | av_assert0(CODEC_ID_ADPCM_IMA_APC == AV_CODEC_ID_ADPCM_IMA_APC); | |
3166 | av_assert0(CODEC_ID_ILBC == AV_CODEC_ID_ILBC); | |
3167 | av_assert0(CODEC_ID_SRT == AV_CODEC_ID_SRT); | |
3168 | return LIBAVCODEC_VERSION_INT; | |
3169 | } | |
3170 | ||
3171 | const char *avcodec_configuration(void) | |
3172 | { | |
3173 | return FFMPEG_CONFIGURATION; | |
3174 | } | |
3175 | ||
3176 | const char *avcodec_license(void) | |
3177 | { | |
3178 | #define LICENSE_PREFIX "libavcodec license: " | |
3179 | return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; | |
3180 | } | |
3181 | ||
3182 | void avcodec_flush_buffers(AVCodecContext *avctx) | |
3183 | { | |
3184 | if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) | |
3185 | ff_thread_flush(avctx); | |
3186 | else if (avctx->codec->flush) | |
3187 | avctx->codec->flush(avctx); | |
3188 | ||
3189 | avctx->pts_correction_last_pts = | |
3190 | avctx->pts_correction_last_dts = INT64_MIN; | |
3191 | ||
3192 | if (!avctx->refcounted_frames) | |
3193 | av_frame_unref(avctx->internal->to_free); | |
3194 | } | |
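/* Usage sketch (illustrative; fmt_ctx, dec_ctx, stream_index and target_ts are
 * assumed caller-side variables): flushing is typically done right after a
 * seek, before decoding packets from the new position.
 *
 *     av_seek_frame(fmt_ctx, stream_index, target_ts, AVSEEK_FLAG_BACKWARD);
 *     avcodec_flush_buffers(dec_ctx);   // drop buffered/reordered frames
 */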
3195 | ||
3196 | int av_get_exact_bits_per_sample(enum AVCodecID codec_id) | |
3197 | { | |
3198 | switch (codec_id) { | |
3199 | case AV_CODEC_ID_8SVX_EXP: | |
3200 | case AV_CODEC_ID_8SVX_FIB: | |
3201 | case AV_CODEC_ID_ADPCM_CT: | |
3202 | case AV_CODEC_ID_ADPCM_IMA_APC: | |
3203 | case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: | |
3204 | case AV_CODEC_ID_ADPCM_IMA_OKI: | |
3205 | case AV_CODEC_ID_ADPCM_IMA_WS: | |
3206 | case AV_CODEC_ID_ADPCM_G722: | |
3207 | case AV_CODEC_ID_ADPCM_YAMAHA: | |
3208 | return 4; | |
3209 | case AV_CODEC_ID_DSD_LSBF: | |
3210 | case AV_CODEC_ID_DSD_MSBF: | |
3211 | case AV_CODEC_ID_DSD_LSBF_PLANAR: | |
3212 | case AV_CODEC_ID_DSD_MSBF_PLANAR: | |
3213 | case AV_CODEC_ID_PCM_ALAW: | |
3214 | case AV_CODEC_ID_PCM_MULAW: | |
3215 | case AV_CODEC_ID_PCM_S8: | |
3216 | case AV_CODEC_ID_PCM_S8_PLANAR: | |
3217 | case AV_CODEC_ID_PCM_U8: | |
3218 | case AV_CODEC_ID_PCM_ZORK: | |
3219 | return 8; | |
3220 | case AV_CODEC_ID_PCM_S16BE: | |
3221 | case AV_CODEC_ID_PCM_S16BE_PLANAR: | |
3222 | case AV_CODEC_ID_PCM_S16LE: | |
3223 | case AV_CODEC_ID_PCM_S16LE_PLANAR: | |
3224 | case AV_CODEC_ID_PCM_U16BE: | |
3225 | case AV_CODEC_ID_PCM_U16LE: | |
3226 | return 16; | |
3227 | case AV_CODEC_ID_PCM_S24DAUD: | |
3228 | case AV_CODEC_ID_PCM_S24BE: | |
3229 | case AV_CODEC_ID_PCM_S24LE: | |
3230 | case AV_CODEC_ID_PCM_S24LE_PLANAR: | |
3231 | case AV_CODEC_ID_PCM_U24BE: | |
3232 | case AV_CODEC_ID_PCM_U24LE: | |
3233 | return 24; | |
3234 | case AV_CODEC_ID_PCM_S32BE: | |
3235 | case AV_CODEC_ID_PCM_S32LE: | |
3236 | case AV_CODEC_ID_PCM_S32LE_PLANAR: | |
3237 | case AV_CODEC_ID_PCM_U32BE: | |
3238 | case AV_CODEC_ID_PCM_U32LE: | |
3239 | case AV_CODEC_ID_PCM_F32BE: | |
3240 | case AV_CODEC_ID_PCM_F32LE: | |
3241 | return 32; | |
3242 | case AV_CODEC_ID_PCM_F64BE: | |
3243 | case AV_CODEC_ID_PCM_F64LE: | |
3244 | return 64; | |
3245 | default: | |
3246 | return 0; | |
3247 | } | |
3248 | } | |
3249 | ||
3250 | enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) | |
3251 | { | |
3252 | static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = { | |
3253 | [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, | |
3254 | [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, | |
3255 | [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, | |
3256 | [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, | |
3257 | [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, | |
3258 | [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, | |
3259 | [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, | |
3260 | [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, | |
3261 | [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, | |
3262 | [AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, | |
3263 | }; | |
3264 | if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB) | |
3265 | return AV_CODEC_ID_NONE; | |
3266 | if (be < 0 || be > 1) | |
3267 | be = AV_NE(1, 0); | |
3268 | return map[fmt][be]; | |
3269 | } | |
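/* Usage sketch (illustrative): map a sample format to the matching PCM codec.
 *
 *     enum AVCodecID id = av_get_pcm_codec(AV_SAMPLE_FMT_S16, -1);
 *     // -1 selects native byte order: AV_CODEC_ID_PCM_S16LE on little-endian
 *     // hosts, AV_CODEC_ID_PCM_S16BE on big-endian; 0 forces LE, 1 forces BE
 */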
3270 | ||
3271 | int av_get_bits_per_sample(enum AVCodecID codec_id) | |
3272 | { | |
3273 | switch (codec_id) { | |
3274 | case AV_CODEC_ID_ADPCM_SBPRO_2: | |
3275 | return 2; | |
3276 | case AV_CODEC_ID_ADPCM_SBPRO_3: | |
3277 | return 3; | |
3278 | case AV_CODEC_ID_ADPCM_SBPRO_4: | |
3279 | case AV_CODEC_ID_ADPCM_IMA_WAV: | |
3280 | case AV_CODEC_ID_ADPCM_IMA_QT: | |
3281 | case AV_CODEC_ID_ADPCM_SWF: | |
3282 | case AV_CODEC_ID_ADPCM_MS: | |
3283 | return 4; | |
3284 | default: | |
3285 | return av_get_exact_bits_per_sample(codec_id); | |
3286 | } | |
3287 | } | |
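/* Usage sketch (illustrative): the "exact" variant above returns 0 unless the
 * codec really stores that many bits per sample, while this wrapper also
 * reports the nominal size of a few ADPCM flavours.
 *
 *     av_get_bits_per_sample(AV_CODEC_ID_PCM_S16LE);        // 16
 *     av_get_bits_per_sample(AV_CODEC_ID_ADPCM_MS);         // 4 (nominal)
 *     av_get_exact_bits_per_sample(AV_CODEC_ID_ADPCM_MS);   // 0
 */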
3288 | ||
3289 | int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) | |
3290 | { | |
3291 | int id, sr, ch, ba, tag, bps; | |
3292 | ||
3293 | id = avctx->codec_id; | |
3294 | sr = avctx->sample_rate; | |
3295 | ch = avctx->channels; | |
3296 | ba = avctx->block_align; | |
3297 | tag = avctx->codec_tag; | |
3298 | bps = av_get_exact_bits_per_sample(avctx->codec_id); | |
3299 | ||
3300 | /* codecs with an exact constant bits per sample */ | |
3301 | if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768) | |
3302 | return (frame_bytes * 8LL) / (bps * ch); | |
3303 | bps = avctx->bits_per_coded_sample; | |
3304 | ||
3305 | /* codecs with a fixed packet duration */ | |
3306 | switch (id) { | |
3307 | case AV_CODEC_ID_ADPCM_ADX: return 32; | |
3308 | case AV_CODEC_ID_ADPCM_IMA_QT: return 64; | |
3309 | case AV_CODEC_ID_ADPCM_EA_XAS: return 128; | |
3310 | case AV_CODEC_ID_AMR_NB: | |
3311 | case AV_CODEC_ID_EVRC: | |
3312 | case AV_CODEC_ID_GSM: | |
3313 | case AV_CODEC_ID_QCELP: | |
3314 | case AV_CODEC_ID_RA_288: return 160; | |
3315 | case AV_CODEC_ID_AMR_WB: | |
3316 | case AV_CODEC_ID_GSM_MS: return 320; | |
3317 | case AV_CODEC_ID_MP1: return 384; | |
3318 | case AV_CODEC_ID_ATRAC1: return 512; | |
3319 | case AV_CODEC_ID_ATRAC3: return 1024; | |
f6fa7814 | 3320 | case AV_CODEC_ID_ATRAC3P: return 2048; |
2ba45a60 DM |
3321 | case AV_CODEC_ID_MP2: |
3322 | case AV_CODEC_ID_MUSEPACK7: return 1152; | |
3323 | case AV_CODEC_ID_AC3: return 1536; | |
3324 | } | |
3325 | ||
3326 | if (sr > 0) { | |
3327 | /* calc from sample rate */ | |
3328 | if (id == AV_CODEC_ID_TTA) | |
3329 | return 256 * sr / 245; | |
3330 | ||
3331 | if (ch > 0) { | |
3332 | /* calc from sample rate and channels */ | |
3333 | if (id == AV_CODEC_ID_BINKAUDIO_DCT) | |
3334 | return (480 << (sr / 22050)) / ch; | |
3335 | } | |
3336 | } | |
3337 | ||
3338 | if (ba > 0) { | |
3339 | /* calc from block_align */ | |
3340 | if (id == AV_CODEC_ID_SIPR) { | |
3341 | switch (ba) { | |
3342 | case 20: return 160; | |
3343 | case 19: return 144; | |
3344 | case 29: return 288; | |
3345 | case 37: return 480; | |
3346 | } | |
3347 | } else if (id == AV_CODEC_ID_ILBC) { | |
3348 | switch (ba) { | |
3349 | case 38: return 160; | |
3350 | case 50: return 240; | |
3351 | } | |
3352 | } | |
3353 | } | |
3354 | ||
3355 | if (frame_bytes > 0) { | |
3356 | /* calc from frame_bytes only */ | |
3357 | if (id == AV_CODEC_ID_TRUESPEECH) | |
3358 | return 240 * (frame_bytes / 32); | |
3359 | if (id == AV_CODEC_ID_NELLYMOSER) | |
3360 | return 256 * (frame_bytes / 64); | |
3361 | if (id == AV_CODEC_ID_RA_144) | |
3362 | return 160 * (frame_bytes / 20); | |
3363 | if (id == AV_CODEC_ID_G723_1) | |
3364 | return 240 * (frame_bytes / 24); | |
3365 | ||
3366 | if (bps > 0) { | |
3367 | /* calc from frame_bytes and bits_per_coded_sample */ | |
3368 | if (id == AV_CODEC_ID_ADPCM_G726) | |
3369 | return frame_bytes * 8 / bps; | |
3370 | } | |
3371 | ||
3372 | if (ch > 0) { | |
3373 | /* calc from frame_bytes and channels */ | |
3374 | switch (id) { | |
3375 | case AV_CODEC_ID_ADPCM_AFC: | |
3376 | return frame_bytes / (9 * ch) * 16; | |
3377 | case AV_CODEC_ID_ADPCM_DTK: | |
3378 | return frame_bytes / (16 * ch) * 28; | |
3379 | case AV_CODEC_ID_ADPCM_4XM: | |
3380 | case AV_CODEC_ID_ADPCM_IMA_ISS: | |
3381 | return (frame_bytes - 4 * ch) * 2 / ch; | |
3382 | case AV_CODEC_ID_ADPCM_IMA_SMJPEG: | |
3383 | return (frame_bytes - 4) * 2 / ch; | |
3384 | case AV_CODEC_ID_ADPCM_IMA_AMV: | |
3385 | return (frame_bytes - 8) * 2 / ch; | |
3386 | case AV_CODEC_ID_ADPCM_XA: | |
3387 | return (frame_bytes / 128) * 224 / ch; | |
3388 | case AV_CODEC_ID_INTERPLAY_DPCM: | |
3389 | return (frame_bytes - 6 - ch) / ch; | |
3390 | case AV_CODEC_ID_ROQ_DPCM: | |
3391 | return (frame_bytes - 8) / ch; | |
3392 | case AV_CODEC_ID_XAN_DPCM: | |
3393 | return (frame_bytes - 2 * ch) / ch; | |
3394 | case AV_CODEC_ID_MACE3: | |
3395 | return 3 * frame_bytes / ch; | |
3396 | case AV_CODEC_ID_MACE6: | |
3397 | return 6 * frame_bytes / ch; | |
3398 | case AV_CODEC_ID_PCM_LXF: | |
3399 | return 2 * (frame_bytes / (5 * ch)); | |
3400 | case AV_CODEC_ID_IAC: | |
3401 | case AV_CODEC_ID_IMC: | |
3402 | return 4 * frame_bytes / ch; | |
3403 | } | |
3404 | ||
3405 | if (tag) { | |
3406 | /* calc from frame_bytes, channels, and codec_tag */ | |
3407 | if (id == AV_CODEC_ID_SOL_DPCM) { | |
3408 | if (tag == 3) | |
3409 | return frame_bytes / ch; | |
3410 | else | |
3411 | return frame_bytes * 2 / ch; | |
3412 | } | |
3413 | } | |
3414 | ||
3415 | if (ba > 0) { | |
3416 | /* calc from frame_bytes, channels, and block_align */ | |
3417 | int blocks = frame_bytes / ba; | |
3418 | switch (avctx->codec_id) { | |
3419 | case AV_CODEC_ID_ADPCM_IMA_WAV: | |
3420 | if (bps < 2 || bps > 5) | |
3421 | return 0; | |
3422 | return blocks * (1 + (ba - 4 * ch) / (bps * ch) * 8); | |
3423 | case AV_CODEC_ID_ADPCM_IMA_DK3: | |
3424 | return blocks * (((ba - 16) * 2 / 3 * 4) / ch); | |
3425 | case AV_CODEC_ID_ADPCM_IMA_DK4: | |
3426 | return blocks * (1 + (ba - 4 * ch) * 2 / ch); | |
3427 | case AV_CODEC_ID_ADPCM_IMA_RAD: | |
3428 | return blocks * ((ba - 4 * ch) * 2 / ch); | |
3429 | case AV_CODEC_ID_ADPCM_MS: | |
3430 | return blocks * (2 + (ba - 7 * ch) * 2 / ch); | |
3431 | } | |
3432 | } | |
3433 | ||
3434 | if (bps > 0) { | |
3435 | /* calc from frame_bytes, channels, and bits_per_coded_sample */ | |
3436 | switch (avctx->codec_id) { | |
3437 | case AV_CODEC_ID_PCM_DVD: | |
3438 | if (bps < 4)
3439 | return 0; | |
3440 | return 2 * (frame_bytes / ((bps * 2 / 8) * ch)); | |
3441 | case AV_CODEC_ID_PCM_BLURAY: | |
3442 | if (bps < 4)
3443 | return 0; | |
3444 | return frame_bytes / ((FFALIGN(ch, 2) * bps) / 8); | |
3445 | case AV_CODEC_ID_S302M: | |
3446 | return 2 * (frame_bytes / ((bps + 4) / 4)) / ch; | |
3447 | } | |
3448 | } | |
3449 | } | |
3450 | } | |
3451 | ||
3452 | /* Fall back on using frame_size */ | |
3453 | if (avctx->frame_size > 1 && frame_bytes) | |
3454 | return avctx->frame_size; | |
3455 | ||
3456 | // For WMA we currently have no other means to calculate the duration, so | |
3457 | // we assume CBR here, which is true for all known cases. | |
3458 | if (avctx->bit_rate>0 && frame_bytes>0 && avctx->sample_rate>0 && avctx->block_align>1) { | |
3459 | if (avctx->codec_id == AV_CODEC_ID_WMAV1 || avctx->codec_id == AV_CODEC_ID_WMAV2) | |
3460 | return (frame_bytes * 8LL * avctx->sample_rate) / avctx->bit_rate; | |
3461 | } | |
3462 | ||
3463 | return 0; | |
3464 | } | |
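/* Worked example (illustrative, assuming an MS ADPCM stream with ch == 2,
 * block_align ba == 1024 and one block per packet, i.e. frame_bytes == 1024):
 * the block_align branch above gives
 *
 *     blocks   = 1024 / 1024 = 1
 *     duration = 1 * (2 + (1024 - 7 * 2) * 2 / 2) = 1012 samples per packet
 */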
3465 | ||
3466 | #if !HAVE_THREADS | |
3467 | int ff_thread_init(AVCodecContext *s) | |
3468 | { | |
3469 | return -1; | |
3470 | } | |
3471 | ||
3472 | #endif | |
3473 | ||
3474 | unsigned int av_xiphlacing(unsigned char *s, unsigned int v) | |
3475 | { | |
3476 | unsigned int n = 0; | |
3477 | ||
3478 | while (v >= 0xff) { | |
3479 | *s++ = 0xff; | |
3480 | v -= 0xff; | |
3481 | n++; | |
3482 | } | |
3483 | *s = v; | |
3484 | n++; | |
3485 | return n; | |
3486 | } | |
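/* Usage sketch (illustrative): Xiph-style lacing encodes a size as a run of
 * 0xff bytes plus one final byte < 0xff, e.g. for a 300-byte packet:
 *
 *     unsigned char lace[8];
 *     unsigned int n = av_xiphlacing(lace, 300);
 *     // n == 2, lace[0] == 0xff, lace[1] == 0x2d  (255 + 45 == 300)
 */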
3487 | ||
3488 | int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) | |
3489 | { | |
3490 | int i; | |
3491 | for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ; | |
3492 | return i; | |
3493 | } | |
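/* Usage sketch (illustrative): linear search of a table of uint16_t pairs;
 * the return value equals size when no entry matches, so it doubles as a
 * "found" test.
 *
 *     static const uint16_t sizes[][2] = { { 320, 240 }, { 640, 480 } };
 *     int hit  = ff_match_2uint16(sizes, FF_ARRAY_ELEMS(sizes), 640, 480); // 1
 *     int miss = ff_match_2uint16(sizes, FF_ARRAY_ELEMS(sizes), 100, 100); // 2
 */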
3494 | ||
3495 | #if FF_API_MISSING_SAMPLE | |
3496 | FF_DISABLE_DEPRECATION_WARNINGS | |
3497 | void av_log_missing_feature(void *avc, const char *feature, int want_sample) | |
3498 | { | |
3499 | av_log(avc, AV_LOG_WARNING, "%s is not implemented. Update your FFmpeg " | |
3500 | "version to the newest one from Git. If the problem still " | |
3501 | "occurs, it means that your file has a feature which has not " | |
3502 | "been implemented.\n", feature); | |
3503 | if (want_sample)
3504 | av_log_ask_for_sample(avc, NULL); | |
3505 | } | |
3506 | ||
3507 | void av_log_ask_for_sample(void *avc, const char *msg, ...) | |
3508 | { | |
3509 | va_list argument_list; | |
3510 | ||
3511 | va_start(argument_list, msg); | |
3512 | ||
3513 | if (msg) | |
3514 | av_vlog(avc, AV_LOG_WARNING, msg, argument_list); | |
3515 | av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample " | |
3516 | "of this file to ftp://upload.ffmpeg.org/incoming/ " | |
3517 | "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n"); | |
3518 | ||
3519 | va_end(argument_list); | |
3520 | } | |
3521 | FF_ENABLE_DEPRECATION_WARNINGS | |
3522 | #endif /* FF_API_MISSING_SAMPLE */ | |
3523 | ||
3524 | static AVHWAccel *first_hwaccel = NULL; | |
3525 | static AVHWAccel **last_hwaccel = &first_hwaccel; | |
3526 | ||
3527 | void av_register_hwaccel(AVHWAccel *hwaccel) | |
3528 | { | |
3529 | AVHWAccel **p = last_hwaccel; | |
3530 | hwaccel->next = NULL; | |
3531 | while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, hwaccel)) | |
3532 | p = &(*p)->next; | |
3533 | last_hwaccel = &hwaccel->next; | |
3534 | } | |
3535 | ||
3536 | AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel) | |
3537 | { | |
3538 | return hwaccel ? hwaccel->next : first_hwaccel; | |
3539 | } | |
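/* Usage sketch (illustrative): walk the list of registered hwaccels.
 *
 *     AVHWAccel *hwa = NULL;
 *     while ((hwa = av_hwaccel_next(hwa)))
 *         av_log(NULL, AV_LOG_INFO, "hwaccel: %s\n", hwa->name);
 */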
3540 | ||
3541 | int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) | |
3542 | { | |
3543 | if (lockmgr_cb) { | |
f6fa7814 DM |
3544 | // There is no good way to roll back a failure to destroy the
3545 | // mutex, so we ignore failures. | |
3546 | lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY); | |
3547 | lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY); | |
3548 | lockmgr_cb = NULL; | |
3549 | codec_mutex = NULL; | |
3550 | avformat_mutex = NULL; | |
3551 | } | |
3552 | ||
3553 | if (cb) { | |
3554 | void *new_codec_mutex = NULL; | |
3555 | void *new_avformat_mutex = NULL; | |
3556 | int err; | |
3557 | if ((err = cb(&new_codec_mutex, AV_LOCK_CREATE))) { | |
3558 | return err > 0 ? AVERROR_UNKNOWN : err; | |
3559 | } | |
3560 | if ((err = cb(&new_avformat_mutex, AV_LOCK_CREATE))) { | |
3561 | // Ignore failures to destroy the newly created mutex. | |
3562 | cb(&new_codec_mutex, AV_LOCK_DESTROY); | |
3563 | return err > 0 ? AVERROR_UNKNOWN : err; | |
3564 | } | |
3565 | lockmgr_cb = cb; | |
3566 | codec_mutex = new_codec_mutex; | |
3567 | avformat_mutex = new_avformat_mutex; | |
2ba45a60 DM |
3568 | } |
3569 | ||
2ba45a60 DM |
3570 | return 0; |
3571 | } | |
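/* Usage sketch (illustrative): a minimal pthread-based lock manager callback
 * with simplified error handling; my_lockmgr is a hypothetical caller-side
 * function, not part of this file.
 *
 *     static int my_lockmgr(void **mtx, enum AVLockOp op)
 *     {
 *         switch (op) {
 *         case AV_LOCK_CREATE:
 *             *mtx = av_malloc(sizeof(pthread_mutex_t));
 *             return *mtx ? !!pthread_mutex_init(*mtx, NULL) : 1;
 *         case AV_LOCK_OBTAIN:
 *             return !!pthread_mutex_lock(*mtx);
 *         case AV_LOCK_RELEASE:
 *             return !!pthread_mutex_unlock(*mtx);
 *         case AV_LOCK_DESTROY:
 *             pthread_mutex_destroy(*mtx);
 *             av_freep(mtx);
 *             return 0;
 *         }
 *         return 1;
 *     }
 *
 *     // register once, before avcodec_open2() is used from multiple threads:
 *     av_lockmgr_register(my_lockmgr);
 */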
3572 | ||
3573 | int ff_lock_avcodec(AVCodecContext *log_ctx) | |
3574 | { | |
3575 | if (lockmgr_cb) { | |
3576 | if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) | |
3577 | return -1; | |
3578 | } | |
3579 | entangled_thread_counter++; | |
3580 | if (entangled_thread_counter != 1) { | |
3581 | av_log(log_ctx, AV_LOG_ERROR, "Insufficient thread locking around avcodec_open/close()\n"); | |
3582 | if (!lockmgr_cb) | |
3583 | av_log(log_ctx, AV_LOG_ERROR, "No lock manager is set, please see av_lockmgr_register()\n"); | |
3584 | ff_avcodec_locked = 1; | |
3585 | ff_unlock_avcodec(); | |
3586 | return AVERROR(EINVAL); | |
3587 | } | |
3588 | av_assert0(!ff_avcodec_locked); | |
3589 | ff_avcodec_locked = 1; | |
3590 | return 0; | |
3591 | } | |
3592 | ||
3593 | int ff_unlock_avcodec(void) | |
3594 | { | |
3595 | av_assert0(ff_avcodec_locked); | |
3596 | ff_avcodec_locked = 0; | |
3597 | entangled_thread_counter--; | |
3598 | if (lockmgr_cb) { | |
3599 | if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE)) | |
3600 | return -1; | |
3601 | } | |
f6fa7814 | 3602 | |
2ba45a60 DM |
3603 | return 0; |
3604 | } | |
3605 | ||
3606 | int avpriv_lock_avformat(void) | |
3607 | { | |
3608 | if (lockmgr_cb) { | |
3609 | if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN)) | |
3610 | return -1; | |
3611 | } | |
3612 | return 0; | |
3613 | } | |
3614 | ||
3615 | int avpriv_unlock_avformat(void) | |
3616 | { | |
3617 | if (lockmgr_cb) { | |
3618 | if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE)) | |
3619 | return -1; | |
3620 | } | |
3621 | return 0; | |
3622 | } | |
3623 | ||
3624 | unsigned int avpriv_toupper4(unsigned int x) | |
3625 | { | |
3626 | return av_toupper(x & 0xFF) + | |
3627 | (av_toupper((x >> 8) & 0xFF) << 8) + | |
3628 | (av_toupper((x >> 16) & 0xFF) << 16) + | |
3629 | ((unsigned)av_toupper((x >> 24) & 0xFF) << 24); | |
3630 | } | |
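/* Usage sketch (illustrative): case-fold a fourcc before comparing it.
 *
 *     unsigned int tag = MKTAG('a', 'v', 'c', '1');
 *     if (avpriv_toupper4(tag) == MKTAG('A', 'V', 'C', '1'))
 *         ;  // matches regardless of the case of the original tag
 */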
3631 | ||
3632 | int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src) | |
3633 | { | |
3634 | int ret; | |
3635 | ||
3636 | dst->owner = src->owner; | |
3637 | ||
3638 | ret = av_frame_ref(dst->f, src->f); | |
3639 | if (ret < 0) | |
3640 | return ret; | |
3641 | ||
3642 | if (src->progress && | |
3643 | !(dst->progress = av_buffer_ref(src->progress))) { | |
3644 | ff_thread_release_buffer(dst->owner, dst); | |
3645 | return AVERROR(ENOMEM); | |
3646 | } | |
3647 | ||
3648 | return 0; | |
3649 | } | |
3650 | ||
3651 | #if !HAVE_THREADS | |
3652 | ||
3653 | enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) | |
3654 | { | |
3655 | return ff_get_format(avctx, fmt); | |
3656 | } | |
3657 | ||
3658 | int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) | |
3659 | { | |
3660 | f->owner = avctx; | |
3661 | return ff_get_buffer(avctx, f->f, flags); | |
3662 | } | |
3663 | ||
3664 | void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) | |
3665 | { | |
3666 | if (f->f) | |
3667 | av_frame_unref(f->f); | |
3668 | } | |
3669 | ||
3670 | void ff_thread_finish_setup(AVCodecContext *avctx) | |
3671 | { | |
3672 | } | |
3673 | ||
3674 | void ff_thread_report_progress(ThreadFrame *f, int progress, int field) | |
3675 | { | |
3676 | } | |
3677 | ||
3678 | void ff_thread_await_progress(ThreadFrame *f, int progress, int field) | |
3679 | { | |
3680 | } | |
3681 | ||
3682 | int ff_thread_can_start_frame(AVCodecContext *avctx) | |
3683 | { | |
3684 | return 1; | |
3685 | } | |
3686 | ||
3687 | int ff_alloc_entries(AVCodecContext *avctx, int count) | |
3688 | { | |
3689 | return 0; | |
3690 | } | |
3691 | ||
3692 | void ff_reset_entries(AVCodecContext *avctx) | |
3693 | { | |
3694 | } | |
3695 | ||
3696 | void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift) | |
3697 | { | |
3698 | } | |
3699 | ||
3700 | void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n) | |
3701 | { | |
3702 | } | |
3703 | ||
3704 | #endif | |
3705 | ||
3706 | enum AVMediaType avcodec_get_type(enum AVCodecID codec_id) | |
3707 | { | |
3708 | AVCodec *c = avcodec_find_decoder(codec_id); | |
3709 | if (!c) | |
3710 | c = avcodec_find_encoder(codec_id); | |
3711 | if (c) | |
3712 | return c->type; | |
3713 | ||
3714 | if (codec_id <= AV_CODEC_ID_NONE) | |
3715 | return AVMEDIA_TYPE_UNKNOWN; | |
3716 | else if (codec_id < AV_CODEC_ID_FIRST_AUDIO) | |
3717 | return AVMEDIA_TYPE_VIDEO; | |
3718 | else if (codec_id < AV_CODEC_ID_FIRST_SUBTITLE) | |
3719 | return AVMEDIA_TYPE_AUDIO; | |
3720 | else if (codec_id < AV_CODEC_ID_FIRST_UNKNOWN) | |
3721 | return AVMEDIA_TYPE_SUBTITLE; | |
3722 | ||
3723 | return AVMEDIA_TYPE_UNKNOWN; | |
3724 | } | |
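/* Usage sketch (illustrative):
 *
 *     avcodec_get_type(AV_CODEC_ID_H264);   // AVMEDIA_TYPE_VIDEO
 *     avcodec_get_type(AV_CODEC_ID_AAC);    // AVMEDIA_TYPE_AUDIO
 *     avcodec_get_type(AV_CODEC_ID_SRT);    // AVMEDIA_TYPE_SUBTITLE
 */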
3725 | ||
3726 | int avcodec_is_open(AVCodecContext *s) | |
3727 | { | |
3728 | return !!s->internal; | |
3729 | } | |
3730 | ||
3731 | int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) | |
3732 | { | |
3733 | int ret; | |
3734 | char *str; | |
3735 | ||
3736 | ret = av_bprint_finalize(buf, &str); | |
3737 | if (ret < 0) | |
3738 | return ret; | |
f6fa7814 DM |
3739 | if (!av_bprint_is_complete(buf)) { |
3740 | av_free(str); | |
3741 | return AVERROR(ENOMEM); | |
3742 | } | |
3743 | ||
2ba45a60 DM |
3744 | avctx->extradata = str; |
3745 | /* Note: the string is NUL terminated (so extradata can be read as a | |
3746 | * string), but the terminating character is not counted in the size (in | |
3747 | * binary formats you are likely not supposed to mux that character). When | |
3748 | * extradata is copied, it is also padded with FF_INPUT_BUFFER_PADDING_SIZE | |
3749 | * zeros. */ | |
3750 | avctx->extradata_size = buf->len; | |
3751 | return 0; | |
3752 | } | |
3753 | ||
3754 | const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p, | |
3755 | const uint8_t *end, | |
3756 | uint32_t *av_restrict state) | |
3757 | { | |
3758 | int i; | |
3759 | ||
3760 | av_assert0(p <= end); | |
3761 | if (p >= end) | |
3762 | return end; | |
3763 | ||
3764 | for (i = 0; i < 3; i++) { | |
3765 | uint32_t tmp = *state << 8; | |
3766 | *state = tmp + *(p++); | |
3767 | if (tmp == 0x100 || p == end) | |
3768 | return p; | |
3769 | } | |
3770 | ||
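/* Skip through the buffer three bytes at a time: a 00 00 01 prefix can only
 * end at p - 1 when p[-3] == 0, p[-2] == 0 and p[-1] == 1, so a byte that
 * rules this out lets us advance by 3, 2 or 1 positions without missing a
 * start code. */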
3771 | while (p < end) { | |
3772 | if (p[-1] > 1 ) p += 3; | |
3773 | else if (p[-2] ) p += 2; | |
3774 | else if (p[-3]|(p[-1]-1)) p++; | |
3775 | else { | |
3776 | p++; | |
3777 | break; | |
3778 | } | |
3779 | } | |
3780 | ||
3781 | p = FFMIN(p, end) - 4; | |
3782 | *state = AV_RB32(p); | |
3783 | ||
3784 | return p + 4; | |
3785 | } |
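/* Usage sketch (illustrative; buf and buf_size are assumed caller-side
 * variables): scan a buffer for 00 00 01 xx start codes. state carries the
 * last four bytes across calls, so codes split between buffers are found too.
 *
 *     uint32_t state = -1;
 *     const uint8_t *ptr = buf, *end = buf + buf_size;
 *     while (ptr < end) {
 *         ptr = avpriv_find_start_code(ptr, end, &state);
 *         if ((state & 0xffffff00) == 0x100)
 *             av_log(NULL, AV_LOG_DEBUG, "start code 0x%02x\n", state & 0xff);
 *     }
 */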