ffmpeg/libavformat/utils.c (deb_ffmpeg.git, imported Debian version 2.5.3~trusty1)
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38 #include "libavutil/timestamp.h"
39
40 #include "libavcodec/bytestream.h"
41 #include "libavcodec/internal.h"
42 #include "libavcodec/raw.h"
43
44 #include "audiointerleave.h"
45 #include "avformat.h"
46 #include "avio_internal.h"
47 #include "id3v2.h"
48 #include "internal.h"
49 #include "metadata.h"
50 #if CONFIG_NETWORK
51 #include "network.h"
52 #endif
53 #include "riff.h"
54 #include "url.h"
55
56 #include "libavutil/ffversion.h"
57 const char av_format_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
58
59 /**
60 * @file
61 * various utility functions for use within FFmpeg
62 */
63
64 unsigned avformat_version(void)
65 {
66 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
67 return LIBAVFORMAT_VERSION_INT;
68 }
69
70 const char *avformat_configuration(void)
71 {
72 return FFMPEG_CONFIGURATION;
73 }
74
75 const char *avformat_license(void)
76 {
77 #define LICENSE_PREFIX "libavformat license: "
78 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
79 }
80
81 #define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
82
83 static int is_relative(int64_t ts) {
84 return ts > (RELATIVE_TS_BASE - (1LL<<48));
85 }
86
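/*
 * [Editorial note, not part of the original source.]  Timestamps that are only
 * known relative to the start of a stream are stored offset by
 * RELATIVE_TS_BASE (== INT64_MAX - 2^48) so they can be told apart from
 * absolute ones; is_relative() simply tests whether a value is greater than
 * RELATIVE_TS_BASE - 2^48.  For example:
 *
 *     is_relative(RELATIVE_TS_BASE + 90000)  -> 1   (relative timestamp)
 *     is_relative(90000)                     -> 0   (ordinary timestamp)
 */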
87 /**
88 * Wrap a given time stamp if there is an indication of an overflow
89 *
90 * @param st stream
91 * @param timestamp the time stamp to wrap
92 * @return resulting time stamp
93 */
94 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
95 {
96 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
97 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
98 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
99 timestamp < st->pts_wrap_reference)
100 return timestamp + (1ULL << st->pts_wrap_bits);
101 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
102 timestamp >= st->pts_wrap_reference)
103 return timestamp - (1ULL << st->pts_wrap_bits);
104 }
105 return timestamp;
106 }
107
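/*
 * [Editorial illustration, not part of the original source.]  With the 33-bit
 * timestamps used by MPEG-TS (pts_wrap_bits == 33) and
 * AV_PTS_WRAP_ADD_OFFSET, a timestamp that has wrapped around to a small
 * value below the wrap reference is shifted up by one full wrap period:
 *
 *     timestamp:  100
 *     reference:  8589930000            (just below 2^33 == 8589934592)
 *     result:     100 + (1LL << 33) ==  8589934692
 */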
108 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
109 MAKE_ACCESSORS(AVStream, stream, char *, recommended_encoder_configuration)
110 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
111 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
112 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
113 MAKE_ACCESSORS(AVFormatContext, format, int, metadata_header_padding)
114 MAKE_ACCESSORS(AVFormatContext, format, void *, opaque)
115 MAKE_ACCESSORS(AVFormatContext, format, av_format_control_message, control_message_cb)
116
117 int64_t av_stream_get_end_pts(const AVStream *st)
118 {
119 return st->pts.val;
120 }
121
122 struct AVCodecParserContext *av_stream_get_parser(const AVStream *st)
123 {
124 return st->parser;
125 }
126
127 void av_format_inject_global_side_data(AVFormatContext *s)
128 {
129 int i;
130 s->internal->inject_global_side_data = 1;
131 for (i = 0; i < s->nb_streams; i++) {
132 AVStream *st = s->streams[i];
133 st->inject_global_side_data = 1;
134 }
135 }
136
137 int ff_copy_whitelists(AVFormatContext *dst, AVFormatContext *src)
138 {
139 av_assert0(!dst->codec_whitelist && !dst->format_whitelist);
140 dst-> codec_whitelist = av_strdup(src->codec_whitelist);
141 dst->format_whitelist = av_strdup(src->format_whitelist);
142 if ( (src-> codec_whitelist && !dst-> codec_whitelist)
143 || (src->format_whitelist && !dst->format_whitelist)) {
144 av_log(dst, AV_LOG_ERROR, "Failed to duplicate whitelist\n");
145 return AVERROR(ENOMEM);
146 }
147 return 0;
148 }
149
150 static const AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
151 {
152 if (st->codec->codec)
153 return st->codec->codec;
154
155 switch (st->codec->codec_type) {
156 case AVMEDIA_TYPE_VIDEO:
157 if (s->video_codec) return s->video_codec;
158 break;
159 case AVMEDIA_TYPE_AUDIO:
160 if (s->audio_codec) return s->audio_codec;
161 break;
162 case AVMEDIA_TYPE_SUBTITLE:
163 if (s->subtitle_codec) return s->subtitle_codec;
164 break;
165 }
166
167 return avcodec_find_decoder(codec_id);
168 }
169
170 int av_format_get_probe_score(const AVFormatContext *s)
171 {
172 return s->probe_score;
173 }
174
175 /* an arbitrarily chosen "sane" max packet size -- 50M */
176 #define SANE_CHUNK_SIZE (50000000)
177
178 int ffio_limit(AVIOContext *s, int size)
179 {
180 if (s->maxsize>= 0) {
181 int64_t remaining= s->maxsize - avio_tell(s);
182 if (remaining < size) {
183 int64_t newsize = avio_size(s);
184 if (!s->maxsize || s->maxsize<newsize)
185 s->maxsize = newsize - !newsize;
186 remaining= s->maxsize - avio_tell(s);
187 remaining= FFMAX(remaining, 0);
188 }
189
190 if (s->maxsize>= 0 && remaining+1 < size) {
191 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
192 size = remaining+1;
193 }
194 }
195 return size;
196 }
197
198 /* Read the data in sane-sized chunks and append to pkt.
199 * Return the number of bytes read or an error. */
200 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
201 {
202 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
203 int orig_size = pkt->size;
204 int ret;
205
206 do {
207 int prev_size = pkt->size;
208 int read_size;
209
210 /* When the caller requests a lot of data, limit it to the amount
211 * left in the file, or to SANE_CHUNK_SIZE when that is not known. */
212 read_size = size;
213 if (read_size > SANE_CHUNK_SIZE/10) {
214 read_size = ffio_limit(s, read_size);
215 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
216 if (s->maxsize < 0)
217 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
218 }
219
220 ret = av_grow_packet(pkt, read_size);
221 if (ret < 0)
222 break;
223
224 ret = avio_read(s, pkt->data + prev_size, read_size);
225 if (ret != read_size) {
226 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
227 break;
228 }
229
230 size -= read_size;
231 } while (size > 0);
232 if (size > 0)
233 pkt->flags |= AV_PKT_FLAG_CORRUPT;
234
235 pkt->pos = orig_pos;
236 if (!pkt->size)
237 av_free_packet(pkt);
238 return pkt->size > orig_size ? pkt->size - orig_size : ret;
239 }
240
241 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
242 {
243 av_init_packet(pkt);
244 pkt->data = NULL;
245 pkt->size = 0;
246 pkt->pos = avio_tell(s);
247
248 return append_packet_chunked(s, pkt, size);
249 }
250
251 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
252 {
253 if (!pkt->size)
254 return av_get_packet(s, pkt, size);
255 return append_packet_chunked(s, pkt, size);
256 }
257
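/*
 * [Editorial example, not part of the original source.]  A minimal sketch of
 * how a simple demuxer's read_packet() callback might use av_get_packet();
 * the 4096-byte chunk size is an arbitrary illustration:
 *
 *     static int example_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int ret = av_get_packet(s->pb, pkt, 4096); // allocates and fills pkt
 *         if (ret < 0)
 *             return ret;                            // EOF or I/O error
 *         pkt->stream_index = 0;                     // single-stream example
 *         return ret;                                // bytes actually read
 *     }
 */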
258 int av_filename_number_test(const char *filename)
259 {
260 char buf[1024];
261 return filename &&
262 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
263 }
264
265 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
266 AVProbeData *pd)
267 {
268 static const struct {
269 const char *name;
270 enum AVCodecID id;
271 enum AVMediaType type;
272 } fmt_id_type[] = {
273 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
274 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
275 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
276 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
277 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
278 { "hevc", AV_CODEC_ID_HEVC, AVMEDIA_TYPE_VIDEO },
279 { "loas", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
280 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
281 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
282 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
283 { 0 }
284 };
285 int score;
286 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
287
288 if (fmt && st->request_probe <= score) {
289 int i;
290 av_log(s, AV_LOG_DEBUG,
291 "Probe with size=%d, packets=%d detected %s with score=%d\n",
292 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
293 fmt->name, score);
294 for (i = 0; fmt_id_type[i].name; i++) {
295 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
296 st->codec->codec_id = fmt_id_type[i].id;
297 st->codec->codec_type = fmt_id_type[i].type;
298 return score;
299 }
300 }
301 }
302 return 0;
303 }
304
305 /************************************************************/
306 /* input media file */
307
308 int av_demuxer_open(AVFormatContext *ic) {
309 int err;
310
311 if (ic->format_whitelist && av_match_list(ic->iformat->name, ic->format_whitelist, ',') <= 0) {
312 av_log(ic, AV_LOG_ERROR, "Format not on whitelist\n");
313 return AVERROR(EINVAL);
314 }
315
316 if (ic->iformat->read_header) {
317 err = ic->iformat->read_header(ic);
318 if (err < 0)
319 return err;
320 }
321
322 if (ic->pb && !ic->data_offset)
323 ic->data_offset = avio_tell(ic->pb);
324
325 return 0;
326 }
327
328 /* Open input file and probe the format if necessary. */
329 static int init_input(AVFormatContext *s, const char *filename,
330 AVDictionary **options)
331 {
332 int ret;
333 AVProbeData pd = { filename, NULL, 0 };
334 int score = AVPROBE_SCORE_RETRY;
335
336 if (s->pb) {
337 s->flags |= AVFMT_FLAG_CUSTOM_IO;
338 if (!s->iformat)
339 return av_probe_input_buffer2(s->pb, &s->iformat, filename,
340 s, 0, s->format_probesize);
341 else if (s->iformat->flags & AVFMT_NOFILE)
342 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
343 "will be ignored with AVFMT_NOFILE format.\n");
344 return 0;
345 }
346
347 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
348 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
349 return score;
350
351 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
352 &s->interrupt_callback, options)) < 0)
353 return ret;
354 if (s->iformat)
355 return 0;
356 return av_probe_input_buffer2(s->pb, &s->iformat, filename,
357 s, 0, s->format_probesize);
358 }
359
360 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
361 AVPacketList **plast_pktl)
362 {
363 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
364 if (!pktl)
365 return NULL;
366
367 if (*packet_buffer)
368 (*plast_pktl)->next = pktl;
369 else
370 *packet_buffer = pktl;
371
372 /* Add the packet in the buffered packet list. */
373 *plast_pktl = pktl;
374 pktl->pkt = *pkt;
375 return &pktl->pkt;
376 }
377
378 int avformat_queue_attached_pictures(AVFormatContext *s)
379 {
380 int i;
381 for (i = 0; i < s->nb_streams; i++)
382 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
383 s->streams[i]->discard < AVDISCARD_ALL) {
384 AVPacket copy = s->streams[i]->attached_pic;
385 if (copy.size <= 0) {
386 av_log(s, AV_LOG_WARNING,
387 "Attached picture on stream %d has invalid size, "
388 "ignoring\n", i);
389 continue;
390 }
391 copy.buf = av_buffer_ref(copy.buf);
392 if (!copy.buf)
393 return AVERROR(ENOMEM);
394
395 add_to_pktbuf(&s->raw_packet_buffer, &copy,
396 &s->raw_packet_buffer_end);
397 }
398 return 0;
399 }
400
401 int avformat_open_input(AVFormatContext **ps, const char *filename,
402 AVInputFormat *fmt, AVDictionary **options)
403 {
404 AVFormatContext *s = *ps;
405 int ret = 0;
406 AVDictionary *tmp = NULL;
407 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
408
409 if (!s && !(s = avformat_alloc_context()))
410 return AVERROR(ENOMEM);
411 if (!s->av_class) {
412 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
413 return AVERROR(EINVAL);
414 }
415 if (fmt)
416 s->iformat = fmt;
417
418 if (options)
419 av_dict_copy(&tmp, *options, 0);
420
421 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
422 goto fail;
423
424 if ((ret = init_input(s, filename, &tmp)) < 0)
425 goto fail;
426 s->probe_score = ret;
427
428 if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
429 av_log(s, AV_LOG_ERROR, "Format not on whitelist\n");
430 ret = AVERROR(EINVAL);
431 goto fail;
432 }
433
434 avio_skip(s->pb, s->skip_initial_bytes);
435
436 /* Check filename in case an image number is expected. */
437 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
438 if (!av_filename_number_test(filename)) {
439 ret = AVERROR(EINVAL);
440 goto fail;
441 }
442 }
443
444 s->duration = s->start_time = AV_NOPTS_VALUE;
445 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
446
447 /* Allocate private data. */
448 if (s->iformat->priv_data_size > 0) {
449 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
450 ret = AVERROR(ENOMEM);
451 goto fail;
452 }
453 if (s->iformat->priv_class) {
454 *(const AVClass **) s->priv_data = s->iformat->priv_class;
455 av_opt_set_defaults(s->priv_data);
456 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
457 goto fail;
458 }
459 }
460
461 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
462 if (s->pb)
463 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, 0);
464
465 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
466 if ((ret = s->iformat->read_header(s)) < 0)
467 goto fail;
468
469 if (id3v2_extra_meta) {
470 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
471 !strcmp(s->iformat->name, "tta")) {
472 if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
473 goto fail;
474 } else
475 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
476 }
477 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
478
479 if ((ret = avformat_queue_attached_pictures(s)) < 0)
480 goto fail;
481
482 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
483 s->data_offset = avio_tell(s->pb);
484
485 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
486
487 if (options) {
488 av_dict_free(options);
489 *options = tmp;
490 }
491 *ps = s;
492 return 0;
493
494 fail:
495 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
496 av_dict_free(&tmp);
497 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
498 avio_close(s->pb);
499 avformat_free_context(s);
500 *ps = NULL;
501 return ret;
502 }
503
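/*
 * [Editorial example, not part of the original source.]  Typical caller-side
 * use of avformat_open_input(); "input.mkv" is a placeholder path:
 *
 *     AVFormatContext *ctx = NULL;
 *     int ret = avformat_open_input(&ctx, "input.mkv", NULL, NULL);
 *     if (ret < 0)
 *         return ret;                         // ctx has already been freed
 *     ret = avformat_find_stream_info(ctx, NULL);
 *     if (ret < 0) {
 *         avformat_close_input(&ctx);
 *         return ret;
 *     }
 *     ...
 *     avformat_close_input(&ctx);             // closes the file and frees ctx
 */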
504 /*******************************************************/
505
506 static void force_codec_ids(AVFormatContext *s, AVStream *st)
507 {
508 switch (st->codec->codec_type) {
509 case AVMEDIA_TYPE_VIDEO:
510 if (s->video_codec_id)
511 st->codec->codec_id = s->video_codec_id;
512 break;
513 case AVMEDIA_TYPE_AUDIO:
514 if (s->audio_codec_id)
515 st->codec->codec_id = s->audio_codec_id;
516 break;
517 case AVMEDIA_TYPE_SUBTITLE:
518 if (s->subtitle_codec_id)
519 st->codec->codec_id = s->subtitle_codec_id;
520 break;
521 }
522 }
523
524 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
525 {
526 if (st->request_probe>0) {
527 AVProbeData *pd = &st->probe_data;
528 int end;
529 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
530 --st->probe_packets;
531
532 if (pkt) {
533 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
534 if (!new_buf) {
535 av_log(s, AV_LOG_WARNING,
536 "Failed to reallocate probe buffer for stream %d\n",
537 st->index);
538 goto no_packet;
539 }
540 pd->buf = new_buf;
541 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
542 pd->buf_size += pkt->size;
543 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
544 } else {
545 no_packet:
546 st->probe_packets = 0;
547 if (!pd->buf_size) {
548 av_log(s, AV_LOG_WARNING,
549 "nothing to probe for stream %d\n", st->index);
550 }
551 }
552
553 end= s->raw_packet_buffer_remaining_size <= 0
554 || st->probe_packets<= 0;
555
556 if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
557 int score = set_codec_from_probe_data(s, st, pd);
558 if ( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_STREAM_RETRY)
559 || end) {
560 pd->buf_size = 0;
561 av_freep(&pd->buf);
562 st->request_probe = -1;
563 if (st->codec->codec_id != AV_CODEC_ID_NONE) {
564 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
565 } else
566 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
567 }
568 force_codec_ids(s, st);
569 }
570 }
571 return 0;
572 }
573
574 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
575 {
576 int64_t ref = pkt->dts;
577 int i, pts_wrap_behavior;
578 int64_t pts_wrap_reference;
579 AVProgram *first_program;
580
581 if (ref == AV_NOPTS_VALUE)
582 ref = pkt->pts;
583 if (st->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow)
584 return 0;
585 ref &= (1LL << st->pts_wrap_bits)-1;
586
587 // reference time stamp should be 60 s before first time stamp
588 pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num);
589 // if the first timestamp is no more than 1/8 of the wrap range and no more than 60 s before the wrap point, subtract rather than add the wrap offset
590 pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) ||
591 (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
592 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
593
594 first_program = av_find_program_from_stream(s, NULL, stream_index);
595
596 if (!first_program) {
597 int default_stream_index = av_find_default_stream_index(s);
598 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
599 for (i = 0; i < s->nb_streams; i++) {
600 if (av_find_program_from_stream(s, NULL, i))
601 continue;
602 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
603 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
604 }
605 }
606 else {
607 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
608 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
609 }
610 }
611 else {
612 AVProgram *program = first_program;
613 while (program) {
614 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
615 pts_wrap_reference = program->pts_wrap_reference;
616 pts_wrap_behavior = program->pts_wrap_behavior;
617 break;
618 }
619 program = av_find_program_from_stream(s, program, stream_index);
620 }
621
622 // update every program with differing pts_wrap_reference
623 program = first_program;
624 while (program) {
625 if (program->pts_wrap_reference != pts_wrap_reference) {
626 for (i = 0; i<program->nb_stream_indexes; i++) {
627 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
628 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
629 }
630
631 program->pts_wrap_reference = pts_wrap_reference;
632 program->pts_wrap_behavior = pts_wrap_behavior;
633 }
634 program = av_find_program_from_stream(s, program, stream_index);
635 }
636 }
637 return 1;
638 }
639
640 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
641 {
642 int ret, i, err;
643 AVStream *st;
644
645 for (;;) {
646 AVPacketList *pktl = s->raw_packet_buffer;
647
648 if (pktl) {
649 *pkt = pktl->pkt;
650 st = s->streams[pkt->stream_index];
651 if (s->raw_packet_buffer_remaining_size <= 0)
652 if ((err = probe_codec(s, st, NULL)) < 0)
653 return err;
654 if (st->request_probe <= 0) {
655 s->raw_packet_buffer = pktl->next;
656 s->raw_packet_buffer_remaining_size += pkt->size;
657 av_free(pktl);
658 return 0;
659 }
660 }
661
662 pkt->data = NULL;
663 pkt->size = 0;
664 av_init_packet(pkt);
665 ret = s->iformat->read_packet(s, pkt);
666 if (ret < 0) {
667 if (!pktl || ret == AVERROR(EAGAIN))
668 return ret;
669 for (i = 0; i < s->nb_streams; i++) {
670 st = s->streams[i];
671 if (st->probe_packets)
672 if ((err = probe_codec(s, st, NULL)) < 0)
673 return err;
674 av_assert0(st->request_probe <= 0);
675 }
676 continue;
677 }
678
679 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
680 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
681 av_log(s, AV_LOG_WARNING,
682 "Dropped corrupted packet (stream = %d)\n",
683 pkt->stream_index);
684 av_free_packet(pkt);
685 continue;
686 }
687
688 if (pkt->stream_index >= (unsigned)s->nb_streams) {
689 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
690 continue;
691 }
692
693 st = s->streams[pkt->stream_index];
694
695 if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
696 // correct first time stamps to negative values
697 if (!is_relative(st->first_dts))
698 st->first_dts = wrap_timestamp(st, st->first_dts);
699 if (!is_relative(st->start_time))
700 st->start_time = wrap_timestamp(st, st->start_time);
701 if (!is_relative(st->cur_dts))
702 st->cur_dts = wrap_timestamp(st, st->cur_dts);
703 }
704
705 pkt->dts = wrap_timestamp(st, pkt->dts);
706 pkt->pts = wrap_timestamp(st, pkt->pts);
707
708 force_codec_ids(s, st);
709
710 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
711 if (s->use_wallclock_as_timestamps)
712 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
713
714 if (!pktl && st->request_probe <= 0)
715 return ret;
716
717 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
718 s->raw_packet_buffer_remaining_size -= pkt->size;
719
720 if ((err = probe_codec(s, st, pkt)) < 0)
721 return err;
722 }
723 }
724
725
726 /**********************************************************/
727
728 static int determinable_frame_size(AVCodecContext *avctx)
729 {
730 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
731 avctx->codec_id == AV_CODEC_ID_MP1 ||
732 avctx->codec_id == AV_CODEC_ID_MP2 ||
733 avctx->codec_id == AV_CODEC_ID_MP3/* ||
734 avctx->codec_id == AV_CODEC_ID_CELT*/)
735 return 1;
736 return 0;
737 }
738
739 /**
740 * Compute the frame duration in seconds as the fraction *pnum / *pden; set both to 0 if it is not available.
741 */
742 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
743 AVCodecParserContext *pc, AVPacket *pkt)
744 {
745 AVRational codec_framerate = s->iformat ? st->codec->framerate :
746 av_mul_q(av_inv_q(st->codec->time_base), (AVRational){1, st->codec->ticks_per_frame});
747 int frame_size;
748
749 *pnum = 0;
750 *pden = 0;
751 switch (st->codec->codec_type) {
752 case AVMEDIA_TYPE_VIDEO:
753 if (st->r_frame_rate.num && !pc) {
754 *pnum = st->r_frame_rate.den;
755 *pden = st->r_frame_rate.num;
756 } else if (st->time_base.num * 1000LL > st->time_base.den) {
757 *pnum = st->time_base.num;
758 *pden = st->time_base.den;
759 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
760 av_assert0(st->codec->ticks_per_frame);
761 av_reduce(pnum, pden,
762 codec_framerate.den,
763 codec_framerate.num * (int64_t)st->codec->ticks_per_frame,
764 INT_MAX);
765
766 if (pc && pc->repeat_pict) {
767 av_assert0(s->iformat); // this may be wrong for interlaced encoding, but it's not used for that case
768 av_reduce(pnum, pden,
769 (*pnum) * (1LL + pc->repeat_pict),
770 (*pden),
771 INT_MAX);
772 }
773 /* If this codec can be interlaced or progressive then we need
774 * a parser to compute the duration of a packet. So if we have
775 * no parser in that case, leave the duration undefined. */
776 if (st->codec->ticks_per_frame > 1 && !pc)
777 *pnum = *pden = 0;
778 }
779 break;
780 case AVMEDIA_TYPE_AUDIO:
781 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
782 if (frame_size <= 0 || st->codec->sample_rate <= 0)
783 break;
784 *pnum = frame_size;
785 *pden = st->codec->sample_rate;
786 break;
787 default:
788 break;
789 }
790 }
791
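/*
 * [Editorial worked example, not part of the original source.]  For a
 * constant-rate video stream with r_frame_rate == 25/1 and no parser, the
 * function above yields *pnum = 1 and *pden = 25, i.e. a frame duration of
 * 1/25 s.  With a 1/90000 stream time base, compute_pkt_fields() later turns
 * this into pkt->duration == 90000 / 25 == 3600 ticks.
 */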
792 static int is_intra_only(AVCodecContext *enc) {
793 const AVCodecDescriptor *desc;
794
795 if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
796 return 1;
797
798 desc = av_codec_get_codec_descriptor(enc);
799 if (!desc) {
800 desc = avcodec_descriptor_get(enc->codec_id);
801 av_codec_set_codec_descriptor(enc, desc);
802 }
803 if (desc)
804 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
805 return 0;
806 }
807
808 static int has_decode_delay_been_guessed(AVStream *st)
809 {
810 if (st->codec->codec_id != AV_CODEC_ID_H264) return 1;
811 if (!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
812 return 1;
813 #if CONFIG_H264_DECODER
814 if (st->codec->has_b_frames &&
815 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
816 return 1;
817 #endif
818 if (st->codec->has_b_frames<3)
819 return st->nb_decoded_frames >= 7;
820 else if (st->codec->has_b_frames<4)
821 return st->nb_decoded_frames >= 18;
822 else
823 return st->nb_decoded_frames >= 20;
824 }
825
826 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
827 {
828 if (pktl->next)
829 return pktl->next;
830 if (pktl == s->packet_buffer_end)
831 return s->parse_queue;
832 return NULL;
833 }
834
835 static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts) {
836 int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
837 st->codec->codec_id != AV_CODEC_ID_HEVC;
838
839 if(!onein_oneout) {
840 int delay = st->codec->has_b_frames;
841 int i;
842
843 if (dts == AV_NOPTS_VALUE) {
844 int64_t best_score = INT64_MAX;
845 for (i = 0; i<delay; i++) {
846 if (st->pts_reorder_error_count[i]) {
847 int64_t score = st->pts_reorder_error[i] / st->pts_reorder_error_count[i];
848 if (score < best_score) {
849 best_score = score;
850 dts = pts_buffer[i];
851 }
852 }
853 }
854 } else {
855 for (i = 0; i<delay; i++) {
856 if (pts_buffer[i] != AV_NOPTS_VALUE) {
857 int64_t diff = FFABS(pts_buffer[i] - dts)
858 + (uint64_t)st->pts_reorder_error[i];
859 diff = FFMAX(diff, st->pts_reorder_error[i]);
860 st->pts_reorder_error[i] = diff;
861 st->pts_reorder_error_count[i]++;
862 if (st->pts_reorder_error_count[i] > 250) {
863 st->pts_reorder_error[i] >>= 1;
864 st->pts_reorder_error_count[i] >>= 1;
865 }
866 }
867 }
868 }
869 }
870
871 if (dts == AV_NOPTS_VALUE)
872 dts = pts_buffer[0];
873
874 return dts;
875 }
876
877 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
878 int64_t dts, int64_t pts, AVPacket *pkt)
879 {
880 AVStream *st = s->streams[stream_index];
881 AVPacketList *pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
882 int64_t pts_buffer[MAX_REORDER_DELAY+1];
883 int64_t shift;
884 int i, delay;
885
886 if (st->first_dts != AV_NOPTS_VALUE ||
887 dts == AV_NOPTS_VALUE ||
888 st->cur_dts == AV_NOPTS_VALUE ||
889 is_relative(dts))
890 return;
891
892 delay = st->codec->has_b_frames;
893 st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE);
894 st->cur_dts = dts;
895 shift = st->first_dts - RELATIVE_TS_BASE;
896
897 for (i = 0; i<MAX_REORDER_DELAY+1; i++)
898 pts_buffer[i] = AV_NOPTS_VALUE;
899
900 if (is_relative(pts))
901 pts += shift;
902
903 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
904 if (pktl->pkt.stream_index != stream_index)
905 continue;
906 if (is_relative(pktl->pkt.pts))
907 pktl->pkt.pts += shift;
908
909 if (is_relative(pktl->pkt.dts))
910 pktl->pkt.dts += shift;
911
912 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
913 st->start_time = pktl->pkt.pts;
914
915 if (pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
916 pts_buffer[0] = pktl->pkt.pts;
917 for (i = 0; i<delay && pts_buffer[i] > pts_buffer[i + 1]; i++)
918 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]);
919
920 pktl->pkt.dts = select_from_pts_buffer(st, pts_buffer, pktl->pkt.dts);
921 }
922 }
923
924 if (st->start_time == AV_NOPTS_VALUE)
925 st->start_time = pts;
926 }
927
928 static void update_initial_durations(AVFormatContext *s, AVStream *st,
929 int stream_index, int duration)
930 {
931 AVPacketList *pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
932 int64_t cur_dts = RELATIVE_TS_BASE;
933
934 if (st->first_dts != AV_NOPTS_VALUE) {
935 if (st->update_initial_durations_done)
936 return;
937 st->update_initial_durations_done = 1;
938 cur_dts = st->first_dts;
939 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
940 if (pktl->pkt.stream_index == stream_index) {
941 if (pktl->pkt.pts != pktl->pkt.dts ||
942 pktl->pkt.dts != AV_NOPTS_VALUE ||
943 pktl->pkt.duration)
944 break;
945 cur_dts -= duration;
946 }
947 }
948 if (pktl && pktl->pkt.dts != st->first_dts) {
949 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
950 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
951 return;
952 }
953 if (!pktl) {
954 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
955 return;
956 }
957 pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
958 st->first_dts = cur_dts;
959 } else if (st->cur_dts != RELATIVE_TS_BASE)
960 return;
961
962 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
963 if (pktl->pkt.stream_index != stream_index)
964 continue;
965 if (pktl->pkt.pts == pktl->pkt.dts &&
966 (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) &&
967 !pktl->pkt.duration) {
968 pktl->pkt.dts = cur_dts;
969 if (!st->codec->has_b_frames)
970 pktl->pkt.pts = cur_dts;
971 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
972 pktl->pkt.duration = duration;
973 } else
974 break;
975 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
976 }
977 if (!pktl)
978 st->cur_dts = cur_dts;
979 }
980
981 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
982 AVCodecParserContext *pc, AVPacket *pkt)
983 {
984 int num, den, presentation_delayed, delay, i;
985 int64_t offset;
986 AVRational duration;
987 int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
988 st->codec->codec_id != AV_CODEC_ID_HEVC;
989
990 if (s->flags & AVFMT_FLAG_NOFILLIN)
991 return;
992
993 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
994 if (pkt->dts == pkt->pts && st->last_dts_for_order_check != AV_NOPTS_VALUE) {
995 if (st->last_dts_for_order_check <= pkt->dts) {
996 st->dts_ordered++;
997 } else {
998 av_log(s, st->dts_misordered ? AV_LOG_DEBUG : AV_LOG_WARNING,
999 "DTS %"PRIi64" < %"PRIi64" out of order\n",
1000 pkt->dts,
1001 st->last_dts_for_order_check);
1002 st->dts_misordered++;
1003 }
1004 if (st->dts_ordered + st->dts_misordered > 250) {
1005 st->dts_ordered >>= 1;
1006 st->dts_misordered >>= 1;
1007 }
1008 }
1009
1010 st->last_dts_for_order_check = pkt->dts;
1011 if (st->dts_ordered < 8*st->dts_misordered && pkt->dts == pkt->pts)
1012 pkt->dts = AV_NOPTS_VALUE;
1013 }
1014
1015 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1016 pkt->dts = AV_NOPTS_VALUE;
1017
1018 if (pc && pc->pict_type == AV_PICTURE_TYPE_B
1019 && !st->codec->has_b_frames)
1020 //FIXME Set low_delay = 0 when has_b_frames = 1
1021 st->codec->has_b_frames = 1;
1022
1023 /* do we have a video B-frame ? */
1024 delay = st->codec->has_b_frames;
1025 presentation_delayed = 0;
1026
1027 /* XXX: we need has_b_frames, but cannot get it if the codec is
1028 * not initialized */
1029 if (delay &&
1030 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1031 presentation_delayed = 1;
1032
1033 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1034 st->pts_wrap_bits < 63 &&
1035 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1036 if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
1037 pkt->dts -= 1LL << st->pts_wrap_bits;
1038 } else
1039 pkt->pts += 1LL << st->pts_wrap_bits;
1040 }
1041
1042 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
1043 * We take the conservative approach and discard both.
1044 * Note: If this is misbehaving for an H.264 file, then possibly
1045 * presentation_delayed is not set correctly. */
1046 if (delay == 1 && pkt->dts == pkt->pts &&
1047 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
1048 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1049 if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
1050 && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1051 pkt->dts = AV_NOPTS_VALUE;
1052 }
1053
1054 duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
1055 if (pkt->duration == 0) {
1056 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
1057 if (den && num) {
1058 duration = (AVRational) {num, den};
1059 pkt->duration = av_rescale_rnd(1,
1060 num * (int64_t) st->time_base.den,
1061 den * (int64_t) st->time_base.num,
1062 AV_ROUND_DOWN);
1063 }
1064 }
1065
1066 if (pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1067 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1068
1069 /* Correct timestamps with byte offset if demuxers only have timestamps
1070 * on packet boundaries */
1071 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
1072 /* this will estimate bitrate based on this frame's duration and size */
1073 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1074 if (pkt->pts != AV_NOPTS_VALUE)
1075 pkt->pts += offset;
1076 if (pkt->dts != AV_NOPTS_VALUE)
1077 pkt->dts += offset;
1078 }
1079
1080 /* This may be redundant, but it should not hurt. */
1081 if (pkt->dts != AV_NOPTS_VALUE &&
1082 pkt->pts != AV_NOPTS_VALUE &&
1083 pkt->pts > pkt->dts)
1084 presentation_delayed = 1;
1085
1086 av_dlog(NULL,
1087 "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d delay:%d onein_oneout:%d\n",
1088 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
1089 pkt->stream_index, pc, pkt->duration, delay, onein_oneout);
1090 /* Interpolate PTS and DTS if they are not present. We skip H264
1091 * currently because delay and has_b_frames are not reliably set. */
1092 if ((delay == 0 || (delay == 1 && pc)) &&
1093 onein_oneout) {
1094 if (presentation_delayed) {
1095 /* DTS = decompression timestamp */
1096 /* PTS = presentation timestamp */
1097 if (pkt->dts == AV_NOPTS_VALUE)
1098 pkt->dts = st->last_IP_pts;
1099 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1100 if (pkt->dts == AV_NOPTS_VALUE)
1101 pkt->dts = st->cur_dts;
1102
1103 /* This is tricky: the dts must be incremented by the duration
1104 * of the frame we are displaying, i.e. the last I- or P-frame. */
1105 if (st->last_IP_duration == 0)
1106 st->last_IP_duration = pkt->duration;
1107 if (pkt->dts != AV_NOPTS_VALUE)
1108 st->cur_dts = pkt->dts + st->last_IP_duration;
1109 st->last_IP_duration = pkt->duration;
1110 st->last_IP_pts = pkt->pts;
1111 /* Cannot compute PTS if not present (we can compute it only
1112 * by knowing the future). */
1113 } else if (pkt->pts != AV_NOPTS_VALUE ||
1114 pkt->dts != AV_NOPTS_VALUE ||
1115 pkt->duration ) {
1116
1117 /* presentation is not delayed : PTS and DTS are the same */
1118 if (pkt->pts == AV_NOPTS_VALUE)
1119 pkt->pts = pkt->dts;
1120 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1121 pkt->pts, pkt);
1122 if (pkt->pts == AV_NOPTS_VALUE)
1123 pkt->pts = st->cur_dts;
1124 pkt->dts = pkt->pts;
1125 if (pkt->pts != AV_NOPTS_VALUE)
1126 st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
1127 }
1128 }
1129
1130 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
1131 st->pts_buffer[0] = pkt->pts;
1132 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
1133 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
1134
1135 pkt->dts = select_from_pts_buffer(st, st->pts_buffer, pkt->dts);
1136 }
1137 // We skipped it above so we try here.
1138 if (!onein_oneout)
1139 // This should happen on the first packet
1140 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1141 if (pkt->dts > st->cur_dts)
1142 st->cur_dts = pkt->dts;
1143
1144 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1145 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1146
1147 /* update flags */
1148 if (is_intra_only(st->codec))
1149 pkt->flags |= AV_PKT_FLAG_KEY;
1150 if (pc)
1151 pkt->convergence_duration = pc->convergence_duration;
1152 }
1153
1154 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1155 {
1156 while (*pkt_buf) {
1157 AVPacketList *pktl = *pkt_buf;
1158 *pkt_buf = pktl->next;
1159 av_free_packet(&pktl->pkt);
1160 av_freep(&pktl);
1161 }
1162 *pkt_buf_end = NULL;
1163 }
1164
1165 /**
1166 * Parse a packet, add all split parts to parse_queue.
1167 *
1168 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
1169 */
1170 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1171 {
1172 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1173 AVStream *st = s->streams[stream_index];
1174 uint8_t *data = pkt ? pkt->data : NULL;
1175 int size = pkt ? pkt->size : 0;
1176 int ret = 0, got_output = 0;
1177
1178 if (!pkt) {
1179 av_init_packet(&flush_pkt);
1180 pkt = &flush_pkt;
1181 got_output = 1;
1182 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1183 // preserve 0-size sync packets
1184 compute_pkt_fields(s, st, st->parser, pkt);
1185 }
1186
1187 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1188 int len;
1189
1190 av_init_packet(&out_pkt);
1191 len = av_parser_parse2(st->parser, st->codec,
1192 &out_pkt.data, &out_pkt.size, data, size,
1193 pkt->pts, pkt->dts, pkt->pos);
1194
1195 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1196 pkt->pos = -1;
1197 /* increment read pointer */
1198 data += len;
1199 size -= len;
1200
1201 got_output = !!out_pkt.size;
1202
1203 if (!out_pkt.size)
1204 continue;
1205
1206 if (pkt->side_data) {
1207 out_pkt.side_data = pkt->side_data;
1208 out_pkt.side_data_elems = pkt->side_data_elems;
1209 pkt->side_data = NULL;
1210 pkt->side_data_elems = 0;
1211 }
1212
1213 /* set the duration */
1214 out_pkt.duration = 0;
1215 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1216 if (st->codec->sample_rate > 0) {
1217 out_pkt.duration =
1218 av_rescale_q_rnd(st->parser->duration,
1219 (AVRational) { 1, st->codec->sample_rate },
1220 st->time_base,
1221 AV_ROUND_DOWN);
1222 }
1223 }
1224
1225 out_pkt.stream_index = st->index;
1226 out_pkt.pts = st->parser->pts;
1227 out_pkt.dts = st->parser->dts;
1228 out_pkt.pos = st->parser->pos;
1229
1230 if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1231 out_pkt.pos = st->parser->frame_offset;
1232
1233 if (st->parser->key_frame == 1 ||
1234 (st->parser->key_frame == -1 &&
1235 st->parser->pict_type == AV_PICTURE_TYPE_I))
1236 out_pkt.flags |= AV_PKT_FLAG_KEY;
1237
1238 if (st->parser->key_frame == -1 && st->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1239 out_pkt.flags |= AV_PKT_FLAG_KEY;
1240
1241 compute_pkt_fields(s, st, st->parser, &out_pkt);
1242
1243 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1244 out_pkt.buf = pkt->buf;
1245 pkt->buf = NULL;
1246 #if FF_API_DESTRUCT_PACKET
1247 FF_DISABLE_DEPRECATION_WARNINGS
1248 out_pkt.destruct = pkt->destruct;
1249 pkt->destruct = NULL;
1250 FF_ENABLE_DEPRECATION_WARNINGS
1251 #endif
1252 }
1253 if ((ret = av_dup_packet(&out_pkt)) < 0)
1254 goto fail;
1255
1256 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1257 av_free_packet(&out_pkt);
1258 ret = AVERROR(ENOMEM);
1259 goto fail;
1260 }
1261 }
1262
1263 /* end of the stream => close and free the parser */
1264 if (pkt == &flush_pkt) {
1265 av_parser_close(st->parser);
1266 st->parser = NULL;
1267 }
1268
1269 fail:
1270 av_free_packet(pkt);
1271 return ret;
1272 }
1273
1274 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1275 AVPacketList **pkt_buffer_end,
1276 AVPacket *pkt)
1277 {
1278 AVPacketList *pktl;
1279 av_assert0(*pkt_buffer);
1280 pktl = *pkt_buffer;
1281 *pkt = pktl->pkt;
1282 *pkt_buffer = pktl->next;
1283 if (!pktl->next)
1284 *pkt_buffer_end = NULL;
1285 av_freep(&pktl);
1286 return 0;
1287 }
1288
1289 static int64_t ts_to_samples(AVStream *st, int64_t ts)
1290 {
1291 return av_rescale(ts, st->time_base.num * st->codec->sample_rate, st->time_base.den);
1292 }
1293
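/*
 * [Editorial worked example, not part of the original source.]  For an audio
 * stream with time_base 1/90000 and sample_rate 48000, a timestamp of 90000
 * (one second) converts to av_rescale(90000, 1 * 48000, 90000) == 48000
 * samples.
 */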
1294 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1295 {
1296 int ret = 0, i, got_packet = 0;
1297 AVDictionary *metadata = NULL;
1298
1299 av_init_packet(pkt);
1300
1301 while (!got_packet && !s->parse_queue) {
1302 AVStream *st;
1303 AVPacket cur_pkt;
1304
1305 /* read next packet */
1306 ret = ff_read_packet(s, &cur_pkt);
1307 if (ret < 0) {
1308 if (ret == AVERROR(EAGAIN))
1309 return ret;
1310 /* flush the parsers */
1311 for (i = 0; i < s->nb_streams; i++) {
1312 st = s->streams[i];
1313 if (st->parser && st->need_parsing)
1314 parse_packet(s, NULL, st->index);
1315 }
1316 /* all remaining packets are now in parse_queue =>
1317 * really terminate parsing */
1318 break;
1319 }
1320 ret = 0;
1321 st = s->streams[cur_pkt.stream_index];
1322
1323 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1324 cur_pkt.dts != AV_NOPTS_VALUE &&
1325 cur_pkt.pts < cur_pkt.dts) {
1326 av_log(s, AV_LOG_WARNING,
1327 "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1328 cur_pkt.stream_index,
1329 av_ts2str(cur_pkt.pts),
1330 av_ts2str(cur_pkt.dts),
1331 cur_pkt.size);
1332 }
1333 if (s->debug & FF_FDEBUG_TS)
1334 av_log(s, AV_LOG_DEBUG,
1335 "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1336 cur_pkt.stream_index,
1337 av_ts2str(cur_pkt.pts),
1338 av_ts2str(cur_pkt.dts),
1339 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1340
1341 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1342 st->parser = av_parser_init(st->codec->codec_id);
1343 if (!st->parser) {
1344 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1345 "%s, packets or times may be invalid.\n",
1346 avcodec_get_name(st->codec->codec_id));
1347 /* no parser available: just output the raw packets */
1348 st->need_parsing = AVSTREAM_PARSE_NONE;
1349 } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1350 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1351 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1352 st->parser->flags |= PARSER_FLAG_ONCE;
1353 else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1354 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1355 }
1356
1357 if (!st->need_parsing || !st->parser) {
1358 /* no parsing needed: we just output the packet as is */
1359 *pkt = cur_pkt;
1360 compute_pkt_fields(s, st, NULL, pkt);
1361 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1362 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1363 ff_reduce_index(s, st->index);
1364 av_add_index_entry(st, pkt->pos, pkt->dts,
1365 0, 0, AVINDEX_KEYFRAME);
1366 }
1367 got_packet = 1;
1368 } else if (st->discard < AVDISCARD_ALL) {
1369 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1370 return ret;
1371 } else {
1372 /* free packet */
1373 av_free_packet(&cur_pkt);
1374 }
1375 if (pkt->flags & AV_PKT_FLAG_KEY)
1376 st->skip_to_keyframe = 0;
1377 if (st->skip_to_keyframe) {
1378 av_free_packet(&cur_pkt);
1379 if (got_packet) {
1380 *pkt = cur_pkt;
1381 }
1382 got_packet = 0;
1383 }
1384 }
1385
1386 if (!got_packet && s->parse_queue)
1387 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1388
1389 if (ret >= 0) {
1390 AVStream *st = s->streams[pkt->stream_index];
1391 int discard_padding = 0;
1392 if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
1393 int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
1394 int64_t sample = ts_to_samples(st, pts);
1395 int duration = ts_to_samples(st, pkt->duration);
1396 int64_t end_sample = sample + duration;
1397 if (duration > 0 && end_sample >= st->first_discard_sample &&
1398 sample < st->last_discard_sample)
1399 discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
1400 }
1401 if (st->skip_samples || discard_padding) {
1402 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1403 if (p) {
1404 AV_WL32(p, st->skip_samples);
1405 AV_WL32(p + 4, discard_padding);
1406 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1407 }
1408 st->skip_samples = 0;
1409 }
1410
1411 if (st->inject_global_side_data) {
1412 for (i = 0; i < st->nb_side_data; i++) {
1413 AVPacketSideData *src_sd = &st->side_data[i];
1414 uint8_t *dst_data;
1415
1416 if (av_packet_get_side_data(pkt, src_sd->type, NULL))
1417 continue;
1418
1419 dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
1420 if (!dst_data) {
1421 av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
1422 continue;
1423 }
1424
1425 memcpy(dst_data, src_sd->data, src_sd->size);
1426 }
1427 st->inject_global_side_data = 0;
1428 }
1429
1430 if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
1431 av_packet_merge_side_data(pkt);
1432 }
1433
1434 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1435 if (metadata) {
1436 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
1437 av_dict_copy(&s->metadata, metadata, 0);
1438 av_dict_free(&metadata);
1439 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
1440 }
1441
1442 if (s->debug & FF_FDEBUG_TS)
1443 av_log(s, AV_LOG_DEBUG,
1444 "read_frame_internal stream=%d, pts=%s, dts=%s, "
1445 "size=%d, duration=%d, flags=%d\n",
1446 pkt->stream_index,
1447 av_ts2str(pkt->pts),
1448 av_ts2str(pkt->dts),
1449 pkt->size, pkt->duration, pkt->flags);
1450
1451 return ret;
1452 }
1453
1454 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1455 {
1456 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1457 int eof = 0;
1458 int ret;
1459 AVStream *st;
1460
1461 if (!genpts) {
1462 ret = s->packet_buffer
1463 ? read_from_packet_buffer(&s->packet_buffer,
1464 &s->packet_buffer_end, pkt)
1465 : read_frame_internal(s, pkt);
1466 if (ret < 0)
1467 return ret;
1468 goto return_packet;
1469 }
1470
1471 for (;;) {
1472 AVPacketList *pktl = s->packet_buffer;
1473
1474 if (pktl) {
1475 AVPacket *next_pkt = &pktl->pkt;
1476
1477 if (next_pkt->dts != AV_NOPTS_VALUE) {
1478 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1479 // last dts seen for this stream. if any of the packets following
1480 // the current one has no dts, we will set this to AV_NOPTS_VALUE.
1481 int64_t last_dts = next_pkt->dts;
1482 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1483 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1484 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1485 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1486 // not B-frame
1487 next_pkt->pts = pktl->pkt.dts;
1488 }
1489 if (last_dts != AV_NOPTS_VALUE) {
1490 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1491 last_dts = pktl->pkt.dts;
1492 }
1493 }
1494 pktl = pktl->next;
1495 }
1496 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1497 // Fix the "last reference frame has no pts" issue (for MXF etc.).
1498 // We only do this when:
1499 // 1. we are at EOF,
1500 // 2. we are not able to resolve a pts value for the current packet, and
1501 // 3. the packets for this stream at the end of the file had valid dts.
1502 next_pkt->pts = last_dts + next_pkt->duration;
1503 }
1504 pktl = s->packet_buffer;
1505 }
1506
1507 /* read packet from packet buffer, if there is data */
1508 st = s->streams[next_pkt->stream_index];
1509 if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
1510 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1511 ret = read_from_packet_buffer(&s->packet_buffer,
1512 &s->packet_buffer_end, pkt);
1513 goto return_packet;
1514 }
1515 }
1516
1517 ret = read_frame_internal(s, pkt);
1518 if (ret < 0) {
1519 if (pktl && ret != AVERROR(EAGAIN)) {
1520 eof = 1;
1521 continue;
1522 } else
1523 return ret;
1524 }
1525
1526 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1527 &s->packet_buffer_end)) < 0)
1528 return AVERROR(ENOMEM);
1529 }
1530
1531 return_packet:
1532
1533 st = s->streams[pkt->stream_index];
1534 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1535 ff_reduce_index(s, st->index);
1536 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1537 }
1538
1539 if (is_relative(pkt->dts))
1540 pkt->dts -= RELATIVE_TS_BASE;
1541 if (is_relative(pkt->pts))
1542 pkt->pts -= RELATIVE_TS_BASE;
1543
1544 return ret;
1545 }
1546
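/*
 * [Editorial example, not part of the original source.]  The canonical
 * demuxing loop built on av_read_frame(), using the av_free_packet() API of
 * this libavformat version; "ctx" is an already-opened AVFormatContext:
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ctx, &pkt) >= 0) {
 *         // pkt.stream_index identifies the AVStream this packet belongs to
 *         av_free_packet(&pkt);
 *     }
 */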
1547 /* XXX: empty (free) the packet queue */
1548 static void flush_packet_queue(AVFormatContext *s)
1549 {
1550 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1551 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1552 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1553
1554 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1555 }
1556
1557 /*******************************************************/
1558 /* seek support */
1559
1560 int av_find_default_stream_index(AVFormatContext *s)
1561 {
1562 int i;
1563 AVStream *st;
1564 int best_stream = 0;
1565 int best_score = -1;
1566
1567 if (s->nb_streams <= 0)
1568 return -1;
1569 for (i = 0; i < s->nb_streams; i++) {
1570 int score = 0;
1571 st = s->streams[i];
1572 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1573 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1574 if (!st->codec->width && !st->codec->height && !st->codec_info_nb_frames)
1575 score += 25;
1576 else
1577 score += 100;
1578 }
1579 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1580 if (!st->codec->sample_rate && !st->codec_info_nb_frames)
1581 score += 12;
1582 else
1583 score += 50;
1584 }
1585
1586 if (score > best_score) {
1587 best_score = score;
1588 best_stream = i;
1589 }
1590 }
1591 return best_stream;
1592 }
1593
1594 /** Flush the frame reader. */
1595 void ff_read_frame_flush(AVFormatContext *s)
1596 {
1597 AVStream *st;
1598 int i, j;
1599
1600 flush_packet_queue(s);
1601
1602 /* Reset read state for each stream. */
1603 for (i = 0; i < s->nb_streams; i++) {
1604 st = s->streams[i];
1605
1606 if (st->parser) {
1607 av_parser_close(st->parser);
1608 st->parser = NULL;
1609 }
1610 st->last_IP_pts = AV_NOPTS_VALUE;
1611 st->last_dts_for_order_check = AV_NOPTS_VALUE;
1612 if (st->first_dts == AV_NOPTS_VALUE)
1613 st->cur_dts = RELATIVE_TS_BASE;
1614 else
1615 /* We set the current DTS to an unspecified origin. */
1616 st->cur_dts = AV_NOPTS_VALUE;
1617
1618 st->probe_packets = MAX_PROBE_PACKETS;
1619
1620 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1621 st->pts_buffer[j] = AV_NOPTS_VALUE;
1622
1623 if (s->internal->inject_global_side_data)
1624 st->inject_global_side_data = 1;
1625 }
1626 }
1627
1628 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1629 {
1630 int i;
1631
1632 for (i = 0; i < s->nb_streams; i++) {
1633 AVStream *st = s->streams[i];
1634
1635 st->cur_dts =
1636 av_rescale(timestamp,
1637 st->time_base.den * (int64_t) ref_st->time_base.num,
1638 st->time_base.num * (int64_t) ref_st->time_base.den);
1639 }
1640 }
1641
1642 void ff_reduce_index(AVFormatContext *s, int stream_index)
1643 {
1644 AVStream *st = s->streams[stream_index];
1645 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1646
1647 if ((unsigned) st->nb_index_entries >= max_entries) {
1648 int i;
1649 for (i = 0; 2 * i < st->nb_index_entries; i++)
1650 st->index_entries[i] = st->index_entries[2 * i];
1651 st->nb_index_entries = i;
1652 }
1653 }
1654
1655 int ff_add_index_entry(AVIndexEntry **index_entries,
1656 int *nb_index_entries,
1657 unsigned int *index_entries_allocated_size,
1658 int64_t pos, int64_t timestamp,
1659 int size, int distance, int flags)
1660 {
1661 AVIndexEntry *entries, *ie;
1662 int index;
1663
1664 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1665 return -1;
1666
1667 if (timestamp == AV_NOPTS_VALUE)
1668 return AVERROR(EINVAL);
1669
1670 if (size < 0 || size > 0x3FFFFFFF)
1671 return AVERROR(EINVAL);
1672
1673 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1674 timestamp -= RELATIVE_TS_BASE;
1675
1676 entries = av_fast_realloc(*index_entries,
1677 index_entries_allocated_size,
1678 (*nb_index_entries + 1) *
1679 sizeof(AVIndexEntry));
1680 if (!entries)
1681 return -1;
1682
1683 *index_entries = entries;
1684
1685 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1686 timestamp, AVSEEK_FLAG_ANY);
1687
1688 if (index < 0) {
1689 index = (*nb_index_entries)++;
1690 ie = &entries[index];
1691 av_assert0(index == 0 || ie[-1].timestamp < timestamp);
1692 } else {
1693 ie = &entries[index];
1694 if (ie->timestamp != timestamp) {
1695 if (ie->timestamp <= timestamp)
1696 return -1;
1697 memmove(entries + index + 1, entries + index,
1698 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1699 (*nb_index_entries)++;
1700 } else if (ie->pos == pos && distance < ie->min_distance)
1701 // do not reduce the distance
1702 distance = ie->min_distance;
1703 }
1704
1705 ie->pos = pos;
1706 ie->timestamp = timestamp;
1707 ie->min_distance = distance;
1708 ie->size = size;
1709 ie->flags = flags;
1710
1711 return index;
1712 }
1713
1714 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1715 int size, int distance, int flags)
1716 {
1717 timestamp = wrap_timestamp(st, timestamp);
1718 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1719 &st->index_entries_allocated_size, pos,
1720 timestamp, size, distance, flags);
1721 }
1722
1723 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1724 int64_t wanted_timestamp, int flags)
1725 {
1726 int a, b, m;
1727 int64_t timestamp;
1728
1729 a = -1;
1730 b = nb_entries;
1731
1732 // Optimize appending index entries at the end.
1733 if (b && entries[b - 1].timestamp < wanted_timestamp)
1734 a = b - 1;
1735
1736 while (b - a > 1) {
1737 m = (a + b) >> 1;
1738 timestamp = entries[m].timestamp;
1739 if (timestamp >= wanted_timestamp)
1740 b = m;
1741 if (timestamp <= wanted_timestamp)
1742 a = m;
1743 }
1744 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1745
1746 if (!(flags & AVSEEK_FLAG_ANY))
1747 while (m >= 0 && m < nb_entries &&
1748 !(entries[m].flags & AVINDEX_KEYFRAME))
1749 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1750
1751 if (m == nb_entries)
1752 return -1;
1753 return m;
1754 }
1755
1756 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1757 {
1758 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1759 wanted_timestamp, flags);
1760 }
1761
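/*
 * [Editorial example, not part of the original source.]  Given index entries
 * with timestamps {0, 1000, 2000} (all keyframes) and wanted_timestamp ==
 * 1500, ff_index_search_timestamp() returns:
 *
 *     flags = AVSEEK_FLAG_BACKWARD  -> 1   (the entry with timestamp 1000)
 *     flags = 0                     -> 2   (the entry with timestamp 2000)
 */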
1762 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1763 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1764 {
1765 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1766 if (stream_index >= 0)
1767 ts = wrap_timestamp(s->streams[stream_index], ts);
1768 return ts;
1769 }
1770
1771 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1772 int64_t target_ts, int flags)
1773 {
1774 AVInputFormat *avif = s->iformat;
1775 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1776 int64_t ts_min, ts_max, ts;
1777 int index;
1778 int64_t ret;
1779 AVStream *st;
1780
1781 if (stream_index < 0)
1782 return -1;
1783
1784 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1785
1786 ts_max =
1787 ts_min = AV_NOPTS_VALUE;
1788 pos_limit = -1; // GCC falsely says it may be uninitialized.
1789
1790 st = s->streams[stream_index];
1791 if (st->index_entries) {
1792 AVIndexEntry *e;
1793
1794 /* FIXME: Whole function must be checked for non-keyframe entries in
1795 * index case, especially read_timestamp(). */
1796 index = av_index_search_timestamp(st, target_ts,
1797 flags | AVSEEK_FLAG_BACKWARD);
1798 index = FFMAX(index, 0);
1799 e = &st->index_entries[index];
1800
1801 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1802 pos_min = e->pos;
1803 ts_min = e->timestamp;
1804 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1805 pos_min, av_ts2str(ts_min));
1806 } else {
1807 av_assert1(index == 0);
1808 }
1809
1810 index = av_index_search_timestamp(st, target_ts,
1811 flags & ~AVSEEK_FLAG_BACKWARD);
1812 av_assert0(index < st->nb_index_entries);
1813 if (index >= 0) {
1814 e = &st->index_entries[index];
1815 av_assert1(e->timestamp >= target_ts);
1816 pos_max = e->pos;
1817 ts_max = e->timestamp;
1818 pos_limit = pos_max - e->min_distance;
1819 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1820 " dts_max=%s\n", pos_max, pos_limit, av_ts2str(ts_max));
1821 }
1822 }
1823
1824 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1825 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1826 if (pos < 0)
1827 return -1;
1828
1829 /* do the seek */
1830 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1831 return ret;
1832
1833 ff_read_frame_flush(s);
1834 ff_update_cur_dts(s, st, ts);
1835
1836 return 0;
1837 }
1838
1839 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1840 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1841 {
1842 int64_t step = 1024;
1843 int64_t limit, ts_max;
1844 int64_t filesize = avio_size(s->pb);
1845 int64_t pos_max = filesize - 1;
1846 do {
1847 limit = pos_max;
1848 pos_max = FFMAX(0, (pos_max) - step);
1849 ts_max = ff_read_timestamp(s, stream_index,
1850 &pos_max, limit, read_timestamp);
1851 step += step;
1852 } while (ts_max == AV_NOPTS_VALUE && 2*limit > step);
1853 if (ts_max == AV_NOPTS_VALUE)
1854 return -1;
1855
1856 for (;;) {
1857 int64_t tmp_pos = pos_max + 1;
1858 int64_t tmp_ts = ff_read_timestamp(s, stream_index,
1859 &tmp_pos, INT64_MAX, read_timestamp);
1860 if (tmp_ts == AV_NOPTS_VALUE)
1861 break;
1862 av_assert0(tmp_pos > pos_max);
1863 ts_max = tmp_ts;
1864 pos_max = tmp_pos;
1865 if (tmp_pos >= filesize)
1866 break;
1867 }
1868
1869 if (ts)
1870 *ts = ts_max;
1871 if (pos)
1872 *pos = pos_max;
1873
1874 return 0;
1875 }
1876
1877 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1878 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1879 int64_t ts_min, int64_t ts_max,
1880 int flags, int64_t *ts_ret,
1881 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1882 int64_t *, int64_t))
1883 {
1884 int64_t pos, ts;
1885 int64_t start_pos;
1886 int no_change;
1887 int ret;
1888
1889 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1890
1891 if (ts_min == AV_NOPTS_VALUE) {
1892 pos_min = s->data_offset;
1893 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1894 if (ts_min == AV_NOPTS_VALUE)
1895 return -1;
1896 }
1897
1898 if (ts_min >= target_ts) {
1899 *ts_ret = ts_min;
1900 return pos_min;
1901 }
1902
1903 if (ts_max == AV_NOPTS_VALUE) {
1904 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1905 return ret;
1906 pos_limit = pos_max;
1907 }
1908
1909 if (ts_max <= target_ts) {
1910 *ts_ret = ts_max;
1911 return pos_max;
1912 }
1913
1914 if (ts_min > ts_max)
1915 return -1;
1916 else if (ts_min == ts_max)
1917 pos_limit = pos_min;
1918
1919 no_change = 0;
1920 while (pos_min < pos_limit) {
1921 av_dlog(s,
1922 "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1923 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1924 assert(pos_limit <= pos_max);
1925
1926 if (no_change == 0) {
1927 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1928 // interpolate position (better than dichotomy)
1929 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1930 ts_max - ts_min) +
1931 pos_min - approximate_keyframe_distance;
1932 } else if (no_change == 1) {
1933 // bisection if interpolation did not change min / max pos last time
1934 pos = (pos_min + pos_limit) >> 1;
1935 } else {
1936 /* linear search if bisection failed, can only happen if there
1937 * are very few or no keyframes between min/max */
1938 pos = pos_min;
1939 }
1940 if (pos <= pos_min)
1941 pos = pos_min + 1;
1942 else if (pos > pos_limit)
1943 pos = pos_limit;
1944 start_pos = pos;
1945
1946 // May pass pos_limit instead of -1.
1947 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp);
1948 if (pos == pos_max)
1949 no_change++;
1950 else
1951 no_change = 0;
1952 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s"
1953 " target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1954 pos_min, pos, pos_max,
1955 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1956 pos_limit, start_pos, no_change);
1957 if (ts == AV_NOPTS_VALUE) {
1958 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1959 return -1;
1960 }
1961 if (target_ts <= ts) {
1962 pos_limit = start_pos - 1;
1963 pos_max = pos;
1964 ts_max = ts;
1965 }
1966 if (target_ts >= ts) {
1967 pos_min = pos;
1968 ts_min = ts;
1969 }
1970 }
1971
1972 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1973 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1974 #if 0
1975 pos_min = pos;
1976 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1977 pos_min++;
1978 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1979 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1980 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1981 #endif
1982 *ts_ret = ts;
1983 return pos;
1984 }
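/* The read_timestamp() callback used above is expected to return the
 * timestamp of the next packet belonging to stream_index found at or after
 * *ppos, to update *ppos to that packet's position, and to return
 * AV_NOPTS_VALUE if nothing suitable is found before pos_limit. A very rough
 * sketch of such a callback; find_next_packet() is a hypothetical stand-in
 * for the format-specific resync and header parsing: */
#if 0
static int64_t example_read_timestamp(AVFormatContext *s, int stream_index,
                                      int64_t *ppos, int64_t pos_limit)
{
    int64_t pos = *ppos;

    while (pos < pos_limit) {
        int64_t dts;
        int stream;
        /* locate the next packet header at or after pos and report its
         * stream index, start position and dts */
        if (find_next_packet(s, &pos, &stream, &dts) < 0)
            return AV_NOPTS_VALUE;
        if (stream == stream_index) {
            *ppos = pos;
            return dts;
        }
        pos++; /* skip this packet and keep scanning */
    }
    return AV_NOPTS_VALUE;
}
#endif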
1985
1986 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1987 int64_t pos, int flags)
1988 {
1989 int64_t pos_min, pos_max;
1990
1991 pos_min = s->data_offset;
1992 pos_max = avio_size(s->pb) - 1;
1993
1994 if (pos < pos_min)
1995 pos = pos_min;
1996 else if (pos > pos_max)
1997 pos = pos_max;
1998
1999 avio_seek(s->pb, pos, SEEK_SET);
2000
2001 s->io_repositioned = 1;
2002
2003 return 0;
2004 }
2005
2006 static int seek_frame_generic(AVFormatContext *s, int stream_index,
2007 int64_t timestamp, int flags)
2008 {
2009 int index;
2010 int64_t ret;
2011 AVStream *st;
2012 AVIndexEntry *ie;
2013
2014 st = s->streams[stream_index];
2015
2016 index = av_index_search_timestamp(st, timestamp, flags);
2017
2018 if (index < 0 && st->nb_index_entries &&
2019 timestamp < st->index_entries[0].timestamp)
2020 return -1;
2021
2022 if (index < 0 || index == st->nb_index_entries - 1) {
2023 AVPacket pkt;
2024 int nonkey = 0;
2025
2026 if (st->nb_index_entries) {
2027 av_assert0(st->index_entries);
2028 ie = &st->index_entries[st->nb_index_entries - 1];
2029 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2030 return ret;
2031 ff_update_cur_dts(s, st, ie->timestamp);
2032 } else {
2033 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
2034 return ret;
2035 }
2036 for (;;) {
2037 int read_status;
2038 do {
2039 read_status = av_read_frame(s, &pkt);
2040 } while (read_status == AVERROR(EAGAIN));
2041 if (read_status < 0)
2042 break;
2043 av_free_packet(&pkt);
2044 if (stream_index == pkt.stream_index && pkt.dts > timestamp) {
2045 if (pkt.flags & AV_PKT_FLAG_KEY)
2046 break;
2047 if (nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS) {
2048 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
2049 break;
2050 }
2051 }
2052 }
2053 index = av_index_search_timestamp(st, timestamp, flags);
2054 }
2055 if (index < 0)
2056 return -1;
2057
2058 ff_read_frame_flush(s);
2059 if (s->iformat->read_seek)
2060 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2061 return 0;
2062 ie = &st->index_entries[index];
2063 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2064 return ret;
2065 ff_update_cur_dts(s, st, ie->timestamp);
2066
2067 return 0;
2068 }
2069
2070 static int seek_frame_internal(AVFormatContext *s, int stream_index,
2071 int64_t timestamp, int flags)
2072 {
2073 int ret;
2074 AVStream *st;
2075
2076 if (flags & AVSEEK_FLAG_BYTE) {
2077 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2078 return -1;
2079 ff_read_frame_flush(s);
2080 return seek_frame_byte(s, stream_index, timestamp, flags);
2081 }
2082
2083 if (stream_index < 0) {
2084 stream_index = av_find_default_stream_index(s);
2085 if (stream_index < 0)
2086 return -1;
2087
2088 st = s->streams[stream_index];
2089 /* timestamp for default must be expressed in AV_TIME_BASE units */
2090 timestamp = av_rescale(timestamp, st->time_base.den,
2091 AV_TIME_BASE * (int64_t) st->time_base.num);
2092 }
2093
2094 /* first, we try the format specific seek */
2095 if (s->iformat->read_seek) {
2096 ff_read_frame_flush(s);
2097 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2098 } else
2099 ret = -1;
2100 if (ret >= 0)
2101 return 0;
2102
2103 if (s->iformat->read_timestamp &&
2104 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2105 ff_read_frame_flush(s);
2106 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2107 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2108 ff_read_frame_flush(s);
2109 return seek_frame_generic(s, stream_index, timestamp, flags);
2110 } else
2111 return -1;
2112 }
2113
2114 int av_seek_frame(AVFormatContext *s, int stream_index,
2115 int64_t timestamp, int flags)
2116 {
2117 int ret;
2118
2119 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2120 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2121 if ((flags & AVSEEK_FLAG_BACKWARD))
2122 max_ts = timestamp;
2123 else
2124 min_ts = timestamp;
2125 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2126 flags & ~AVSEEK_FLAG_BACKWARD);
2127 }
2128
2129 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2130
2131 if (ret >= 0)
2132 ret = avformat_queue_attached_pictures(s);
2133
2134 return ret;
2135 }
2136
2137 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
2138 int64_t ts, int64_t max_ts, int flags)
2139 {
2140 if (min_ts > ts || max_ts < ts)
2141 return -1;
2142 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2143 return AVERROR(EINVAL);
2144
2145 if (s->seek2any>0)
2146 flags |= AVSEEK_FLAG_ANY;
2147 flags &= ~AVSEEK_FLAG_BACKWARD;
2148
2149 if (s->iformat->read_seek2) {
2150 int ret;
2151 ff_read_frame_flush(s);
2152
2153 if (stream_index == -1 && s->nb_streams == 1) {
2154 AVRational time_base = s->streams[0]->time_base;
2155 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2156 min_ts = av_rescale_rnd(min_ts, time_base.den,
2157 time_base.num * (int64_t)AV_TIME_BASE,
2158 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2159 max_ts = av_rescale_rnd(max_ts, time_base.den,
2160 time_base.num * (int64_t)AV_TIME_BASE,
2161 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2162 }
2163
2164 ret = s->iformat->read_seek2(s, stream_index, min_ts,
2165 ts, max_ts, flags);
2166
2167 if (ret >= 0)
2168 ret = avformat_queue_attached_pictures(s);
2169 return ret;
2170 }
2171
2172 if (s->iformat->read_timestamp) {
2173 // try to seek via read_timestamp()
2174 }
2175
2176 // Fall back on old API if new is not implemented but old is.
2177 // Note the old API has somewhat different semantics.
2178 if (s->iformat->read_seek || 1) {
2179 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2180 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2181 if (ret<0 && ts != min_ts && max_ts != ts) {
2182 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2183 if (ret >= 0)
2184 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2185 }
2186 return ret;
2187 }
2188
2189 // try some generic seek like seek_frame_generic() but with new ts semantics
2190 return -1; //unreachable
2191 }
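/* Typical caller-side use of the seeking API above (sketch; error handling
 * trimmed, t is a placeholder). With stream_index == -1 the timestamps are
 * interpreted in AV_TIME_BASE units, so seeking to t seconds looks like: */
#if 0
int64_t target = (int64_t)(t * AV_TIME_BASE);
/* new API: accept any position up to and including target */
ret = avformat_seek_file(s, -1, INT64_MIN, target, target, 0);
/* old API: land on the keyframe preceding target */
ret = av_seek_frame(s, -1, target, AVSEEK_FLAG_BACKWARD);
#endif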
2192
2193 /*******************************************************/
2194
2195 /**
2196 * Return TRUE if the duration is accurately known for at least one stream.
2197 *
2198 * @return TRUE if at least one stream, or the container itself, has a known duration.
2199 */
2200 static int has_duration(AVFormatContext *ic)
2201 {
2202 int i;
2203 AVStream *st;
2204
2205 for (i = 0; i < ic->nb_streams; i++) {
2206 st = ic->streams[i];
2207 if (st->duration != AV_NOPTS_VALUE)
2208 return 1;
2209 }
2210 if (ic->duration != AV_NOPTS_VALUE)
2211 return 1;
2212 return 0;
2213 }
2214
2215 /**
2216 * Estimate the stream timings from those of the individual components.
2217 *
2218 * Also computes the global bitrate if possible.
2219 */
2220 static void update_stream_timings(AVFormatContext *ic)
2221 {
2222 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2223 int64_t duration, duration1, filesize;
2224 int i;
2225 AVStream *st;
2226 AVProgram *p;
2227
2228 start_time = INT64_MAX;
2229 start_time_text = INT64_MAX;
2230 end_time = INT64_MIN;
2231 duration = INT64_MIN;
2232 for (i = 0; i < ic->nb_streams; i++) {
2233 st = ic->streams[i];
2234 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2235 start_time1 = av_rescale_q(st->start_time, st->time_base,
2236 AV_TIME_BASE_Q);
2237 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2238 if (start_time1 < start_time_text)
2239 start_time_text = start_time1;
2240 } else
2241 start_time = FFMIN(start_time, start_time1);
2242 end_time1 = AV_NOPTS_VALUE;
2243 if (st->duration != AV_NOPTS_VALUE) {
2244 end_time1 = start_time1 +
2245 av_rescale_q(st->duration, st->time_base,
2246 AV_TIME_BASE_Q);
2247 end_time = FFMAX(end_time, end_time1);
2248 }
2249 for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) {
2250 if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2251 p->start_time = start_time1;
2252 if (p->end_time < end_time1)
2253 p->end_time = end_time1;
2254 }
2255 }
2256 if (st->duration != AV_NOPTS_VALUE) {
2257 duration1 = av_rescale_q(st->duration, st->time_base,
2258 AV_TIME_BASE_Q);
2259 duration = FFMAX(duration, duration1);
2260 }
2261 }
2262 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2263 start_time = start_time_text;
2264 else if (start_time > start_time_text)
2265 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non-primary stream start time %f\n", start_time_text / (float)AV_TIME_BASE);
2266
2267 if (start_time != INT64_MAX) {
2268 ic->start_time = start_time;
2269 if (end_time != INT64_MIN) {
2270 if (ic->nb_programs) {
2271 for (i = 0; i < ic->nb_programs; i++) {
2272 p = ic->programs[i];
2273 if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2274 duration = FFMAX(duration, p->end_time - p->start_time);
2275 }
2276 } else
2277 duration = FFMAX(duration, end_time - start_time);
2278 }
2279 }
2280 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2281 ic->duration = duration;
2282 }
2283 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2284 /* compute the bitrate */
2285 double bitrate = (double) filesize * 8.0 * AV_TIME_BASE /
2286 (double) ic->duration;
2287 if (bitrate >= 0 && bitrate <= INT_MAX)
2288 ic->bit_rate = bitrate;
2289 }
2290 }
2291
2292 static void fill_all_stream_timings(AVFormatContext *ic)
2293 {
2294 int i;
2295 AVStream *st;
2296
2297 update_stream_timings(ic);
2298 for (i = 0; i < ic->nb_streams; i++) {
2299 st = ic->streams[i];
2300 if (st->start_time == AV_NOPTS_VALUE) {
2301 if (ic->start_time != AV_NOPTS_VALUE)
2302 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
2303 st->time_base);
2304 if (ic->duration != AV_NOPTS_VALUE)
2305 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
2306 st->time_base);
2307 }
2308 }
2309 }
2310
2311 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2312 {
2313 int64_t filesize, duration;
2314 int i, show_warning = 0;
2315 AVStream *st;
2316
2317 /* if bit_rate is already set, we believe it */
2318 if (ic->bit_rate <= 0) {
2319 int bit_rate = 0;
2320 for (i = 0; i < ic->nb_streams; i++) {
2321 st = ic->streams[i];
2322 if (st->codec->bit_rate > 0) {
2323 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2324 bit_rate = 0;
2325 break;
2326 }
2327 bit_rate += st->codec->bit_rate;
2328 }
2329 }
2330 ic->bit_rate = bit_rate;
2331 }
2332
2333 /* if duration is already set, we believe it */
2334 if (ic->duration == AV_NOPTS_VALUE &&
2335 ic->bit_rate != 0) {
2336 filesize = ic->pb ? avio_size(ic->pb) : 0;
2337 if (filesize > ic->data_offset) {
2338 filesize -= ic->data_offset;
2339 for (i = 0; i < ic->nb_streams; i++) {
2340 st = ic->streams[i];
2341 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2342 && st->duration == AV_NOPTS_VALUE) {
2343 duration = av_rescale(8 * filesize, st->time_base.den,
2344 ic->bit_rate *
2345 (int64_t) st->time_base.num);
2346 st->duration = duration;
2347 show_warning = 1;
2348 }
2349 }
2350 }
2351 }
2352 if (show_warning)
2353 av_log(ic, AV_LOG_WARNING,
2354 "Estimating duration from bitrate, this may be inaccurate\n");
2355 }
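/* Worked example for the estimate above (illustrative numbers): a payload of
 * filesize - data_offset = 10485760 bytes (10 MiB) with ic->bit_rate set to
 * 1000000 bit/s gives
 *     duration = 8 * 10485760 / 1000000 ~= 83.9 seconds,
 * scaled into each stream's time base by the av_rescale() call above. The
 * result is only as trustworthy as the bitrate it is based on, hence the
 * warning. */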
2356
2357 #define DURATION_MAX_READ_SIZE 250000LL
2358 #define DURATION_MAX_RETRY 4
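/* With these values the loop below reads windows of roughly 250 kB, 500 kB,
 * 1 MB, 2 MB and finally 4 MB counted back from the end of the file
 * (offset = filesize - (DURATION_MAX_READ_SIZE << retry)), growing the
 * window while some audio/video stream still has no duration. */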
2359
2360 /* only usable for MPEG-PS/TS streams */
2361 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2362 {
2363 AVPacket pkt1, *pkt = &pkt1;
2364 AVStream *st;
2365 int num, den, read_size, i, ret;
2366 int found_duration = 0;
2367 int is_end;
2368 int64_t filesize, offset, duration;
2369 int retry = 0;
2370
2371 /* flush packet queue */
2372 flush_packet_queue(ic);
2373
2374 for (i = 0; i < ic->nb_streams; i++) {
2375 st = ic->streams[i];
2376 if (st->start_time == AV_NOPTS_VALUE &&
2377 st->first_dts == AV_NOPTS_VALUE &&
2378 st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN)
2379 av_log(st->codec, AV_LOG_WARNING,
2380 "start time for stream %d is not set in estimate_timings_from_pts\n", i);
2381
2382 if (st->parser) {
2383 av_parser_close(st->parser);
2384 st->parser = NULL;
2385 }
2386 }
2387
2388 av_opt_set(ic, "skip_changes", "1", AV_OPT_SEARCH_CHILDREN);
2389 /* estimate the end time (duration) */
2390 /* XXX: may need to support wrapping */
2391 filesize = ic->pb ? avio_size(ic->pb) : 0;
2392 do {
2393 is_end = found_duration;
2394 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
2395 if (offset < 0)
2396 offset = 0;
2397
2398 avio_seek(ic->pb, offset, SEEK_SET);
2399 read_size = 0;
2400 for (;;) {
2401 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
2402 break;
2403
2404 do {
2405 ret = ff_read_packet(ic, pkt);
2406 } while (ret == AVERROR(EAGAIN));
2407 if (ret != 0)
2408 break;
2409 read_size += pkt->size;
2410 st = ic->streams[pkt->stream_index];
2411 if (pkt->pts != AV_NOPTS_VALUE &&
2412 (st->start_time != AV_NOPTS_VALUE ||
2413 st->first_dts != AV_NOPTS_VALUE)) {
2414 if (pkt->duration == 0) {
2415 ff_compute_frame_duration(ic, &num, &den, st, st->parser, pkt);
2416 if (den && num) {
2417 pkt->duration = av_rescale_rnd(1,
2418 num * (int64_t) st->time_base.den,
2419 den * (int64_t) st->time_base.num,
2420 AV_ROUND_DOWN);
2421 }
2422 }
2423 duration = pkt->pts + pkt->duration;
2424 found_duration = 1;
2425 if (st->start_time != AV_NOPTS_VALUE)
2426 duration -= st->start_time;
2427 else
2428 duration -= st->first_dts;
2429 if (duration > 0) {
2430 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 ||
2431 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2432 st->duration = duration;
2433 st->info->last_duration = duration;
2434 }
2435 }
2436 av_free_packet(pkt);
2437 }
2438
2439 /* check if all audio/video streams have valid duration */
2440 if (!is_end) {
2441 is_end = 1;
2442 for (i = 0; i < ic->nb_streams; i++) {
2443 st = ic->streams[i];
2444 switch (st->codec->codec_type) {
2445 case AVMEDIA_TYPE_VIDEO:
2446 case AVMEDIA_TYPE_AUDIO:
2447 if (st->duration == AV_NOPTS_VALUE)
2448 is_end = 0;
2449 }
2450 }
2451 }
2452 } while (!is_end &&
2453 offset &&
2454 ++retry <= DURATION_MAX_RETRY);
2455
2456 av_opt_set(ic, "skip_changes", "0", AV_OPT_SEARCH_CHILDREN);
2457
2458 /* warn about audio/video streams whose duration could not be estimated */
2459 for (i = 0; i < ic->nb_streams; i++) {
2460 st = ic->streams[i];
2461 if (st->duration == AV_NOPTS_VALUE) {
2462 switch (st->codec->codec_type) {
2463 case AVMEDIA_TYPE_VIDEO:
2464 case AVMEDIA_TYPE_AUDIO:
2465 if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
2466 av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i);
2467 } else
2468 av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i);
2469 }
2470 }
2471 }
2472 fill_all_stream_timings(ic);
2473
2474 avio_seek(ic->pb, old_offset, SEEK_SET);
2475 for (i = 0; i < ic->nb_streams; i++) {
2476 int j;
2477
2478 st = ic->streams[i];
2479 st->cur_dts = st->first_dts;
2480 st->last_IP_pts = AV_NOPTS_VALUE;
2481 st->last_dts_for_order_check = AV_NOPTS_VALUE;
2482 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
2483 st->pts_buffer[j] = AV_NOPTS_VALUE;
2484 }
2485 }
2486
2487 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2488 {
2489 int64_t file_size;
2490
2491 /* get the file size, if possible */
2492 if (ic->iformat->flags & AVFMT_NOFILE) {
2493 file_size = 0;
2494 } else {
2495 file_size = avio_size(ic->pb);
2496 file_size = FFMAX(0, file_size);
2497 }
2498
2499 if ((!strcmp(ic->iformat->name, "mpeg") ||
2500 !strcmp(ic->iformat->name, "mpegts")) &&
2501 file_size && ic->pb->seekable) {
2502 /* get accurate estimate from the PTSes */
2503 estimate_timings_from_pts(ic, old_offset);
2504 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2505 } else if (has_duration(ic)) {
2506 /* at least one component has timings - we use them for all
2507 * the components */
2508 fill_all_stream_timings(ic);
2509 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2510 } else {
2511 /* less precise: use bitrate info */
2512 estimate_timings_from_bit_rate(ic);
2513 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2514 }
2515 update_stream_timings(ic);
2516
2517 {
2518 int i;
2519 AVStream av_unused *st;
2520 for (i = 0; i < ic->nb_streams; i++) {
2521 st = ic->streams[i];
2522 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2523 (double) st->start_time / AV_TIME_BASE,
2524 (double) st->duration / AV_TIME_BASE);
2525 }
2526 av_dlog(ic,
2527 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2528 (double) ic->start_time / AV_TIME_BASE,
2529 (double) ic->duration / AV_TIME_BASE,
2530 ic->bit_rate / 1000);
2531 }
2532 }
2533
2534 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2535 {
2536 AVCodecContext *avctx = st->codec;
2537
2538 #define FAIL(errmsg) do { \
2539 if (errmsg_ptr) \
2540 *errmsg_ptr = errmsg; \
2541 return 0; \
2542 } while (0)
2543
2544 if ( avctx->codec_id == AV_CODEC_ID_NONE
2545 && avctx->codec_type != AVMEDIA_TYPE_DATA)
2546 FAIL("unknown codec");
2547 switch (avctx->codec_type) {
2548 case AVMEDIA_TYPE_AUDIO:
2549 if (!avctx->frame_size && determinable_frame_size(avctx))
2550 FAIL("unspecified frame size");
2551 if (st->info->found_decoder >= 0 &&
2552 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2553 FAIL("unspecified sample format");
2554 if (!avctx->sample_rate)
2555 FAIL("unspecified sample rate");
2556 if (!avctx->channels)
2557 FAIL("unspecified number of channels");
2558 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2559 FAIL("no decodable DTS frames");
2560 break;
2561 case AVMEDIA_TYPE_VIDEO:
2562 if (!avctx->width)
2563 FAIL("unspecified size");
2564 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2565 FAIL("unspecified pixel format");
2566 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2567 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2568 FAIL("no frame in rv30/40 and no sar");
2569 break;
2570 case AVMEDIA_TYPE_SUBTITLE:
2571 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2572 FAIL("unspecified size");
2573 break;
2574 case AVMEDIA_TYPE_DATA:
2575 if (avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2576 }
2577
2578 return 1;
2579 }
2580
2581 /* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
2582 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
2583 AVDictionary **options)
2584 {
2585 const AVCodec *codec;
2586 int got_picture = 1, ret = 0;
2587 AVFrame *frame = av_frame_alloc();
2588 AVSubtitle subtitle;
2589 AVPacket pkt = *avpkt;
2590
2591 if (!frame)
2592 return AVERROR(ENOMEM);
2593
2594 if (!avcodec_is_open(st->codec) &&
2595 st->info->found_decoder <= 0 &&
2596 (st->codec->codec_id != -st->info->found_decoder || !st->codec->codec_id)) {
2597 AVDictionary *thread_opt = NULL;
2598
2599 codec = find_decoder(s, st, st->codec->codec_id);
2600
2601 if (!codec) {
2602 st->info->found_decoder = -st->codec->codec_id;
2603 ret = -1;
2604 goto fail;
2605 }
2606
2607 /* Force thread count to 1 since the H.264 decoder will not extract
2608 * SPS and PPS to extradata during multi-threaded decoding. */
2609 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2610 if (s->codec_whitelist)
2611 av_dict_set(options ? options : &thread_opt, "codec_whitelist", s->codec_whitelist, 0);
2612 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2613 if (!options)
2614 av_dict_free(&thread_opt);
2615 if (ret < 0) {
2616 st->info->found_decoder = -st->codec->codec_id;
2617 goto fail;
2618 }
2619 st->info->found_decoder = 1;
2620 } else if (!st->info->found_decoder)
2621 st->info->found_decoder = 1;
2622
2623 if (st->info->found_decoder < 0) {
2624 ret = -1;
2625 goto fail;
2626 }
2627
2628 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2629 ret >= 0 &&
2630 (!has_codec_parameters(st, NULL) || !has_decode_delay_been_guessed(st) ||
2631 (!st->codec_info_nb_frames &&
2632 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2633 got_picture = 0;
2634 switch (st->codec->codec_type) {
2635 case AVMEDIA_TYPE_VIDEO:
2636 ret = avcodec_decode_video2(st->codec, frame,
2637 &got_picture, &pkt);
2638 break;
2639 case AVMEDIA_TYPE_AUDIO:
2640 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2641 break;
2642 case AVMEDIA_TYPE_SUBTITLE:
2643 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2644 &got_picture, &pkt);
2645 ret = pkt.size;
2646 break;
2647 default:
2648 break;
2649 }
2650 if (ret >= 0) {
2651 if (got_picture)
2652 st->nb_decoded_frames++;
2653 pkt.data += ret;
2654 pkt.size -= ret;
2655 ret = got_picture;
2656 }
2657 }
2658
2659 if (!pkt.data && !got_picture)
2660 ret = -1;
2661
2662 fail:
2663 av_frame_free(&frame);
2664 return ret;
2665 }
2666
2667 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2668 {
2669 while (tags->id != AV_CODEC_ID_NONE) {
2670 if (tags->id == id)
2671 return tags->tag;
2672 tags++;
2673 }
2674 return 0;
2675 }
2676
2677 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2678 {
2679 int i;
2680 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2681 if (tag == tags[i].tag)
2682 return tags[i].id;
2683 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2684 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2685 return tags[i].id;
2686 return AV_CODEC_ID_NONE;
2687 }
2688
2689 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2690 {
2691 if (flt) {
2692 switch (bps) {
2693 case 32:
2694 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2695 case 64:
2696 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2697 default:
2698 return AV_CODEC_ID_NONE;
2699 }
2700 } else {
2701 bps += 7;
2702 bps >>= 3;
2703 if (sflags & (1 << (bps - 1))) {
2704 switch (bps) {
2705 case 1:
2706 return AV_CODEC_ID_PCM_S8;
2707 case 2:
2708 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2709 case 3:
2710 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2711 case 4:
2712 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2713 default:
2714 return AV_CODEC_ID_NONE;
2715 }
2716 } else {
2717 switch (bps) {
2718 case 1:
2719 return AV_CODEC_ID_PCM_U8;
2720 case 2:
2721 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2722 case 3:
2723 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2724 case 4:
2725 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2726 default:
2727 return AV_CODEC_ID_NONE;
2728 }
2729 }
2730 }
2731 }
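/* A few example mappings for the helper above (sketch); sflags is a bitmask
 * in which bit (bytes_per_sample - 1) marks that sample width as signed: */
#if 0
enum AVCodecID id16 = ff_get_pcm_codec_id(16, 0, 0, 0xffff); /* -> AV_CODEC_ID_PCM_S16LE */
enum AVCodecID idf  = ff_get_pcm_codec_id(32, 1, 1, 0);      /* -> AV_CODEC_ID_PCM_F32BE */
enum AVCodecID id8  = ff_get_pcm_codec_id( 8, 0, 0, 0);      /* -> AV_CODEC_ID_PCM_U8    */
#endif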
2732
2733 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2734 {
2735 unsigned int tag;
2736 if (!av_codec_get_tag2(tags, id, &tag))
2737 return 0;
2738 return tag;
2739 }
2740
2741 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2742 unsigned int *tag)
2743 {
2744 int i;
2745 for (i = 0; tags && tags[i]; i++) {
2746 const AVCodecTag *codec_tags = tags[i];
2747 while (codec_tags->id != AV_CODEC_ID_NONE) {
2748 if (codec_tags->id == id) {
2749 *tag = codec_tags->tag;
2750 return 1;
2751 }
2752 codec_tags++;
2753 }
2754 }
2755 return 0;
2756 }
2757
2758 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2759 {
2760 int i;
2761 for (i = 0; tags && tags[i]; i++) {
2762 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2763 if (id != AV_CODEC_ID_NONE)
2764 return id;
2765 }
2766 return AV_CODEC_ID_NONE;
2767 }
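/* Example lookups with the tag helpers above; the public RIFF/AVI video tag
 * list is used here purely as a convenient example table: */
#if 0
const AVCodecTag *const tag_lists[] = { avformat_get_riff_video_tags(), NULL };
unsigned int tag;
enum AVCodecID id = av_codec_get_id(tag_lists, MKTAG('H', '2', '6', '4'));
if (av_codec_get_tag2(tag_lists, AV_CODEC_ID_MPEG4, &tag))
    av_log(NULL, AV_LOG_DEBUG, "mpeg4 fourcc 0x%08x\n", tag);
#endif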
2768
2769 static void compute_chapters_end(AVFormatContext *s)
2770 {
2771 unsigned int i, j;
2772 int64_t max_time = s->duration +
2773 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2774
2775 for (i = 0; i < s->nb_chapters; i++)
2776 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2777 AVChapter *ch = s->chapters[i];
2778 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2779 ch->time_base)
2780 : INT64_MAX;
2781
2782 for (j = 0; j < s->nb_chapters; j++) {
2783 AVChapter *ch1 = s->chapters[j];
2784 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2785 ch->time_base);
2786 if (j != i && next_start > ch->start && next_start < end)
2787 end = next_start;
2788 }
2789 ch->end = (end == INT64_MAX) ? ch->start : end;
2790 }
2791 }
2792
2793 static int get_std_framerate(int i)
2794 {
2795 if (i < 30*12)
2796 return (i + 1) * 1001;
2797 i -= 30*12;
2798
2799 if (i < 7)
2800 return ((const int[]) { 40, 48, 50, 60, 80, 120, 240})[i] * 1001 * 12;
2801
2802 i -= 7;
2803
2804 return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;
2805 }
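/* For reference, the values above are numerators over a common denominator
 * of 12*1001, so index i corresponds to a frame rate of
 *   i = 0..359   -> (i + 1)/12 fps (1/12 fps steps up to 30 fps)
 *   i = 360..366 -> 40, 48, 50, 60, 80, 120, 240 fps (exact)
 *   i = 367..372 -> 24000/1001, 30000/1001, 60000/1001, 12000/1001,
 *                   15000/1001, 48000/1001 fps (NTSC-style rates)
 * This is the candidate set the fps-guessing code below tests against. */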
2806
2807 /* Is the time base unreliable?
2808 * This is a heuristic to balance between quick acceptance of the values in
2809 * the headers vs. some extra checks.
2810 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2811 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2812 * And there are "variable" fps files this needs to detect as well. */
2813 static int tb_unreliable(AVCodecContext *c)
2814 {
2815 if (c->time_base.den >= 101L * c->time_base.num ||
2816 c->time_base.den < 5L * c->time_base.num ||
2817 // c->codec_tag == AV_RL32("DIVX") ||
2818 // c->codec_tag == AV_RL32("XVID") ||
2819 c->codec_tag == AV_RL32("mp4v") ||
2820 c->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
2821 c->codec_id == AV_CODEC_ID_GIF ||
2822 c->codec_id == AV_CODEC_ID_H264)
2823 return 1;
2824 return 0;
2825 }
2826
2827 int ff_alloc_extradata(AVCodecContext *avctx, int size)
2828 {
2829 int ret;
2830
2831 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2832 avctx->extradata = NULL;
2833 avctx->extradata_size = 0;
2834 return AVERROR(EINVAL);
2835 }
2836 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2837 if (avctx->extradata) {
2838 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2839 avctx->extradata_size = size;
2840 ret = 0;
2841 } else {
2842 avctx->extradata_size = 0;
2843 ret = AVERROR(ENOMEM);
2844 }
2845 return ret;
2846 }
2847
2848 int ff_get_extradata(AVCodecContext *avctx, AVIOContext *pb, int size)
2849 {
2850 int ret = ff_alloc_extradata(avctx, size);
2851 if (ret < 0)
2852 return ret;
2853 ret = avio_read(pb, avctx->extradata, size);
2854 if (ret != size) {
2855 av_freep(&avctx->extradata);
2856 avctx->extradata_size = 0;
2857 av_log(avctx, AV_LOG_ERROR, "Failed to read extradata of size %d\n", size);
2858 return ret < 0 ? ret : AVERROR_INVALIDDATA;
2859 }
2860
2861 return ret;
2862 }
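/* Sketch of how a demuxer typically uses the helper above when it meets a
 * header chunk carrying codec private data (chunk_size is a placeholder): */
#if 0
if (chunk_size > 0) {
    int ret = ff_get_extradata(st->codec, s->pb, chunk_size);
    if (ret < 0)
        return ret; /* extradata is freed and its size reset on failure */
}
#endif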
2863
2864 int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
2865 {
2866 int i, j;
2867 int64_t last = st->info->last_dts;
2868
2869 if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
2870 && ts - (uint64_t)last < INT64_MAX) {
2871 double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
2872 int64_t duration = ts - last;
2873
2874 if (!st->info->duration_error)
2875 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2876 if (!st->info->duration_error)
2877 return AVERROR(ENOMEM);
2878
2879 // if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2880 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2881 for (i = 0; i<MAX_STD_TIMEBASES; i++) {
2882 if (st->info->duration_error[0][1][i] < 1e10) {
2883 int framerate = get_std_framerate(i);
2884 double sdts = dts*framerate/(1001*12);
2885 for (j= 0; j<2; j++) {
2886 int64_t ticks = llrint(sdts+j*0.5);
2887 double error= sdts - ticks + j*0.5;
2888 st->info->duration_error[j][0][i] += error;
2889 st->info->duration_error[j][1][i] += error*error;
2890 }
2891 }
2892 }
2893 st->info->duration_count++;
2894 st->info->rfps_duration_sum += duration;
2895
2896 if (st->info->duration_count % 10 == 0) {
2897 int n = st->info->duration_count;
2898 for (i = 0; i<MAX_STD_TIMEBASES; i++) {
2899 if (st->info->duration_error[0][1][i] < 1e10) {
2900 double a0 = st->info->duration_error[0][0][i] / n;
2901 double error0 = st->info->duration_error[0][1][i] / n - a0*a0;
2902 double a1 = st->info->duration_error[1][0][i] / n;
2903 double error1 = st->info->duration_error[1][1][i] / n - a1*a1;
2904 if (error0 > 0.04 && error1 > 0.04) {
2905 st->info->duration_error[0][1][i] = 2e10;
2906 st->info->duration_error[1][1][i] = 2e10;
2907 }
2908 }
2909 }
2910 }
2911
2912 // ignore the first 4 values, they might have some random jitter
2913 if (st->info->duration_count > 3 && is_relative(ts) == is_relative(last))
2914 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2915 }
2916 if (ts != AV_NOPTS_VALUE)
2917 st->info->last_dts = ts;
2918
2919 return 0;
2920 }
2921
2922 void ff_rfps_calculate(AVFormatContext *ic)
2923 {
2924 int i, j;
2925
2926 for (i = 0; i < ic->nb_streams; i++) {
2927 AVStream *st = ic->streams[i];
2928
2929 if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO)
2930 continue;
2931 // the check for tb_unreliable() is not completely correct, since this is not about handling
2932 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2933 // ipmovie.c produces.
2934 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2935 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2936 if (st->info->duration_count>1 && !st->r_frame_rate.num
2937 && tb_unreliable(st->codec)) {
2938 int num = 0;
2939 double best_error= 0.01;
2940 AVRational ref_rate = st->r_frame_rate.num ? st->r_frame_rate : av_inv_q(st->time_base);
2941
2942 for (j= 0; j<MAX_STD_TIMEBASES; j++) {
2943 int k;
2944
2945 if (st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2946 continue;
2947 if (!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2948 continue;
2949
2950 if (av_q2d(st->time_base) * st->info->rfps_duration_sum / st->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j))
2951 continue;
2952
2953 for (k= 0; k<2; k++) {
2954 int n = st->info->duration_count;
2955 double a= st->info->duration_error[k][0][j] / n;
2956 double error= st->info->duration_error[k][1][j]/n - a*a;
2957
2958 if (error < best_error && best_error> 0.000000001) {
2959 best_error= error;
2960 num = get_std_framerate(j);
2961 }
2962 if (error < 0.02)
2963 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2964 }
2965 }
2966 // do not increase frame rate by more than 1 % in order to match a standard rate.
2967 if (num && (!ref_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(ref_rate)))
2968 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2969 }
2970 if ( !st->avg_frame_rate.num
2971 && st->r_frame_rate.num && st->info->rfps_duration_sum
2972 && st->info->codec_info_duration <= 0
2973 && st->info->duration_count > 2
2974 && fabs(1.0 / (av_q2d(st->r_frame_rate) * av_q2d(st->time_base)) - st->info->rfps_duration_sum / (double)st->info->duration_count) <= 1.0
2975 ) {
2976 av_log(ic, AV_LOG_DEBUG, "Setting avg frame rate based on r frame rate\n");
2977 st->avg_frame_rate = st->r_frame_rate;
2978 }
2979
2980 av_freep(&st->info->duration_error);
2981 st->info->last_dts = AV_NOPTS_VALUE;
2982 st->info->duration_count = 0;
2983 st->info->rfps_duration_sum = 0;
2984 }
2985 }
2986
2987 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2988 {
2989 int i, count, ret = 0, j;
2990 int64_t read_size;
2991 AVStream *st;
2992 AVPacket pkt1, *pkt;
2993 int64_t old_offset = avio_tell(ic->pb);
2994 // new streams might appear; there are no options for those
2995 int orig_nb_streams = ic->nb_streams;
2996 int flush_codecs;
2997 int64_t max_analyze_duration = ic->max_analyze_duration2;
2998 int64_t max_stream_analyze_duration;
2999 int64_t probesize = ic->probesize2;
3000
3001 if (!max_analyze_duration)
3002 max_analyze_duration = ic->max_analyze_duration;
3003 if (ic->probesize)
3004 probesize = ic->probesize;
3005 flush_codecs = probesize > 0;
3006
3007 av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN);
3008
3009 max_stream_analyze_duration = max_analyze_duration;
3010 if (!max_analyze_duration) {
3011 max_stream_analyze_duration =
3012 max_analyze_duration = 5*AV_TIME_BASE;
3013 if (!strcmp(ic->iformat->name, "flv"))
3014 max_stream_analyze_duration = 30*AV_TIME_BASE;
3015 }
3016
3017 if (ic->pb)
3018 av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d\n",
3019 avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count);
3020
3021 for (i = 0; i < ic->nb_streams; i++) {
3022 const AVCodec *codec;
3023 AVDictionary *thread_opt = NULL;
3024 st = ic->streams[i];
3025
3026 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3027 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3028 /* if (!st->time_base.num)
3029 st->time_base = */
3030 if (!st->codec->time_base.num)
3031 st->codec->time_base = st->time_base;
3032 }
3033 // initialize a parser; it is needed here only for the parser split() handling below
3034 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
3035 st->parser = av_parser_init(st->codec->codec_id);
3036 if (st->parser) {
3037 if (st->need_parsing == AVSTREAM_PARSE_HEADERS) {
3038 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
3039 } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
3040 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
3041 }
3042 } else if (st->need_parsing) {
3043 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
3044 "%s, packets or times may be invalid.\n",
3045 avcodec_get_name(st->codec->codec_id));
3046 }
3047 }
3048 codec = find_decoder(ic, st, st->codec->codec_id);
3049
3050 /* Force thread count to 1 since the H.264 decoder will not extract
3051 * SPS and PPS to extradata during multi-threaded decoding. */
3052 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
3053
3054 if (ic->codec_whitelist)
3055 av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0);
3056
3057 /* Ensure that subtitle_header is properly set. */
3058 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
3059 && codec && !st->codec->codec) {
3060 if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3061 av_log(ic, AV_LOG_WARNING,
3062 "Failed to open codec in av_find_stream_info\n");
3063 }
3064
3065 // Try to just open decoders, in case this is enough to get parameters.
3066 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
3067 if (codec && !st->codec->codec)
3068 if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3069 av_log(ic, AV_LOG_WARNING,
3070 "Failed to open codec in av_find_stream_info\n");
3071 }
3072 if (!options)
3073 av_dict_free(&thread_opt);
3074 }
3075
3076 for (i = 0; i < ic->nb_streams; i++) {
3077 #if FF_API_R_FRAME_RATE
3078 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
3079 #endif
3080 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
3081 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
3082 }
3083
3084 count = 0;
3085 read_size = 0;
3086 for (;;) {
3087 int analyzed_all_streams;
3088 if (ff_check_interrupt(&ic->interrupt_callback)) {
3089 ret = AVERROR_EXIT;
3090 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
3091 break;
3092 }
3093
3094 /* check if one codec still needs to be handled */
3095 for (i = 0; i < ic->nb_streams; i++) {
3096 int fps_analyze_framecount = 20;
3097
3098 st = ic->streams[i];
3099 if (!has_codec_parameters(st, NULL))
3100 break;
3101 /* If the timebase is coarse (like the usual millisecond precision
3102 * of mkv), we need to analyze more frames to reliably arrive at
3103 * the correct fps. */
3104 if (av_q2d(st->time_base) > 0.0005)
3105 fps_analyze_framecount *= 2;
3106 if (!tb_unreliable(st->codec))
3107 fps_analyze_framecount = 0;
3108 if (ic->fps_probe_size >= 0)
3109 fps_analyze_framecount = ic->fps_probe_size;
3110 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
3111 fps_analyze_framecount = 0;
3112 /* variable fps and no guess at the real fps */
3113 if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&
3114 st->info->duration_count < fps_analyze_framecount &&
3115 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3116 break;
3117 if (st->parser && st->parser->parser->split &&
3118 !st->codec->extradata)
3119 break;
3120 if (st->first_dts == AV_NOPTS_VALUE &&
3121 !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
3122 st->codec_info_nb_frames < ic->max_ts_probe &&
3123 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3124 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
3125 break;
3126 }
3127 analyzed_all_streams = 0;
3128 if (i == ic->nb_streams) {
3129 analyzed_all_streams = 1;
3130 /* NOTE: If the format has no header, then we need to read some
3131 * packets to get most of the streams, so we cannot stop here. */
3132 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
3133 /* If we found the info for all the codecs, we can stop. */
3134 ret = count;
3135 av_log(ic, AV_LOG_DEBUG, "All info found\n");
3136 flush_codecs = 0;
3137 break;
3138 }
3139 }
3140 /* We did not get all the codec info, but we read too much data. */
3141 if (read_size >= probesize) {
3142 ret = count;
3143 av_log(ic, AV_LOG_DEBUG,
3144 "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);
3145 for (i = 0; i < ic->nb_streams; i++)
3146 if (!ic->streams[i]->r_frame_rate.num &&
3147 ic->streams[i]->info->duration_count <= 1 &&
3148 ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
3149 strcmp(ic->iformat->name, "image2"))
3150 av_log(ic, AV_LOG_WARNING,
3151 "Stream #%d: not enough frames to estimate rate; "
3152 "consider increasing probesize\n", i);
3153 break;
3154 }
3155
3156 /* NOTE: A new stream can be added here if the file has no header
3157 * (AVFMTCTX_NOHEADER). */
3158 ret = read_frame_internal(ic, &pkt1);
3159 if (ret == AVERROR(EAGAIN))
3160 continue;
3161
3162 if (ret < 0) {
3163 /* EOF or error*/
3164 break;
3165 }
3166
3167 if (ic->flags & AVFMT_FLAG_NOBUFFER)
3168 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
3169 {
3170 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
3171 &ic->packet_buffer_end);
3172 if (!pkt) {
3173 ret = AVERROR(ENOMEM);
3174 goto find_stream_info_err;
3175 }
3176 if ((ret = av_dup_packet(pkt)) < 0)
3177 goto find_stream_info_err;
3178 }
3179
3180 st = ic->streams[pkt->stream_index];
3181 if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3182 read_size += pkt->size;
3183
3184 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
3185 /* check for non-increasing dts */
3186 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3187 st->info->fps_last_dts >= pkt->dts) {
3188 av_log(ic, AV_LOG_DEBUG,
3189 "Non-increasing DTS in stream %d: packet %d with DTS "
3190 "%"PRId64", packet %d with DTS %"PRId64"\n",
3191 st->index, st->info->fps_last_dts_idx,
3192 st->info->fps_last_dts, st->codec_info_nb_frames,
3193 pkt->dts);
3194 st->info->fps_first_dts =
3195 st->info->fps_last_dts = AV_NOPTS_VALUE;
3196 }
3197 /* Check for a discontinuity in dts. If the difference in dts
3198 * is more than 1000 times the average packet duration in the
3199 * sequence, we treat it as a discontinuity. */
3200 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3201 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
3202 (pkt->dts - st->info->fps_last_dts) / 1000 >
3203 (st->info->fps_last_dts - st->info->fps_first_dts) /
3204 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
3205 av_log(ic, AV_LOG_WARNING,
3206 "DTS discontinuity in stream %d: packet %d with DTS "
3207 "%"PRId64", packet %d with DTS %"PRId64"\n",
3208 st->index, st->info->fps_last_dts_idx,
3209 st->info->fps_last_dts, st->codec_info_nb_frames,
3210 pkt->dts);
3211 st->info->fps_first_dts =
3212 st->info->fps_last_dts = AV_NOPTS_VALUE;
3213 }
3214
3215 /* update stored dts values */
3216 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
3217 st->info->fps_first_dts = pkt->dts;
3218 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
3219 }
3220 st->info->fps_last_dts = pkt->dts;
3221 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
3222 }
3223 if (st->codec_info_nb_frames>1) {
3224 int64_t t = 0;
3225
3226 if (st->time_base.den > 0)
3227 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
3228 if (st->avg_frame_rate.num > 0)
3229 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
3230
3231 if ( t == 0
3232 && st->codec_info_nb_frames>30
3233 && st->info->fps_first_dts != AV_NOPTS_VALUE
3234 && st->info->fps_last_dts != AV_NOPTS_VALUE)
3235 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
3236
3237 if (t >= (analyzed_all_streams ? max_analyze_duration : max_stream_analyze_duration)) {
3238 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds\n",
3239 max_analyze_duration,
3240 t);
3241 if (ic->flags & AVFMT_FLAG_NOBUFFER)
3242 av_packet_unref(pkt);
3243 break;
3244 }
3245 if (pkt->duration) {
3246 st->info->codec_info_duration += pkt->duration;
3247 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame ==2 ? st->parser->repeat_pict + 1 : 2;
3248 }
3249 }
3250 #if FF_API_R_FRAME_RATE
3251 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3252 ff_rfps_add_frame(ic, st, pkt->dts);
3253 #endif
3254 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
3255 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
3256 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
3257 if (ff_alloc_extradata(st->codec, i))
3258 return AVERROR(ENOMEM);
3259 memcpy(st->codec->extradata, pkt->data,
3260 st->codec->extradata_size);
3261 }
3262 }
3263
3264 /* If still no information, we try to open the codec and to
3265 * decompress the frame. We try to avoid that in most cases as
3266 * it takes longer and uses more memory. For MPEG-4, we need to
3267 * decompress for QuickTime.
3268 *
3269 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
3270 * least one frame of codec data, this makes sure the codec initializes
3271 * the channel configuration and does not only trust the values from
3272 * the container. */
3273 try_decode_frame(ic, st, pkt,
3274 (options && i < orig_nb_streams) ? &options[i] : NULL);
3275
3276 if (ic->flags & AVFMT_FLAG_NOBUFFER)
3277 av_packet_unref(pkt);
3278
3279 st->codec_info_nb_frames++;
3280 count++;
3281 }
3282
3283 if (flush_codecs) {
3284 AVPacket empty_pkt = { 0 };
3285 int err = 0;
3286 av_init_packet(&empty_pkt);
3287
3288 for (i = 0; i < ic->nb_streams; i++) {
3289
3290 st = ic->streams[i];
3291
3292 /* flush the decoders */
3293 if (st->info->found_decoder == 1) {
3294 do {
3295 err = try_decode_frame(ic, st, &empty_pkt,
3296 (options && i < orig_nb_streams)
3297 ? &options[i] : NULL);
3298 } while (err > 0 && !has_codec_parameters(st, NULL));
3299
3300 if (err < 0) {
3301 av_log(ic, AV_LOG_INFO,
3302 "decoding for stream %d failed\n", st->index);
3303 }
3304 }
3305 }
3306 }
3307
3308 // close codecs which were opened in try_decode_frame()
3309 for (i = 0; i < ic->nb_streams; i++) {
3310 st = ic->streams[i];
3311 avcodec_close(st->codec);
3312 }
3313
3314 ff_rfps_calculate(ic);
3315
3316 for (i = 0; i < ic->nb_streams; i++) {
3317 st = ic->streams[i];
3318 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3319 if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample) {
3320 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3321 if (avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), tag) == st->codec->pix_fmt)
3322 st->codec->codec_tag= tag;
3323 }
3324
3325 /* estimate average framerate if not set by demuxer */
3326 if (st->info->codec_info_duration_fields &&
3327 !st->avg_frame_rate.num &&
3328 st->info->codec_info_duration) {
3329 int best_fps = 0;
3330 double best_error = 0.01;
3331
3332 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3333 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3334 st->info->codec_info_duration < 0)
3335 continue;
3336 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3337 st->info->codec_info_duration_fields * (int64_t) st->time_base.den,
3338 st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000);
3339
3340 /* Round guessed framerate to a "standard" framerate if it's
3341 * within 1% of the original estimate. */
3342 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
3343 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
3344 double error = fabs(av_q2d(st->avg_frame_rate) /
3345 av_q2d(std_fps) - 1);
3346
3347 if (error < best_error) {
3348 best_error = error;
3349 best_fps = std_fps.num;
3350 }
3351 }
3352 if (best_fps)
3353 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3354 best_fps, 12 * 1001, INT_MAX);
3355 }
3356
3357 if (!st->r_frame_rate.num) {
3358 if ( st->codec->time_base.den * (int64_t) st->time_base.num
3359 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t) st->time_base.den) {
3360 st->r_frame_rate.num = st->codec->time_base.den;
3361 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3362 } else {
3363 st->r_frame_rate.num = st->time_base.den;
3364 st->r_frame_rate.den = st->time_base.num;
3365 }
3366 }
3367 if (st->display_aspect_ratio.num && st->display_aspect_ratio.den) {
3368 AVRational hw_ratio = { st->codec->height, st->codec->width };
3369 st->sample_aspect_ratio = av_mul_q(st->display_aspect_ratio,
3370 hw_ratio);
3371 }
3372 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3373 if (!st->codec->bits_per_coded_sample)
3374 st->codec->bits_per_coded_sample =
3375 av_get_bits_per_sample(st->codec->codec_id);
3376 // set stream disposition based on audio service type
3377 switch (st->codec->audio_service_type) {
3378 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3379 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
3380 break;
3381 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3382 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
3383 break;
3384 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3385 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
3386 break;
3387 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3388 st->disposition = AV_DISPOSITION_COMMENT;
3389 break;
3390 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3391 st->disposition = AV_DISPOSITION_KARAOKE;
3392 break;
3393 }
3394 }
3395 }
3396
3397 if (probesize)
3398 estimate_timings(ic, old_offset);
3399
3400 av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN);
3401
3402 if (ret >= 0 && ic->nb_streams)
3403 /* EOF was reached before all codec parameters could be found. */
3404 ret = -1;
3405 for (i = 0; i < ic->nb_streams; i++) {
3406 const char *errmsg;
3407 st = ic->streams[i];
3408 if (!has_codec_parameters(st, &errmsg)) {
3409 char buf[256];
3410 avcodec_string(buf, sizeof(buf), st->codec, 0);
3411 av_log(ic, AV_LOG_WARNING,
3412 "Could not find codec parameters for stream %d (%s): %s\n"
3413 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3414 i, buf, errmsg);
3415 } else {
3416 ret = 0;
3417 }
3418 }
3419
3420 compute_chapters_end(ic);
3421
3422 find_stream_info_err:
3423 for (i = 0; i < ic->nb_streams; i++) {
3424 st = ic->streams[i];
3425 if (ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3426 ic->streams[i]->codec->thread_count = 0;
3427 if (st->info)
3428 av_freep(&st->info->duration_error);
3429 av_freep(&ic->streams[i]->info);
3430 }
3431 if (ic->pb)
3432 av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",
3433 avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);
3434 return ret;
3435 }
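/* Typical application-level call sequence around avformat_find_stream_info()
 * (sketch; error handling trimmed, "input.mkv" is a placeholder): */
#if 0
AVFormatContext *ic = NULL;
if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
    return -1;
if (avformat_find_stream_info(ic, NULL) < 0) {
    avformat_close_input(&ic);
    return -1;
}
/* ic->streams[i]->codec is now filled in as far as probing allowed; raise
 * the 'probesize' / 'analyzeduration' options if some parameters are still
 * missing. */
avformat_close_input(&ic);
#endif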
3436
3437 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3438 {
3439 int i, j;
3440
3441 for (i = 0; i < ic->nb_programs; i++) {
3442 if (ic->programs[i] == last) {
3443 last = NULL;
3444 } else {
3445 if (!last)
3446 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3447 if (ic->programs[i]->stream_index[j] == s)
3448 return ic->programs[i];
3449 }
3450 }
3451 return NULL;
3452 }
3453
3454 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
3455 int wanted_stream_nb, int related_stream,
3456 AVCodec **decoder_ret, int flags)
3457 {
3458 int i, nb_streams = ic->nb_streams;
3459 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3460 unsigned *program = NULL;
3461 const AVCodec *decoder = NULL, *best_decoder = NULL;
3462
3463 if (related_stream >= 0 && wanted_stream_nb < 0) {
3464 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3465 if (p) {
3466 program = p->stream_index;
3467 nb_streams = p->nb_stream_indexes;
3468 }
3469 }
3470 for (i = 0; i < nb_streams; i++) {
3471 int real_stream_index = program ? program[i] : i;
3472 AVStream *st = ic->streams[real_stream_index];
3473 AVCodecContext *avctx = st->codec;
3474 if (avctx->codec_type != type)
3475 continue;
3476 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3477 continue;
3478 if (wanted_stream_nb != real_stream_index &&
3479 st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
3480 AV_DISPOSITION_VISUAL_IMPAIRED))
3481 continue;
3482 if (type == AVMEDIA_TYPE_AUDIO && !avctx->channels)
3483 continue;
3484 if (decoder_ret) {
3485 decoder = find_decoder(ic, st, st->codec->codec_id);
3486 if (!decoder) {
3487 if (ret < 0)
3488 ret = AVERROR_DECODER_NOT_FOUND;
3489 continue;
3490 }
3491 }
3492 count = st->codec_info_nb_frames;
3493 bitrate = avctx->bit_rate;
3494 if (!bitrate)
3495 bitrate = avctx->rc_max_rate;
3496 multiframe = FFMIN(5, count);
3497 if ((best_multiframe > multiframe) ||
3498 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3499 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3500 continue;
3501 best_count = count;
3502 best_bitrate = bitrate;
3503 best_multiframe = multiframe;
3504 ret = real_stream_index;
3505 best_decoder = decoder;
3506 if (program && i == nb_streams - 1 && ret < 0) {
3507 program = NULL;
3508 nb_streams = ic->nb_streams;
3509 /* no related stream found, try again with everything */
3510 i = 0;
3511 }
3512 }
3513 if (decoder_ret)
3514 *decoder_ret = (AVCodec*)best_decoder;
3515 return ret;
3516 }
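/* Usage sketch for the stream selection helper above: pick the "best" video
 * stream together with a decoder for it in one call. */
#if 0
AVCodec *dec = NULL;
int video_idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if (video_idx >= 0) {
    AVStream *video_st = ic->streams[video_idx];
    /* dec, when non-NULL, can be passed directly to avcodec_open2() */
}
#endif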
3517
3518 /*******************************************************/
3519
3520 int av_read_play(AVFormatContext *s)
3521 {
3522 if (s->iformat->read_play)
3523 return s->iformat->read_play(s);
3524 if (s->pb)
3525 return avio_pause(s->pb, 0);
3526 return AVERROR(ENOSYS);
3527 }
3528
3529 int av_read_pause(AVFormatContext *s)
3530 {
3531 if (s->iformat->read_pause)
3532 return s->iformat->read_pause(s);
3533 if (s->pb)
3534 return avio_pause(s->pb, 1);
3535 return AVERROR(ENOSYS);
3536 }
3537
3538 void ff_free_stream(AVFormatContext *s, AVStream *st) {
3539 int j;
3540 av_assert0(s->nb_streams>0);
3541 av_assert0(s->streams[ s->nb_streams - 1 ] == st);
3542
3543 for (j = 0; j < st->nb_side_data; j++)
3544 av_freep(&st->side_data[j].data);
3545 av_freep(&st->side_data);
3546 st->nb_side_data = 0;
3547
3548 if (st->parser) {
3549 av_parser_close(st->parser);
3550 }
3551 if (st->attached_pic.data)
3552 av_free_packet(&st->attached_pic);
3553 av_dict_free(&st->metadata);
3554 av_freep(&st->probe_data.buf);
3555 av_freep(&st->index_entries);
3556 av_freep(&st->codec->extradata);
3557 av_freep(&st->codec->subtitle_header);
3558 av_freep(&st->codec);
3559 av_freep(&st->priv_data);
3560 if (st->info)
3561 av_freep(&st->info->duration_error);
3562 av_freep(&st->info);
3563 av_freep(&st->recommended_encoder_configuration);
3564 av_freep(&s->streams[ --s->nb_streams ]);
3565 }
3566
3567 void avformat_free_context(AVFormatContext *s)
3568 {
3569 int i;
3570
3571 if (!s)
3572 return;
3573
3574 av_opt_free(s);
3575 if (s->iformat && s->iformat->priv_class && s->priv_data)
3576 av_opt_free(s->priv_data);
3577 if (s->oformat && s->oformat->priv_class && s->priv_data)
3578 av_opt_free(s->priv_data);
3579
3580 for (i = s->nb_streams - 1; i >= 0; i--) {
3581 ff_free_stream(s, s->streams[i]);
3582 }
3583 for (i = s->nb_programs - 1; i >= 0; i--) {
3584 av_dict_free(&s->programs[i]->metadata);
3585 av_freep(&s->programs[i]->stream_index);
3586 av_freep(&s->programs[i]);
3587 }
3588 av_freep(&s->programs);
3589 av_freep(&s->priv_data);
3590 while (s->nb_chapters--) {
3591 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3592 av_freep(&s->chapters[s->nb_chapters]);
3593 }
3594 av_freep(&s->chapters);
3595 av_dict_free(&s->metadata);
3596 av_freep(&s->streams);
3597 av_freep(&s->internal);
3598 flush_packet_queue(s);
3599 av_free(s);
3600 }
3601
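/**
 * Illustrative usage sketch, not part of the original file: the usual
 * open/probe/close pairing around avformat_close_input(); "input.mkv" and
 * fmt_ctx are placeholder names.
 *
 * @code
 * AVFormatContext *fmt_ctx = NULL;
 * if (avformat_open_input(&fmt_ctx, "input.mkv", NULL, NULL) == 0) {
 *     avformat_find_stream_info(fmt_ctx, NULL);
 *     // ... read packets ...
 *     avformat_close_input(&fmt_ctx); // frees the context, sets fmt_ctx to NULL
 * }
 * @endcode
 */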
3602 void avformat_close_input(AVFormatContext **ps)
3603 {
3604 AVFormatContext *s;
3605 AVIOContext *pb;
3606
3607 if (!ps || !*ps)
3608 return;
3609
3610 s = *ps;
3611 pb = s->pb;
3612
3613 if ((s->iformat && strcmp(s->iformat->name, "image2") && s->iformat->flags & AVFMT_NOFILE) ||
3614 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3615 pb = NULL;
3616
3617 flush_packet_queue(s);
3618
3619 if (s->iformat)
3620 if (s->iformat->read_close)
3621 s->iformat->read_close(s);
3622
3623 avformat_free_context(s);
3624
3625 *ps = NULL;
3626
3627 avio_close(pb);
3628 }
3629
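/**
 * Illustrative muxer-side sketch for avformat_new_stream(), not part of the
 * original file: oc and enc are placeholders for an output AVFormatContext
 * and the chosen encoder.
 *
 * @code
 * AVStream *st = avformat_new_stream(oc, enc);
 * if (!st)
 *     return AVERROR(ENOMEM);
 * st->id        = oc->nb_streams - 1;
 * st->time_base = (AVRational){ 1, 90000 }; // a hint; the muxer may adjust it
 * @endcode
 */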
3630 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3631 {
3632 AVStream *st;
3633 int i;
3634 AVStream **streams;
3635
3636 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3637 return NULL;
3638 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3639 if (!streams)
3640 return NULL;
3641 s->streams = streams;
3642
3643 st = av_mallocz(sizeof(AVStream));
3644 if (!st)
3645 return NULL;
3646 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3647 av_free(st);
3648 return NULL;
3649 }
3650 st->info->last_dts = AV_NOPTS_VALUE;
3651
3652 st->codec = avcodec_alloc_context3(c);
3653 if (s->iformat) {
3654 /* no default bitrate if decoding */
3655 st->codec->bit_rate = 0;
3656
3657 /* default pts setting is MPEG-like */
3658 avpriv_set_pts_info(st, 33, 1, 90000);
3659 }
3660
3661 st->index = s->nb_streams;
3662 st->start_time = AV_NOPTS_VALUE;
3663 st->duration = AV_NOPTS_VALUE;
3664 /* Give cur_dts a defined start (a relative base when demuxing, 0 when
3665 * muxing) so that formats without any timestamps but with known durations
3666 * still get timestamps; formats with some unknown timestamps have their
3667 * first few packets buffered and the timestamps fixed before reaching the user. */
3668 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3669 st->first_dts = AV_NOPTS_VALUE;
3670 st->probe_packets = MAX_PROBE_PACKETS;
3671 st->pts_wrap_reference = AV_NOPTS_VALUE;
3672 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3673
3674 st->last_IP_pts = AV_NOPTS_VALUE;
3675 st->last_dts_for_order_check = AV_NOPTS_VALUE;
3676 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
3677 st->pts_buffer[i] = AV_NOPTS_VALUE;
3678
3679 st->sample_aspect_ratio = (AVRational) { 0, 1 };
3680
3681 #if FF_API_R_FRAME_RATE
3682 st->info->last_dts = AV_NOPTS_VALUE;
3683 #endif
3684 st->info->fps_first_dts = AV_NOPTS_VALUE;
3685 st->info->fps_last_dts = AV_NOPTS_VALUE;
3686
3687 st->inject_global_side_data = s->internal->inject_global_side_data;
3688
3689 s->streams[s->nb_streams++] = st;
3690 return st;
3691 }
3692
3693 AVProgram *av_new_program(AVFormatContext *ac, int id)
3694 {
3695 AVProgram *program = NULL;
3696 int i;
3697
3698 av_dlog(ac, "new_program: id=0x%04x\n", id);
3699
3700 for (i = 0; i < ac->nb_programs; i++)
3701 if (ac->programs[i]->id == id)
3702 program = ac->programs[i];
3703
3704 if (!program) {
3705 program = av_mallocz(sizeof(AVProgram));
3706 if (!program)
3707 return NULL;
3708 dynarray_add(&ac->programs, &ac->nb_programs, program);
3709 program->discard = AVDISCARD_NONE;
3710 }
3711 program->id = id;
3712 program->pts_wrap_reference = AV_NOPTS_VALUE;
3713 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3714
3715 program->start_time =
3716 program->end_time = AV_NOPTS_VALUE;
3717
3718 return program;
3719 }
3720
3721 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
3722 int64_t start, int64_t end, const char *title)
3723 {
3724 AVChapter *chapter = NULL;
3725 int i;
3726
3727 if (end != AV_NOPTS_VALUE && start > end) {
3728 av_log(s, AV_LOG_ERROR, "Chapter end time %"PRId64" before start %"PRId64"\n", end, start);
3729 return NULL;
3730 }
3731
3732 for (i = 0; i < s->nb_chapters; i++)
3733 if (s->chapters[i]->id == id)
3734 chapter = s->chapters[i];
3735
3736 if (!chapter) {
3737 chapter = av_mallocz(sizeof(AVChapter));
3738 if (!chapter)
3739 return NULL;
3740 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3741 }
3742 av_dict_set(&chapter->metadata, "title", title, 0);
3743 chapter->id = id;
3744 chapter->time_base = time_base;
3745 chapter->start = start;
3746 chapter->end = end;
3747
3748 return chapter;
3749 }
3750
3751 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
3752 {
3753 int i, j;
3754 AVProgram *program = NULL;
3755 void *tmp;
3756
3757 if (idx >= ac->nb_streams) {
3758 av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
3759 return;
3760 }
3761
3762 for (i = 0; i < ac->nb_programs; i++) {
3763 if (ac->programs[i]->id != progid)
3764 continue;
3765 program = ac->programs[i];
3766 for (j = 0; j < program->nb_stream_indexes; j++)
3767 if (program->stream_index[j] == idx)
3768 return;
3769
3770 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3771 if (!tmp)
3772 return;
3773 program->stream_index = tmp;
3774 program->stream_index[program->nb_stream_indexes++] = idx;
3775 return;
3776 }
3777 }
3778
3779 uint64_t ff_ntp_time(void)
3780 {
3781 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3782 }
3783
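/**
 * Illustrative usage sketch, not part of the original file:
 * av_get_frame_filename() expands a single %d (with optional zero-pad width)
 * in an image-sequence pattern; the pattern below is just an example.
 *
 * @code
 * char name[1024];
 * if (av_get_frame_filename(name, sizeof(name), "frame%04d.png", 42) == 0) {
 *     // name now holds "frame0042.png"
 * }
 * @endcode
 */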
3784 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
3785 {
3786 const char *p;
3787 char *q, buf1[20], c;
3788 int nd, len, percentd_found;
3789
3790 q = buf;
3791 p = path;
3792 percentd_found = 0;
3793 for (;;) {
3794 c = *p++;
3795 if (c == '\0')
3796 break;
3797 if (c == '%') {
3798 do {
3799 nd = 0;
3800 while (av_isdigit(*p))
3801 nd = nd * 10 + *p++ - '0';
3802 c = *p++;
3803 } while (av_isdigit(c));
3804
3805 switch (c) {
3806 case '%':
3807 goto addchar;
3808 case 'd':
3809 if (percentd_found)
3810 goto fail;
3811 percentd_found = 1;
3812 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3813 len = strlen(buf1);
3814 if ((q - buf + len) > buf_size - 1)
3815 goto fail;
3816 memcpy(q, buf1, len);
3817 q += len;
3818 break;
3819 default:
3820 goto fail;
3821 }
3822 } else {
3823 addchar:
3824 if ((q - buf) < buf_size - 1)
3825 *q++ = c;
3826 }
3827 }
3828 if (!percentd_found)
3829 goto fail;
3830 *q = '\0';
3831 return 0;
3832 fail:
3833 *q = '\0';
3834 return -1;
3835 }
3836
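/**
 * Illustrative usage sketch, not part of the original file: what
 * av_url_split() produces for a made-up URL.
 *
 * @code
 * char proto[16], auth[64], host[128], path[256];
 * int port;
 * av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *              host,  sizeof(host),  &port, path, sizeof(path),
 *              "http://user:pass@example.com:8080/dir/file?x=1");
 * // proto="http", auth="user:pass", host="example.com", port=8080,
 * // path="/dir/file?x=1"; port stays -1 when the URL carries none
 * @endcode
 */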
3837 void av_url_split(char *proto, int proto_size,
3838 char *authorization, int authorization_size,
3839 char *hostname, int hostname_size,
3840 int *port_ptr, char *path, int path_size, const char *url)
3841 {
3842 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3843
3844 if (port_ptr)
3845 *port_ptr = -1;
3846 if (proto_size > 0)
3847 proto[0] = 0;
3848 if (authorization_size > 0)
3849 authorization[0] = 0;
3850 if (hostname_size > 0)
3851 hostname[0] = 0;
3852 if (path_size > 0)
3853 path[0] = 0;
3854
3855 /* parse protocol */
3856 if ((p = strchr(url, ':'))) {
3857 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3858 p++; /* skip ':' */
3859 if (*p == '/')
3860 p++;
3861 if (*p == '/')
3862 p++;
3863 } else {
3864 /* no protocol means plain filename */
3865 av_strlcpy(path, url, path_size);
3866 return;
3867 }
3868
3869 /* separate path from hostname */
3870 ls = strchr(p, '/');
3871 ls2 = strchr(p, '?');
3872 if (!ls)
3873 ls = ls2;
3874 else if (ls && ls2)
3875 ls = FFMIN(ls, ls2);
3876 if (ls)
3877 av_strlcpy(path, ls, path_size);
3878 else
3879 ls = &p[strlen(p)]; // no path or query: point at the terminating '\0'
3880
3881 /* the rest is hostname, use that to parse auth/port */
3882 if (ls != p) {
3883 /* authorization (user[:pass]@hostname) */
3884 at2 = p;
3885 while ((at = strchr(p, '@')) && at < ls) {
3886 av_strlcpy(authorization, at2,
3887 FFMIN(authorization_size, at + 1 - at2));
3888 p = at + 1; /* skip '@' */
3889 }
3890
3891 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3892 /* [host]:port */
3893 av_strlcpy(hostname, p + 1,
3894 FFMIN(hostname_size, brk - p));
3895 if (brk[1] == ':' && port_ptr)
3896 *port_ptr = atoi(brk + 2);
3897 } else if ((col = strchr(p, ':')) && col < ls) {
3898 av_strlcpy(hostname, p,
3899 FFMIN(col + 1 - p, hostname_size));
3900 if (port_ptr)
3901 *port_ptr = atoi(col + 1);
3902 } else
3903 av_strlcpy(hostname, p,
3904 FFMIN(ls + 1 - p, hostname_size));
3905 }
3906 }
3907
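/**
 * Illustrative usage sketch, not part of the original file: round-tripping a
 * few bytes through ff_data_to_hex() and ff_hex_to_data(). The hex buffer
 * must hold 2 * n characters plus a terminator added by the caller.
 *
 * @code
 * uint8_t bin[3] = { 0xDE, 0xAD, 0x42 };
 * char    hex[2 * sizeof(bin) + 1];
 * ff_data_to_hex(hex, bin, sizeof(bin), 1);
 * hex[2 * sizeof(bin)] = '\0';        // ff_data_to_hex() does not 0-terminate
 * // hex == "dead42"
 * uint8_t back[3];
 * int n = ff_hex_to_data(back, hex);  // n == 3, back == { 0xDE, 0xAD, 0x42 }
 * @endcode
 */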
3908 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3909 {
3910 int i;
3911 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3912 '4', '5', '6', '7',
3913 '8', '9', 'A', 'B',
3914 'C', 'D', 'E', 'F' };
3915 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3916 '4', '5', '6', '7',
3917 '8', '9', 'a', 'b',
3918 'c', 'd', 'e', 'f' };
3919 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3920
3921 for (i = 0; i < s; i++) {
3922 buff[i * 2] = hex_table[src[i] >> 4];
3923 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3924 }
3925
3926 return buff;
3927 }
3928
3929 int ff_hex_to_data(uint8_t *data, const char *p)
3930 {
3931 int c, len, v;
3932
3933 len = 0;
3934 v = 1;
3935 for (;;) {
3936 p += strspn(p, SPACE_CHARS);
3937 if (*p == '\0')
3938 break;
3939 c = av_toupper((unsigned char) *p++);
3940 if (c >= '0' && c <= '9')
3941 c = c - '0';
3942 else if (c >= 'A' && c <= 'F')
3943 c = c - 'A' + 10;
3944 else
3945 break;
3946 v = (v << 4) | c;
3947 if (v & 0x100) {
3948 if (data)
3949 data[len] = v;
3950 len++;
3951 v = 1;
3952 }
3953 }
3954 return len;
3955 }
3956
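/**
 * Illustrative usage sketch, not part of the original file: how a demuxer
 * typically declares its timestamp resolution (st is a placeholder for a
 * freshly created AVStream).
 *
 * @code
 * // 33-bit timestamps counted in 1/90000-second units (MPEG-style)
 * avpriv_set_pts_info(st, 33, 1, 90000);
 * @endcode
 */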
3957 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3958 unsigned int pts_num, unsigned int pts_den)
3959 {
3960 AVRational new_tb;
3961 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
3962 if (new_tb.num != pts_num)
3963 av_log(NULL, AV_LOG_DEBUG,
3964 "st:%d removing common factor %d from timebase\n",
3965 s->index, pts_num / new_tb.num);
3966 } else
3967 av_log(NULL, AV_LOG_WARNING,
3968 "st:%d has too large timebase, reducing\n", s->index);
3969
3970 if (new_tb.num <= 0 || new_tb.den <= 0) {
3971 av_log(NULL, AV_LOG_ERROR,
3972 "Ignoring attempt to set invalid timebase %d/%d for st:%d\n",
3973 new_tb.num, new_tb.den,
3974 s->index);
3975 return;
3976 }
3977 s->time_base = new_tb;
3978 av_codec_set_pkt_timebase(s->codec, new_tb);
3979 s->pts_wrap_bits = pts_wrap_bits;
3980 }
3981
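/**
 * Illustrative sketch, not part of the original file: ff_parse_key_value()
 * hands each key (including the trailing '=') to the callback, which decides
 * where the 0-terminated value should be copied. MyParams and my_get_buf are
 * hypothetical names, mirroring how existing callers compare keys.
 *
 * @code
 * typedef struct MyParams { char name[64]; } MyParams;
 *
 * static void my_get_buf(void *ctx, const char *key, int key_len,
 *                        char **dest, int *dest_len)
 * {
 *     MyParams *p = ctx;
 *     if (!strncmp(key, "name=", key_len)) {
 *         *dest     = p->name;
 *         *dest_len = sizeof(p->name);
 *     }
 * }
 * // MyParams p = { { 0 } };
 * // ff_parse_key_value("name=\"foo bar\", other=1", my_get_buf, &p);
 * @endcode
 */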
3982 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3983 void *context)
3984 {
3985 const char *ptr = str;
3986
3987 /* Parse key=value pairs. */
3988 for (;;) {
3989 const char *key;
3990 char *dest = NULL, *dest_end;
3991 int key_len, dest_len = 0;
3992
3993 /* Skip whitespace and potential commas. */
3994 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3995 ptr++;
3996 if (!*ptr)
3997 break;
3998
3999 key = ptr;
4000
4001 if (!(ptr = strchr(key, '=')))
4002 break;
4003 ptr++;
4004 key_len = ptr - key;
4005
4006 callback_get_buf(context, key, key_len, &dest, &dest_len);
4007 dest_end = dest + dest_len - 1;
4008
4009 if (*ptr == '\"') {
4010 ptr++;
4011 while (*ptr && *ptr != '\"') {
4012 if (*ptr == '\\') {
4013 if (!ptr[1])
4014 break;
4015 if (dest && dest < dest_end)
4016 *dest++ = ptr[1];
4017 ptr += 2;
4018 } else {
4019 if (dest && dest < dest_end)
4020 *dest++ = *ptr;
4021 ptr++;
4022 }
4023 }
4024 if (*ptr == '\"')
4025 ptr++;
4026 } else {
4027 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
4028 if (dest && dest < dest_end)
4029 *dest++ = *ptr;
4030 }
4031 if (dest)
4032 *dest = 0;
4033 }
4034 }
4035
4036 int ff_find_stream_index(AVFormatContext *s, int id)
4037 {
4038 int i;
4039 for (i = 0; i < s->nb_streams; i++)
4040 if (s->streams[i]->id == id)
4041 return i;
4042 return -1;
4043 }
4044
4045 int64_t ff_iso8601_to_unix_time(const char *datestr)
4046 {
4047 struct tm time1 = { 0 }, time2 = { 0 };
4048 char *ret1, *ret2;
4049 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
4050 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
4051 if (ret2 && !ret1)
4052 return av_timegm(&time2);
4053 else
4054 return av_timegm(&time1);
4055 }
4056
4057 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
4058 int std_compliance)
4059 {
4060 if (ofmt) {
4061 if (ofmt->query_codec)
4062 return ofmt->query_codec(codec_id, std_compliance);
4063 else if (ofmt->codec_tag)
4064 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4065 else if (codec_id == ofmt->video_codec ||
4066 codec_id == ofmt->audio_codec ||
4067 codec_id == ofmt->subtitle_codec)
4068 return 1;
4069 }
4070 return AVERROR_PATCHWELCOME;
4071 }
4072
4073 int avformat_network_init(void)
4074 {
4075 #if CONFIG_NETWORK
4076 int ret;
4077 ff_network_inited_globally = 1;
4078 if ((ret = ff_network_init()) < 0)
4079 return ret;
4080 ff_tls_init();
4081 #endif
4082 return 0;
4083 }
4084
4085 int avformat_network_deinit(void)
4086 {
4087 #if CONFIG_NETWORK
4088 ff_network_close();
4089 ff_tls_deinit();
4090 #endif
4091 return 0;
4092 }
4093
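/**
 * Layout of the AV_PKT_DATA_PARAM_CHANGE side data as written below: a
 * little-endian flags word followed only by the fields whose flag bits are
 * set, in this order:
 *
 *   u32le flags
 *   u32le channels            if AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT
 *   u64le channel_layout      if AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT
 *   u32le sample_rate         if AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE
 *   u32le width, u32le height if AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS
 */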
4094 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4095 uint64_t channel_layout, int32_t sample_rate,
4096 int32_t width, int32_t height)
4097 {
4098 uint32_t flags = 0;
4099 int size = 4;
4100 uint8_t *data;
4101 if (!pkt)
4102 return AVERROR(EINVAL);
4103 if (channels) {
4104 size += 4;
4105 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4106 }
4107 if (channel_layout) {
4108 size += 8;
4109 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4110 }
4111 if (sample_rate) {
4112 size += 4;
4113 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4114 }
4115 if (width || height) {
4116 size += 8;
4117 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4118 }
4119 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4120 if (!data)
4121 return AVERROR(ENOMEM);
4122 bytestream_put_le32(&data, flags);
4123 if (channels)
4124 bytestream_put_le32(&data, channels);
4125 if (channel_layout)
4126 bytestream_put_le64(&data, channel_layout);
4127 if (sample_rate)
4128 bytestream_put_le32(&data, sample_rate);
4129 if (width || height) {
4130 bytestream_put_le32(&data, width);
4131 bytestream_put_le32(&data, height);
4132 }
4133 return 0;
4134 }
4135
4136 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4137 {
4138 AVRational undef = {0, 1};
4139 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4140 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4141 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4142
4143 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4144 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4145 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4146 stream_sample_aspect_ratio = undef;
4147
4148 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4149 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4150 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4151 frame_sample_aspect_ratio = undef;
4152
4153 if (stream_sample_aspect_ratio.num)
4154 return stream_sample_aspect_ratio;
4155 else
4156 return frame_sample_aspect_ratio;
4157 }
4158
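/**
 * Summary of the heuristic implemented below: start from r_frame_rate; fall
 * back to avg_frame_rate when r_frame_rate looks implausibly high (> 210 fps)
 * while the average is plausible (< 70 fps); and, when ticks_per_frame > 1,
 * prefer the codec-level framerate if it is well below the current guess
 * (< 0.7x) and the average rate differs from that guess by more than 10%.
 */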
4159 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4160 {
4161 AVRational fr = st->r_frame_rate;
4162 AVRational codec_fr = st->codec->framerate;
4163 AVRational avg_fr = st->avg_frame_rate;
4164
4165 if (avg_fr.num > 0 && avg_fr.den > 0 && fr.num > 0 && fr.den > 0 &&
4166 av_q2d(avg_fr) < 70 && av_q2d(fr) > 210) {
4167 fr = avg_fr;
4168 }
4169
4170
4171 if (st->codec->ticks_per_frame > 1) {
4172 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4173 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4174 fr = codec_fr;
4175 }
4176
4177 return fr;
4178 }
4179
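/**
 * Illustrative sketch, not part of the original file: stream specifiers
 * accepted below (values are examples):
 *
 *   "2"                  stream with index 2
 *   "v", "a:1"           by media type, optionally followed by an index
 *                        within the streams of that type
 *   "p:45:0"             first stream of the program with id 45
 *   "#0x101", "i:0x101"  stream with id 0x101
 *   "m:language:eng"     stream whose metadata tag "language" equals "eng"
 *   ""                   empty specifier, matches every stream
 *
 * @code
 * int m = avformat_match_stream_specifier(s, st, "a:0");
 * // > 0 if st is the first audio stream of s, 0 if not,
 * // a negative AVERROR code for an invalid specifier
 * @endcode
 */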
4180 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4181 const char *spec)
4182 {
4183 if (*spec <= '9' && *spec >= '0') /* opt:index */
4184 return strtol(spec, NULL, 0) == st->index;
4185 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4186 *spec == 't') { /* opt:[vasdt] */
4187 enum AVMediaType type;
4188
4189 switch (*spec++) {
4190 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4191 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4192 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4193 case 'd': type = AVMEDIA_TYPE_DATA; break;
4194 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4195 default: av_assert0(0);
4196 }
4197 if (type != st->codec->codec_type)
4198 return 0;
4199 if (*spec++ == ':') { /* possibly followed by :index */
4200 int i, index = strtol(spec, NULL, 0);
4201 for (i = 0; i < s->nb_streams; i++)
4202 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4203 return i == st->index;
4204 return 0;
4205 }
4206 return 1;
4207 } else if (*spec == 'p' && *(spec + 1) == ':') {
4208 int prog_id, i, j;
4209 char *endptr;
4210 spec += 2;
4211 prog_id = strtol(spec, &endptr, 0);
4212 for (i = 0; i < s->nb_programs; i++) {
4213 if (s->programs[i]->id != prog_id)
4214 continue;
4215
4216 if (*endptr++ == ':') {
4217 int stream_idx = strtol(endptr, NULL, 0);
4218 return stream_idx >= 0 &&
4219 stream_idx < s->programs[i]->nb_stream_indexes &&
4220 st->index == s->programs[i]->stream_index[stream_idx];
4221 }
4222
4223 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4224 if (st->index == s->programs[i]->stream_index[j])
4225 return 1;
4226 }
4227 return 0;
4228 } else if (*spec == '#' ||
4229 (*spec == 'i' && *(spec + 1) == ':')) {
4230 int stream_id;
4231 char *endptr;
4232 spec += 1 + (*spec == 'i');
4233 stream_id = strtol(spec, &endptr, 0);
4234 if (!*endptr)
4235 return stream_id == st->id;
4236 } else if (*spec == 'm' && *(spec + 1) == ':') {
4237 AVDictionaryEntry *tag;
4238 char *key, *val;
4239 int ret;
4240
4241 spec += 2;
4242 val = strchr(spec, ':');
4243
4244 key = val ? av_strndup(spec, val - spec) : av_strdup(spec);
4245 if (!key)
4246 return AVERROR(ENOMEM);
4247
4248 tag = av_dict_get(st->metadata, key, NULL, 0);
4249 if (tag) {
4250 if (!val || !strcmp(tag->value, val + 1))
4251 ret = 1;
4252 else
4253 ret = 0;
4254 } else
4255 ret = 0;
4256
4257 av_freep(&key);
4258 return ret;
4259 } else if (!*spec) /* empty specifier, matches everything */
4260 return 1;
4261
4262 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4263 return AVERROR(EINVAL);
4264 }
4265
4266 int ff_generate_avci_extradata(AVStream *st)
4267 {
4268 static const uint8_t avci100_1080p_extradata[] = {
4269 // SPS
4270 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4271 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4272 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4273 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4274 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4275 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4276 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4277 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4278 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4279 // PPS
4280 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4281 0xd0
4282 };
4283 static const uint8_t avci100_1080i_extradata[] = {
4284 // SPS
4285 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4286 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4287 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4288 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4289 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4290 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4291 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4292 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4293 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4294 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4295 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4296 // PPS
4297 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4298 0xd0
4299 };
4300 static const uint8_t avci50_1080p_extradata[] = {
4301 // SPS
4302 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4303 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4304 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4305 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6f, 0x37,
4306 0xcd, 0xf9, 0xbf, 0x81, 0x6b, 0xf3, 0x7c, 0xde,
4307 0x6e, 0x6c, 0xd3, 0x3c, 0x05, 0xa0, 0x22, 0x7e,
4308 0x5f, 0xfc, 0x00, 0x0c, 0x00, 0x13, 0x8c, 0x04,
4309 0x04, 0x05, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00,
4310 0x00, 0x03, 0x00, 0x32, 0x84, 0x00, 0x00, 0x00,
4311 // PPS
4312 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4313 0x11
4314 };
4315 static const uint8_t avci50_1080i_extradata[] = {
4316 // SPS
4317 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4318 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4319 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4320 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4321 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4322 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4323 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4324 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4325 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4326 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4327 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4328 // PPS
4329 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4330 0x11
4331 };
4332 static const uint8_t avci100_720p_extradata[] = {
4333 // SPS
4334 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4335 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4336 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4337 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4338 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4339 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4340 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4341 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4342 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4343 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4344 // PPS
4345 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4346 0x11
4347 };
4348 static const uint8_t avci50_720p_extradata[] = {
4349 // SPS
4350 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x20,
4351 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4352 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4353 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6f, 0x37,
4354 0xcd, 0xf9, 0xbf, 0x81, 0x6b, 0xf3, 0x7c, 0xde,
4355 0x6e, 0x6c, 0xd3, 0x3c, 0x0f, 0x01, 0x6e, 0xff,
4356 0xc0, 0x00, 0xc0, 0x01, 0x38, 0xc0, 0x40, 0x40,
4357 0x50, 0x00, 0x00, 0x03, 0x00, 0x10, 0x00, 0x00,
4358 0x06, 0x48, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
4359 // PPS
4360 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4361 0x11
4362 };
4363
4364 const uint8_t *data = NULL;
4365 int size = 0;
4366
4367 if (st->codec->width == 1920) {
4368 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4369 data = avci100_1080p_extradata;
4370 size = sizeof(avci100_1080p_extradata);
4371 } else {
4372 data = avci100_1080i_extradata;
4373 size = sizeof(avci100_1080i_extradata);
4374 }
4375 } else if (st->codec->width == 1440) {
4376 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4377 data = avci50_1080p_extradata;
4378 size = sizeof(avci50_1080p_extradata);
4379 } else {
4380 data = avci50_1080i_extradata;
4381 size = sizeof(avci50_1080i_extradata);
4382 }
4383 } else if (st->codec->width == 1280) {
4384 data = avci100_720p_extradata;
4385 size = sizeof(avci100_720p_extradata);
4386 } else if (st->codec->width == 960) {
4387 data = avci50_720p_extradata;
4388 size = sizeof(avci50_720p_extradata);
4389 }
4390
4391 if (!size)
4392 return 0;
4393
4394 av_freep(&st->codec->extradata);
4395 if (ff_alloc_extradata(st->codec, size))
4396 return AVERROR(ENOMEM);
4397 memcpy(st->codec->extradata, data, size);
4398
4399 return 0;
4400 }
4401
4402 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
4403 int *size)
4404 {
4405 int i;
4406
4407 for (i = 0; i < st->nb_side_data; i++) {
4408 if (st->side_data[i].type == type) {
4409 if (size)
4410 *size = st->side_data[i].size;
4411 return st->side_data[i].data;
4412 }
4413 }
4414 return NULL;
4415 }