1/*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#undef NDEBUG
23#include <assert.h>
24#include <stdarg.h>
25#include <stdint.h>
26
27#include "config.h"
28
29#include "libavutil/avassert.h"
30#include "libavutil/avstring.h"
31#include "libavutil/dict.h"
32#include "libavutil/internal.h"
33#include "libavutil/mathematics.h"
34#include "libavutil/opt.h"
35#include "libavutil/parseutils.h"
36#include "libavutil/pixdesc.h"
37#include "libavutil/time.h"
38#include "libavutil/timestamp.h"
39
40#include "libavcodec/bytestream.h"
41#include "libavcodec/internal.h"
42#include "libavcodec/raw.h"
43
44#include "audiointerleave.h"
45#include "avformat.h"
46#include "avio_internal.h"
47#include "id3v2.h"
48#include "internal.h"
49#include "metadata.h"
50#if CONFIG_NETWORK
51#include "network.h"
52#endif
53#include "riff.h"
54#include "url.h"
55
56/**
57 * @file
58 * various utility functions for use within FFmpeg
59 */
60
61unsigned avformat_version(void)
62{
63 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
64 return LIBAVFORMAT_VERSION_INT;
65}
66
67const char *avformat_configuration(void)
68{
69 return FFMPEG_CONFIGURATION;
70}
71
72const char *avformat_license(void)
73{
74#define LICENSE_PREFIX "libavformat license: "
75 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
76}
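
/* Illustrative sketch (for exposition only; the example_* names are
 * hypothetical): the return statement above relies on compile-time
 * string-literal concatenation. LICENSE_PREFIX FFMPEG_LICENSE forms a single
 * literal, and adding sizeof(LICENSE_PREFIX) - 1 (the prefix length without
 * its terminating NUL) skips past the prefix, so only the license name is
 * returned. A minimal standalone equivalent: */
#if 0
#define EXAMPLE_PREFIX "example: "
static const char *example_strip_prefix(void)
{
    /* EXAMPLE_PREFIX "LGPL" is one literal, "example: LGPL";
     * + sizeof(EXAMPLE_PREFIX) - 1 points at the 'L' of "LGPL" */
    return EXAMPLE_PREFIX "LGPL" + sizeof(EXAMPLE_PREFIX) - 1;
}
#endif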
77
78#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
79
80static int is_relative(int64_t ts) {
81 return ts > (RELATIVE_TS_BASE - (1LL<<48));
82}
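
/* Illustrative sketch (for exposition only; the example_* helpers are
 * hypothetical): timestamps whose absolute origin is not yet known are
 * stored offset by RELATIVE_TS_BASE so they can be told apart from ordinary
 * timestamps with is_relative(); av_read_frame() subtracts the offset again
 * before returning packets to the caller. A minimal sketch of the
 * convention: */
#if 0
static int64_t example_mark_relative(int64_t ts)
{
    return RELATIVE_TS_BASE + ts;                        /* store as "relative" */
}

static int64_t example_to_plain(int64_t ts)
{
    return is_relative(ts) ? ts - RELATIVE_TS_BASE : ts; /* undo the offset */
}
#endif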
83
84/**
85 * Wrap a given timestamp if there is an indication that it has overflowed
86 * the range given by the stream's pts_wrap_bits.
87 * @param st stream
88 * @param timestamp the timestamp to wrap
89 * @return the resulting timestamp
90 */
91static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
92{
93 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
94 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
95 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
96 timestamp < st->pts_wrap_reference)
97 return timestamp + (1ULL << st->pts_wrap_bits);
98 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
99 timestamp >= st->pts_wrap_reference)
100 return timestamp - (1ULL << st->pts_wrap_bits);
101 }
102 return timestamp;
103}
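
/* Illustrative sketch (for exposition only), assuming a hypothetical
 * MPEG-TS-like stream with 33-bit timestamps whose wrap behavior has already
 * been chosen by update_wrap_reference() below: a timestamp that wrapped
 * back to a small value is moved past the wrap point again by adding
 * 1 << pts_wrap_bits. */
#if 0
static void example_wrap(AVStream *st)
{
    st->pts_wrap_bits      = 33;                     /* MPEG-TS PTS/DTS width */
    st->pts_wrap_behavior  = AV_PTS_WRAP_ADD_OFFSET;
    st->pts_wrap_reference = 8589930000LL;           /* just below 1LL << 33 */
    /* 100 is below the reference, so 1LL << 33 is added */
    av_assert0(wrap_timestamp(st, 100) == 100 + (1LL << 33));
}
#endif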
104
105MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
106MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
107MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
108MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
109MAKE_ACCESSORS(AVFormatContext, format, int, metadata_header_padding)
110MAKE_ACCESSORS(AVFormatContext, format, void *, opaque)
111MAKE_ACCESSORS(AVFormatContext, format, av_format_control_message, control_message_cb)
112
113int64_t av_stream_get_end_pts(const AVStream *st)
114{
115 return st->pts.val;
116}
117
118struct AVCodecParserContext *av_stream_get_parser(const AVStream *st)
119{
120 return st->parser;
121}
122
123void av_format_inject_global_side_data(AVFormatContext *s)
124{
125 int i;
126 s->internal->inject_global_side_data = 1;
127 for (i = 0; i < s->nb_streams; i++) {
128 AVStream *st = s->streams[i];
129 st->inject_global_side_data = 1;
130 }
131}
132
133static const AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
134{
135 if (st->codec->codec)
136 return st->codec->codec;
137
138 switch (st->codec->codec_type) {
139 case AVMEDIA_TYPE_VIDEO:
140 if (s->video_codec) return s->video_codec;
141 break;
142 case AVMEDIA_TYPE_AUDIO:
143 if (s->audio_codec) return s->audio_codec;
144 break;
145 case AVMEDIA_TYPE_SUBTITLE:
146 if (s->subtitle_codec) return s->subtitle_codec;
147 break;
148 }
149
150 return avcodec_find_decoder(codec_id);
151}
152
153int av_format_get_probe_score(const AVFormatContext *s)
154{
155 return s->probe_score;
156}
157
158/* an arbitrarily chosen "sane" max packet size -- 50M */
159#define SANE_CHUNK_SIZE (50000000)
160
161int ffio_limit(AVIOContext *s, int size)
162{
163 if (s->maxsize>= 0) {
164 int64_t remaining= s->maxsize - avio_tell(s);
165 if (remaining < size) {
166 int64_t newsize = avio_size(s);
167 if (!s->maxsize || s->maxsize<newsize)
168 s->maxsize = newsize - !newsize;
169 remaining= s->maxsize - avio_tell(s);
170 remaining= FFMAX(remaining, 0);
171 }
172
173 if (s->maxsize>= 0 && remaining+1 < size) {
174 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
175 size = remaining+1;
176 }
177 }
178 return size;
179}
180
181/* Read the data in sane-sized chunks and append to pkt.
182 * Return the number of bytes read or an error. */
183static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
184{
185 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
186 int orig_size = pkt->size;
187 int ret;
188
189 do {
190 int prev_size = pkt->size;
191 int read_size;
192
193 /* When the caller requests a lot of data, limit it to the amount
194 * left in the file, or to SANE_CHUNK_SIZE when that is not known. */
195 read_size = size;
196 if (read_size > SANE_CHUNK_SIZE/10) {
197 read_size = ffio_limit(s, read_size);
198 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
199 if (s->maxsize < 0)
200 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
201 }
202
203 ret = av_grow_packet(pkt, read_size);
204 if (ret < 0)
205 break;
206
207 ret = avio_read(s, pkt->data + prev_size, read_size);
208 if (ret != read_size) {
209 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
210 break;
211 }
212
213 size -= read_size;
214 } while (size > 0);
215 if (size > 0)
216 pkt->flags |= AV_PKT_FLAG_CORRUPT;
217
218 pkt->pos = orig_pos;
219 if (!pkt->size)
220 av_free_packet(pkt);
221 return pkt->size > orig_size ? pkt->size - orig_size : ret;
222}
223
224int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
225{
226 av_init_packet(pkt);
227 pkt->data = NULL;
228 pkt->size = 0;
229 pkt->pos = avio_tell(s);
230
231 return append_packet_chunked(s, pkt, size);
232}
233
234int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
235{
236 if (!pkt->size)
237 return av_get_packet(s, pkt, size);
238 return append_packet_chunked(s, pkt, size);
239}
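
/* Illustrative usage sketch (for exposition only; the example_* helper is
 * hypothetical): reading a fixed-size chunk from an already opened
 * AVIOContext into a packet; the 1024-byte size is an arbitrary example
 * value. */
#if 0
static int example_read_chunk(AVIOContext *pb, AVPacket *pkt)
{
    int ret = av_get_packet(pb, pkt, 1024);   /* allocates and fills pkt */
    if (ret < 0)
        return ret;                           /* read error, pkt is empty */
    /* ... use pkt->data / pkt->size (may be shorter than requested) ... */
    av_free_packet(pkt);                      /* old-style packet API used in this tree */
    return 0;
}
#endif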
240
241int av_filename_number_test(const char *filename)
242{
243 char buf[1024];
244 return filename &&
245 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
246}
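
/* Illustrative sketch (for exposition only; the example_* helper is
 * hypothetical): the test accepts patterns av_get_frame_filename() can
 * expand, e.g. "img%03d.png" (frame 1 -> "img001.png"), and rejects names
 * without a number specifier. */
#if 0
static void example_filename_test(void)
{
    av_assert0(av_filename_number_test("img%03d.png") == 1);
    av_assert0(av_filename_number_test("img.png")     == 0);
}
#endif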
247
248static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
249 AVProbeData *pd)
250{
251 static const struct {
252 const char *name;
253 enum AVCodecID id;
254 enum AVMediaType type;
255 } fmt_id_type[] = {
256 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
257 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
258 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
259 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
260 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
261 { "hevc", AV_CODEC_ID_HEVC, AVMEDIA_TYPE_VIDEO },
262 { "loas", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
263 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
264 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
265 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
266 { 0 }
267 };
268 int score;
269 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
270
271 if (fmt && st->request_probe <= score) {
272 int i;
273 av_log(s, AV_LOG_DEBUG,
274 "Probe with size=%d, packets=%d detected %s with score=%d\n",
275 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
276 fmt->name, score);
277 for (i = 0; fmt_id_type[i].name; i++) {
278 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
279 st->codec->codec_id = fmt_id_type[i].id;
280 st->codec->codec_type = fmt_id_type[i].type;
281 return score;
282 }
283 }
284 }
285 return 0;
286}
287
288/************************************************************/
289/* input media file */
290
291int av_demuxer_open(AVFormatContext *ic) {
292 int err;
293
294 if (ic->iformat->read_header) {
295 err = ic->iformat->read_header(ic);
296 if (err < 0)
297 return err;
298 }
299
300 if (ic->pb && !ic->data_offset)
301 ic->data_offset = avio_tell(ic->pb);
302
303 return 0;
304}
305
306/* Open input file and probe the format if necessary. */
307static int init_input(AVFormatContext *s, const char *filename,
308 AVDictionary **options)
309{
310 int ret;
311 AVProbeData pd = { filename, NULL, 0 };
312 int score = AVPROBE_SCORE_RETRY;
313
314 if (s->pb) {
315 s->flags |= AVFMT_FLAG_CUSTOM_IO;
316 if (!s->iformat)
317 return av_probe_input_buffer2(s->pb, &s->iformat, filename,
318 s, 0, s->format_probesize);
319 else if (s->iformat->flags & AVFMT_NOFILE)
320 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
321 "will be ignored with AVFMT_NOFILE format.\n");
322 return 0;
323 }
324
325 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
326 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
327 return score;
328
329 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
330 &s->interrupt_callback, options)) < 0)
331 return ret;
332 if (s->iformat)
333 return 0;
334 return av_probe_input_buffer2(s->pb, &s->iformat, filename,
335 s, 0, s->format_probesize);
336}
337
338static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
339 AVPacketList **plast_pktl)
340{
341 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
342 if (!pktl)
343 return NULL;
344
345 if (*packet_buffer)
346 (*plast_pktl)->next = pktl;
347 else
348 *packet_buffer = pktl;
349
350 /* Add the packet to the buffered packet list. */
351 *plast_pktl = pktl;
352 pktl->pkt = *pkt;
353 return &pktl->pkt;
354}
355
356int avformat_queue_attached_pictures(AVFormatContext *s)
357{
358 int i;
359 for (i = 0; i < s->nb_streams; i++)
360 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
361 s->streams[i]->discard < AVDISCARD_ALL) {
362 AVPacket copy = s->streams[i]->attached_pic;
363 if (copy.size <= 0) {
364 av_log(s, AV_LOG_WARNING,
365 "Attached picture on stream %d has invalid size, "
366 "ignoring\n", i);
367 continue;
368 }
369 copy.buf = av_buffer_ref(copy.buf);
370 if (!copy.buf)
371 return AVERROR(ENOMEM);
372
373 add_to_pktbuf(&s->raw_packet_buffer, &copy,
374 &s->raw_packet_buffer_end);
375 }
376 return 0;
377}
378
379int avformat_open_input(AVFormatContext **ps, const char *filename,
380 AVInputFormat *fmt, AVDictionary **options)
381{
382 AVFormatContext *s = *ps;
383 int ret = 0;
384 AVDictionary *tmp = NULL;
385 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
386
387 if (!s && !(s = avformat_alloc_context()))
388 return AVERROR(ENOMEM);
389 if (!s->av_class) {
390 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
391 return AVERROR(EINVAL);
392 }
393 if (fmt)
394 s->iformat = fmt;
395
396 if (options)
397 av_dict_copy(&tmp, *options, 0);
398
399 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
400 goto fail;
401
402 if ((ret = init_input(s, filename, &tmp)) < 0)
403 goto fail;
404 s->probe_score = ret;
405 avio_skip(s->pb, s->skip_initial_bytes);
406
407 /* Check filename in case an image number is expected. */
408 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
409 if (!av_filename_number_test(filename)) {
410 ret = AVERROR(EINVAL);
411 goto fail;
412 }
413 }
414
415 s->duration = s->start_time = AV_NOPTS_VALUE;
416 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
417
418 /* Allocate private data. */
419 if (s->iformat->priv_data_size > 0) {
420 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
421 ret = AVERROR(ENOMEM);
422 goto fail;
423 }
424 if (s->iformat->priv_class) {
425 *(const AVClass **) s->priv_data = s->iformat->priv_class;
426 av_opt_set_defaults(s->priv_data);
427 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
428 goto fail;
429 }
430 }
431
432 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
433 if (s->pb)
434 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, 0);
435
436 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
437 if ((ret = s->iformat->read_header(s)) < 0)
438 goto fail;
439
440 if (id3v2_extra_meta) {
441 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
442 !strcmp(s->iformat->name, "tta")) {
443 if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
444 goto fail;
445 } else
446 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
447 }
448 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
449
450 if ((ret = avformat_queue_attached_pictures(s)) < 0)
451 goto fail;
452
453 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
454 s->data_offset = avio_tell(s->pb);
455
456 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
457
458 if (options) {
459 av_dict_free(options);
460 *options = tmp;
461 }
462 *ps = s;
463 return 0;
464
465fail:
466 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
467 av_dict_free(&tmp);
468 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
469 avio_close(s->pb);
470 avformat_free_context(s);
471 *ps = NULL;
472 return ret;
473}
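
/* Illustrative usage sketch (for exposition only; the example_* helper is
 * hypothetical): the common open / inspect / close sequence for this API
 * generation, assuming a plain file path and default options. */
#if 0
static int example_open(const char *path)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, path, NULL, NULL);
    if (ret < 0)
        return ret;                         /* ic is freed on failure */
    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, path, 0);     /* print what was detected */
    avformat_close_input(&ic);
    return ret < 0 ? ret : 0;
}
#endif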
474
475/*******************************************************/
476
477static void force_codec_ids(AVFormatContext *s, AVStream *st)
478{
479 switch (st->codec->codec_type) {
480 case AVMEDIA_TYPE_VIDEO:
481 if (s->video_codec_id)
482 st->codec->codec_id = s->video_codec_id;
483 break;
484 case AVMEDIA_TYPE_AUDIO:
485 if (s->audio_codec_id)
486 st->codec->codec_id = s->audio_codec_id;
487 break;
488 case AVMEDIA_TYPE_SUBTITLE:
489 if (s->subtitle_codec_id)
490 st->codec->codec_id = s->subtitle_codec_id;
491 break;
492 }
493}
494
495static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
496{
497 if (st->request_probe>0) {
498 AVProbeData *pd = &st->probe_data;
499 int end;
500 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
501 --st->probe_packets;
502
503 if (pkt) {
504 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
505 if (!new_buf) {
506 av_log(s, AV_LOG_WARNING,
507 "Failed to reallocate probe buffer for stream %d\n",
508 st->index);
509 goto no_packet;
510 }
511 pd->buf = new_buf;
512 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
513 pd->buf_size += pkt->size;
514 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
515 } else {
516no_packet:
517 st->probe_packets = 0;
518 if (!pd->buf_size) {
519 av_log(s, AV_LOG_WARNING,
520 "nothing to probe for stream %d\n", st->index);
521 }
522 }
523
524 end= s->raw_packet_buffer_remaining_size <= 0
525 || st->probe_packets<= 0;
526
527 if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
528 int score = set_codec_from_probe_data(s, st, pd);
529 if ( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_STREAM_RETRY)
530 || end) {
531 pd->buf_size = 0;
532 av_freep(&pd->buf);
533 st->request_probe = -1;
534 if (st->codec->codec_id != AV_CODEC_ID_NONE) {
535 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
536 } else
537 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
538 }
539 force_codec_ids(s, st);
540 }
541 }
542 return 0;
543}
544
545static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
546{
547 int64_t ref = pkt->dts;
548 int i, pts_wrap_behavior;
549 int64_t pts_wrap_reference;
550 AVProgram *first_program;
551
552 if (ref == AV_NOPTS_VALUE)
553 ref = pkt->pts;
554 if (st->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow)
555 return 0;
556 ref &= (1LL << st->pts_wrap_bits)-1;
557
558 // reference time stamp should be 60 s before first time stamp
559 pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num);
560 // if the first timestamp is within both 1/8 of the wrap range and 60 s of the wrap point, subtract the wrap offset rather than add it
561 pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) ||
562 (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
563 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
564
565 first_program = av_find_program_from_stream(s, NULL, stream_index);
566
567 if (!first_program) {
568 int default_stream_index = av_find_default_stream_index(s);
569 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
570 for (i = 0; i < s->nb_streams; i++) {
571 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
572 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
573 }
574 }
575 else {
576 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
577 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
578 }
579 }
580 else {
581 AVProgram *program = first_program;
582 while (program) {
583 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
584 pts_wrap_reference = program->pts_wrap_reference;
585 pts_wrap_behavior = program->pts_wrap_behavior;
586 break;
587 }
588 program = av_find_program_from_stream(s, program, stream_index);
589 }
590
591 // update every program with differing pts_wrap_reference
592 program = first_program;
593 while (program) {
594 if (program->pts_wrap_reference != pts_wrap_reference) {
595 for (i = 0; i<program->nb_stream_indexes; i++) {
596 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
597 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
598 }
599
600 program->pts_wrap_reference = pts_wrap_reference;
601 program->pts_wrap_behavior = pts_wrap_behavior;
602 }
603 program = av_find_program_from_stream(s, program, stream_index);
604 }
605 }
606 return 1;
607}
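
/* Worked example (illustrative, for exposition only): for an MPEG-TS-like
 * stream with pts_wrap_bits = 33 and time_base 1/90000, 60 s corresponds to
 * 5400000 ticks. A first dts of 8589000000 lies within both the last 1/8 of
 * the 2^33 range and the last 60 s before the wrap point, so
 * AV_PTS_WRAP_SUB_OFFSET is chosen and pts_wrap_reference becomes
 * 8589000000 - 5400000 = 8583600000; timestamps at or above that reference
 * are later shifted down by 2^33 in wrap_timestamp(). */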
608
609int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
610{
611 int ret, i, err;
612 AVStream *st;
613
614 for (;;) {
615 AVPacketList *pktl = s->raw_packet_buffer;
616
617 if (pktl) {
618 *pkt = pktl->pkt;
619 st = s->streams[pkt->stream_index];
620 if (s->raw_packet_buffer_remaining_size <= 0)
621 if ((err = probe_codec(s, st, NULL)) < 0)
622 return err;
623 if (st->request_probe <= 0) {
624 s->raw_packet_buffer = pktl->next;
625 s->raw_packet_buffer_remaining_size += pkt->size;
626 av_free(pktl);
627 return 0;
628 }
629 }
630
631 pkt->data = NULL;
632 pkt->size = 0;
633 av_init_packet(pkt);
634 ret = s->iformat->read_packet(s, pkt);
635 if (ret < 0) {
636 if (!pktl || ret == AVERROR(EAGAIN))
637 return ret;
638 for (i = 0; i < s->nb_streams; i++) {
639 st = s->streams[i];
640 if (st->probe_packets)
641 if ((err = probe_codec(s, st, NULL)) < 0)
642 return err;
643 av_assert0(st->request_probe <= 0);
644 }
645 continue;
646 }
647
648 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
649 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
650 av_log(s, AV_LOG_WARNING,
651 "Dropped corrupted packet (stream = %d)\n",
652 pkt->stream_index);
653 av_free_packet(pkt);
654 continue;
655 }
656
657 if (pkt->stream_index >= (unsigned)s->nb_streams) {
658 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
659 continue;
660 }
661
662 st = s->streams[pkt->stream_index];
663
664 if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
665 // correct first time stamps to negative values
666 if (!is_relative(st->first_dts))
667 st->first_dts = wrap_timestamp(st, st->first_dts);
668 if (!is_relative(st->start_time))
669 st->start_time = wrap_timestamp(st, st->start_time);
670 if (!is_relative(st->cur_dts))
671 st->cur_dts = wrap_timestamp(st, st->cur_dts);
672 }
673
674 pkt->dts = wrap_timestamp(st, pkt->dts);
675 pkt->pts = wrap_timestamp(st, pkt->pts);
676
677 force_codec_ids(s, st);
678
679 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
680 if (s->use_wallclock_as_timestamps)
681 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
682
683 if (!pktl && st->request_probe <= 0)
684 return ret;
685
686 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
687 s->raw_packet_buffer_remaining_size -= pkt->size;
688
689 if ((err = probe_codec(s, st, pkt)) < 0)
690 return err;
691 }
692}
693
694#if FF_API_READ_PACKET
695int av_read_packet(AVFormatContext *s, AVPacket *pkt)
696{
697 return ff_read_packet(s, pkt);
698}
699#endif
700
701
702/**********************************************************/
703
704static int determinable_frame_size(AVCodecContext *avctx)
705{
706 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
707 avctx->codec_id == AV_CODEC_ID_MP1 ||
708 avctx->codec_id == AV_CODEC_ID_MP2 ||
709 avctx->codec_id == AV_CODEC_ID_MP3/* ||
710 avctx->codec_id == AV_CODEC_ID_CELT*/)
711 return 1;
712 return 0;
713}
714
715/**
716 * Compute the frame duration in seconds as *pnum / *pden; both are set to 0 if it is not available.
717 */
718void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
719 AVCodecParserContext *pc, AVPacket *pkt)
720{
721 int frame_size;
722
723 *pnum = 0;
724 *pden = 0;
725 switch (st->codec->codec_type) {
726 case AVMEDIA_TYPE_VIDEO:
727 if (st->r_frame_rate.num && !pc) {
728 *pnum = st->r_frame_rate.den;
729 *pden = st->r_frame_rate.num;
730 } else if (st->time_base.num * 1000LL > st->time_base.den) {
731 *pnum = st->time_base.num;
732 *pden = st->time_base.den;
733 } else if (st->codec->time_base.num * 1000LL > st->codec->time_base.den) {
734 *pnum = st->codec->time_base.num;
735 *pden = st->codec->time_base.den;
736 if (pc && pc->repeat_pict) {
737 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
738 *pden /= 1 + pc->repeat_pict;
739 else
740 *pnum *= 1 + pc->repeat_pict;
741 }
742 /* If this codec can be interlaced or progressive then we need
743 * a parser to compute the duration of a packet. So if we have
744 * no parser in that case, leave the duration undefined. */
745 if (st->codec->ticks_per_frame > 1 && !pc)
746 *pnum = *pden = 0;
747 }
748 break;
749 case AVMEDIA_TYPE_AUDIO:
750 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
751 if (frame_size <= 0 || st->codec->sample_rate <= 0)
752 break;
753 *pnum = frame_size;
754 *pden = st->codec->sample_rate;
755 break;
756 default:
757 break;
758 }
759}
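
/* Worked example (illustrative, for exposition only): for 25 fps video with
 * no parser, r_frame_rate is 25/1, so *pnum = 1 and *pden = 25, i.e. each
 * frame lasts 1/25 s. For an MP3 packet of 1152 samples at 44100 Hz,
 * *pnum = 1152 and *pden = 44100, roughly 26.1 ms. */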
760
761static int is_intra_only(AVCodecContext *enc) {
762 const AVCodecDescriptor *desc;
763
764 if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
765 return 1;
766
767 desc = av_codec_get_codec_descriptor(enc);
768 if (!desc) {
769 desc = avcodec_descriptor_get(enc->codec_id);
770 av_codec_set_codec_descriptor(enc, desc);
771 }
772 if (desc)
773 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
774 return 0;
775}
776
777static int has_decode_delay_been_guessed(AVStream *st)
778{
779 if (st->codec->codec_id != AV_CODEC_ID_H264) return 1;
780 if (!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
781 return 1;
782#if CONFIG_H264_DECODER
783 if (st->codec->has_b_frames &&
784 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
785 return 1;
786#endif
787 if (st->codec->has_b_frames<3)
788 return st->nb_decoded_frames >= 7;
789 else if (st->codec->has_b_frames<4)
790 return st->nb_decoded_frames >= 18;
791 else
792 return st->nb_decoded_frames >= 20;
793}
794
795static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
796{
797 if (pktl->next)
798 return pktl->next;
799 if (pktl == s->packet_buffer_end)
800 return s->parse_queue;
801 return NULL;
802}
803
804static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts) {
805 int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
806 st->codec->codec_id != AV_CODEC_ID_HEVC;
807
808 if(!onein_oneout) {
809 int delay = st->codec->has_b_frames;
810 int i;
811
812 if (dts == AV_NOPTS_VALUE) {
813 int64_t best_score = INT64_MAX;
814 for (i = 0; i<delay; i++) {
815 if (st->pts_reorder_error_count[i]) {
816 int64_t score = st->pts_reorder_error[i] / st->pts_reorder_error_count[i];
817 if (score < best_score) {
818 best_score = score;
819 dts = pts_buffer[i];
820 }
821 }
822 }
823 } else {
824 for (i = 0; i<delay; i++) {
825 if (pts_buffer[i] != AV_NOPTS_VALUE) {
826 int64_t diff = FFABS(pts_buffer[i] - dts)
827 + (uint64_t)st->pts_reorder_error[i];
828 diff = FFMAX(diff, st->pts_reorder_error[i]);
829 st->pts_reorder_error[i] = diff;
830 st->pts_reorder_error_count[i]++;
831 if (st->pts_reorder_error_count[i] > 250) {
832 st->pts_reorder_error[i] >>= 1;
833 st->pts_reorder_error_count[i] >>= 1;
834 }
835 }
836 }
837 }
838 }
839
840 if (dts == AV_NOPTS_VALUE)
841 dts = pts_buffer[0];
842
843 return dts;
844}
845
846static void update_initial_timestamps(AVFormatContext *s, int stream_index,
847 int64_t dts, int64_t pts, AVPacket *pkt)
848{
849 AVStream *st = s->streams[stream_index];
850 AVPacketList *pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
851 int64_t pts_buffer[MAX_REORDER_DELAY+1];
852 int64_t shift;
853 int i, delay;
854
855 if (st->first_dts != AV_NOPTS_VALUE ||
856 dts == AV_NOPTS_VALUE ||
857 st->cur_dts == AV_NOPTS_VALUE ||
858 is_relative(dts))
859 return;
860
861 delay = st->codec->has_b_frames;
862 st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE);
863 st->cur_dts = dts;
864 shift = st->first_dts - RELATIVE_TS_BASE;
865
866 for (i = 0; i<MAX_REORDER_DELAY+1; i++)
867 pts_buffer[i] = AV_NOPTS_VALUE;
868
869 if (is_relative(pts))
870 pts += shift;
871
872 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
873 if (pktl->pkt.stream_index != stream_index)
874 continue;
875 if (is_relative(pktl->pkt.pts))
876 pktl->pkt.pts += shift;
877
878 if (is_relative(pktl->pkt.dts))
879 pktl->pkt.dts += shift;
880
881 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
882 st->start_time = pktl->pkt.pts;
883
884 if (pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
885 pts_buffer[0] = pktl->pkt.pts;
886 for (i = 0; i<delay && pts_buffer[i] > pts_buffer[i + 1]; i++)
887 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]);
888
889 pktl->pkt.dts = select_from_pts_buffer(st, pts_buffer, pktl->pkt.dts);
890 }
891 }
892
893 if (st->start_time == AV_NOPTS_VALUE)
894 st->start_time = pts;
895}
896
897static void update_initial_durations(AVFormatContext *s, AVStream *st,
898 int stream_index, int duration)
899{
900 AVPacketList *pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
901 int64_t cur_dts = RELATIVE_TS_BASE;
902
903 if (st->first_dts != AV_NOPTS_VALUE) {
904 if (st->update_initial_durations_done)
905 return;
906 st->update_initial_durations_done = 1;
907 cur_dts = st->first_dts;
908 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
909 if (pktl->pkt.stream_index == stream_index) {
910 if (pktl->pkt.pts != pktl->pkt.dts ||
911 pktl->pkt.dts != AV_NOPTS_VALUE ||
912 pktl->pkt.duration)
913 break;
914 cur_dts -= duration;
915 }
916 }
917 if (pktl && pktl->pkt.dts != st->first_dts) {
918 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
919 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
920 return;
921 }
922 if (!pktl) {
923 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
924 return;
925 }
926 pktl = s->packet_buffer ? s->packet_buffer : s->parse_queue;
927 st->first_dts = cur_dts;
928 } else if (st->cur_dts != RELATIVE_TS_BASE)
929 return;
930
931 for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
932 if (pktl->pkt.stream_index != stream_index)
933 continue;
934 if (pktl->pkt.pts == pktl->pkt.dts &&
935 (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) &&
936 !pktl->pkt.duration) {
937 pktl->pkt.dts = cur_dts;
938 if (!st->codec->has_b_frames)
939 pktl->pkt.pts = cur_dts;
940// if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
941 pktl->pkt.duration = duration;
942 } else
943 break;
944 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
945 }
946 if (!pktl)
947 st->cur_dts = cur_dts;
948}
949
950static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
951 AVCodecParserContext *pc, AVPacket *pkt)
952{
953 int num, den, presentation_delayed, delay, i;
954 int64_t offset;
955 AVRational duration;
956 int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
957 st->codec->codec_id != AV_CODEC_ID_HEVC;
958
959 if (s->flags & AVFMT_FLAG_NOFILLIN)
960 return;
961
962 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
963 if (pkt->dts == pkt->pts && st->last_dts_for_order_check != AV_NOPTS_VALUE) {
964 if (st->last_dts_for_order_check <= pkt->dts) {
965 st->dts_ordered++;
966 } else {
967 av_log(s, st->dts_misordered ? AV_LOG_DEBUG : AV_LOG_WARNING,
968 "DTS %"PRIi64" < %"PRIi64" out of order\n",
969 pkt->dts,
970 st->last_dts_for_order_check);
971 st->dts_misordered++;
972 }
973 if (st->dts_ordered + st->dts_misordered > 250) {
974 st->dts_ordered >>= 1;
975 st->dts_misordered >>= 1;
976 }
977 }
978
979 st->last_dts_for_order_check = pkt->dts;
980 if (st->dts_ordered < 8*st->dts_misordered && pkt->dts == pkt->pts)
981 pkt->dts = AV_NOPTS_VALUE;
982 }
983
984 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
985 pkt->dts = AV_NOPTS_VALUE;
986
987 if (pc && pc->pict_type == AV_PICTURE_TYPE_B
988 && !st->codec->has_b_frames)
989 //FIXME Set low_delay = 0 when has_b_frames = 1
990 st->codec->has_b_frames = 1;
991
992 /* do we have a video B-frame ? */
993 delay = st->codec->has_b_frames;
994 presentation_delayed = 0;
995
996 /* XXX: need has_b_frames, but cannot get it if the codec is
997 * not initialized */
998 if (delay &&
999 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1000 presentation_delayed = 1;
1001
1002 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1003 st->pts_wrap_bits < 63 &&
1004 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1005 if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
1006 pkt->dts -= 1LL << st->pts_wrap_bits;
1007 } else
1008 pkt->pts += 1LL << st->pts_wrap_bits;
1009 }
1010
1011 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
1012 * We take the conservative approach and discard both.
1013 * Note: If this is misbehaving for an H.264 file, then possibly
1014 * presentation_delayed is not set correctly. */
1015 if (delay == 1 && pkt->dts == pkt->pts &&
1016 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
1017 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1018 if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
1019 && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1020 pkt->dts = AV_NOPTS_VALUE;
1021 }
1022
1023 duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
1024 if (pkt->duration == 0) {
1025 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1026 if (den && num) {
1027 duration = (AVRational) {num, den};
1028 pkt->duration = av_rescale_rnd(1,
1029 num * (int64_t) st->time_base.den,
1030 den * (int64_t) st->time_base.num,
1031 AV_ROUND_DOWN);
1032 }
1033 }
1034
1035 if (pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1036 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1037
1038 /* Correct timestamps with byte offset if demuxers only have timestamps
1039 * on packet boundaries */
1040 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
1041 /* this will estimate bitrate based on this frame's duration and size */
1042 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1043 if (pkt->pts != AV_NOPTS_VALUE)
1044 pkt->pts += offset;
1045 if (pkt->dts != AV_NOPTS_VALUE)
1046 pkt->dts += offset;
1047 }
1048
1049 /* This may be redundant, but it should not hurt. */
1050 if (pkt->dts != AV_NOPTS_VALUE &&
1051 pkt->pts != AV_NOPTS_VALUE &&
1052 pkt->pts > pkt->dts)
1053 presentation_delayed = 1;
1054
1055 av_dlog(NULL,
1056 "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1057 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
1058 pkt->stream_index, pc, pkt->duration);
1059 /* Interpolate PTS and DTS if they are not present. We skip H264
1060 * currently because delay and has_b_frames are not reliably set. */
1061 if ((delay == 0 || (delay == 1 && pc)) &&
1062 onein_oneout) {
1063 if (presentation_delayed) {
1064 /* DTS = decompression timestamp */
1065 /* PTS = presentation timestamp */
1066 if (pkt->dts == AV_NOPTS_VALUE)
1067 pkt->dts = st->last_IP_pts;
1068 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1069 if (pkt->dts == AV_NOPTS_VALUE)
1070 pkt->dts = st->cur_dts;
1071
1072 /* This is tricky: the dts must be incremented by the duration
1073 * of the frame we are displaying, i.e. the last I- or P-frame. */
1074 if (st->last_IP_duration == 0)
1075 st->last_IP_duration = pkt->duration;
1076 if (pkt->dts != AV_NOPTS_VALUE)
1077 st->cur_dts = pkt->dts + st->last_IP_duration;
1078 st->last_IP_duration = pkt->duration;
1079 st->last_IP_pts = pkt->pts;
1080 /* Cannot compute PTS if not present (we can compute it only
1081 * by knowing the future). */
1082 } else if (pkt->pts != AV_NOPTS_VALUE ||
1083 pkt->dts != AV_NOPTS_VALUE ||
1084 pkt->duration ) {
1085
1086 /* presentation is not delayed: PTS and DTS are the same */
1087 if (pkt->pts == AV_NOPTS_VALUE)
1088 pkt->pts = pkt->dts;
1089 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1090 pkt->pts, pkt);
1091 if (pkt->pts == AV_NOPTS_VALUE)
1092 pkt->pts = st->cur_dts;
1093 pkt->dts = pkt->pts;
1094 if (pkt->pts != AV_NOPTS_VALUE)
1095 st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
1096 }
1097 }
1098
1099 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
1100 st->pts_buffer[0] = pkt->pts;
1101 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
1102 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
1103
1104 pkt->dts = select_from_pts_buffer(st, st->pts_buffer, pkt->dts);
1105 }
1106 // We skipped it above so we try here.
1107 if (!onein_oneout)
1108 // This should happen on the first packet
1109 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1110 if (pkt->dts > st->cur_dts)
1111 st->cur_dts = pkt->dts;
1112
1113 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1114 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1115
1116 /* update flags */
1117 if (is_intra_only(st->codec))
1118 pkt->flags |= AV_PKT_FLAG_KEY;
1119 if (pc)
1120 pkt->convergence_duration = pc->convergence_duration;
1121}
1122
1123static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1124{
1125 while (*pkt_buf) {
1126 AVPacketList *pktl = *pkt_buf;
1127 *pkt_buf = pktl->next;
1128 av_free_packet(&pktl->pkt);
1129 av_freep(&pktl);
1130 }
1131 *pkt_buf_end = NULL;
1132}
1133
1134/**
1135 * Parse a packet, add all split parts to parse_queue.
1136 *
1137 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
1138 */
1139static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1140{
1141 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1142 AVStream *st = s->streams[stream_index];
1143 uint8_t *data = pkt ? pkt->data : NULL;
1144 int size = pkt ? pkt->size : 0;
1145 int ret = 0, got_output = 0;
1146
1147 if (!pkt) {
1148 av_init_packet(&flush_pkt);
1149 pkt = &flush_pkt;
1150 got_output = 1;
1151 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1152 // preserve 0-size sync packets
1153 compute_pkt_fields(s, st, st->parser, pkt);
1154 }
1155
1156 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1157 int len;
1158
1159 av_init_packet(&out_pkt);
1160 len = av_parser_parse2(st->parser, st->codec,
1161 &out_pkt.data, &out_pkt.size, data, size,
1162 pkt->pts, pkt->dts, pkt->pos);
1163
1164 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1165 pkt->pos = -1;
1166 /* increment read pointer */
1167 data += len;
1168 size -= len;
1169
1170 got_output = !!out_pkt.size;
1171
1172 if (!out_pkt.size)
1173 continue;
1174
1175 if (pkt->side_data) {
1176 out_pkt.side_data = pkt->side_data;
1177 out_pkt.side_data_elems = pkt->side_data_elems;
1178 pkt->side_data = NULL;
1179 pkt->side_data_elems = 0;
1180 }
1181
1182 /* set the duration */
1183 out_pkt.duration = 0;
1184 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1185 if (st->codec->sample_rate > 0) {
1186 out_pkt.duration =
1187 av_rescale_q_rnd(st->parser->duration,
1188 (AVRational) { 1, st->codec->sample_rate },
1189 st->time_base,
1190 AV_ROUND_DOWN);
1191 }
1192 }
1193
1194 out_pkt.stream_index = st->index;
1195 out_pkt.pts = st->parser->pts;
1196 out_pkt.dts = st->parser->dts;
1197 out_pkt.pos = st->parser->pos;
1198
1199 if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1200 out_pkt.pos = st->parser->frame_offset;
1201
1202 if (st->parser->key_frame == 1 ||
1203 (st->parser->key_frame == -1 &&
1204 st->parser->pict_type == AV_PICTURE_TYPE_I))
1205 out_pkt.flags |= AV_PKT_FLAG_KEY;
1206
1207 if (st->parser->key_frame == -1 && st->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1208 out_pkt.flags |= AV_PKT_FLAG_KEY;
1209
1210 compute_pkt_fields(s, st, st->parser, &out_pkt);
1211
1212 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1213 out_pkt.buf = pkt->buf;
1214 pkt->buf = NULL;
1215#if FF_API_DESTRUCT_PACKET
1216FF_DISABLE_DEPRECATION_WARNINGS
1217 out_pkt.destruct = pkt->destruct;
1218 pkt->destruct = NULL;
1219FF_ENABLE_DEPRECATION_WARNINGS
1220#endif
1221 }
1222 if ((ret = av_dup_packet(&out_pkt)) < 0)
1223 goto fail;
1224
1225 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1226 av_free_packet(&out_pkt);
1227 ret = AVERROR(ENOMEM);
1228 goto fail;
1229 }
1230 }
1231
1232 /* end of the stream => close and free the parser */
1233 if (pkt == &flush_pkt) {
1234 av_parser_close(st->parser);
1235 st->parser = NULL;
1236 }
1237
1238fail:
1239 av_free_packet(pkt);
1240 return ret;
1241}
1242
1243static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1244 AVPacketList **pkt_buffer_end,
1245 AVPacket *pkt)
1246{
1247 AVPacketList *pktl;
1248 av_assert0(*pkt_buffer);
1249 pktl = *pkt_buffer;
1250 *pkt = pktl->pkt;
1251 *pkt_buffer = pktl->next;
1252 if (!pktl->next)
1253 *pkt_buffer_end = NULL;
1254 av_freep(&pktl);
1255 return 0;
1256}
1257
1258static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1259{
1260 int ret = 0, i, got_packet = 0;
1261 AVDictionary *metadata = NULL;
1262
1263 av_init_packet(pkt);
1264
1265 while (!got_packet && !s->parse_queue) {
1266 AVStream *st;
1267 AVPacket cur_pkt;
1268
1269 /* read next packet */
1270 ret = ff_read_packet(s, &cur_pkt);
1271 if (ret < 0) {
1272 if (ret == AVERROR(EAGAIN))
1273 return ret;
1274 /* flush the parsers */
1275 for (i = 0; i < s->nb_streams; i++) {
1276 st = s->streams[i];
1277 if (st->parser && st->need_parsing)
1278 parse_packet(s, NULL, st->index);
1279 }
1280 /* all remaining packets are now in parse_queue =>
1281 * really terminate parsing */
1282 break;
1283 }
1284 ret = 0;
1285 st = s->streams[cur_pkt.stream_index];
1286
1287 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1288 cur_pkt.dts != AV_NOPTS_VALUE &&
1289 cur_pkt.pts < cur_pkt.dts) {
1290 av_log(s, AV_LOG_WARNING,
1291 "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1292 cur_pkt.stream_index,
1293 av_ts2str(cur_pkt.pts),
1294 av_ts2str(cur_pkt.dts),
1295 cur_pkt.size);
1296 }
1297 if (s->debug & FF_FDEBUG_TS)
1298 av_log(s, AV_LOG_DEBUG,
1299 "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1300 cur_pkt.stream_index,
1301 av_ts2str(cur_pkt.pts),
1302 av_ts2str(cur_pkt.dts),
1303 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1304
1305 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1306 st->parser = av_parser_init(st->codec->codec_id);
1307 if (!st->parser) {
1308 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1309 "%s, packets or times may be invalid.\n",
1310 avcodec_get_name(st->codec->codec_id));
1311 /* no parser available: just output the raw packets */
1312 st->need_parsing = AVSTREAM_PARSE_NONE;
1313 } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1314 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1315 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1316 st->parser->flags |= PARSER_FLAG_ONCE;
1317 else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1318 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1319 }
1320
1321 if (!st->need_parsing || !st->parser) {
1322 /* no parsing needed: we just output the packet as is */
1323 *pkt = cur_pkt;
1324 compute_pkt_fields(s, st, NULL, pkt);
1325 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1326 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1327 ff_reduce_index(s, st->index);
1328 av_add_index_entry(st, pkt->pos, pkt->dts,
1329 0, 0, AVINDEX_KEYFRAME);
1330 }
1331 got_packet = 1;
1332 } else if (st->discard < AVDISCARD_ALL) {
1333 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1334 return ret;
1335 } else {
1336 /* free packet */
1337 av_free_packet(&cur_pkt);
1338 }
1339 if (pkt->flags & AV_PKT_FLAG_KEY)
1340 st->skip_to_keyframe = 0;
1341 if (st->skip_to_keyframe) {
1342 av_free_packet(&cur_pkt);
1343 if (got_packet) {
1344 *pkt = cur_pkt;
1345 }
1346 got_packet = 0;
1347 }
1348 }
1349
1350 if (!got_packet && s->parse_queue)
1351 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1352
1353 if (ret >= 0) {
1354 AVStream *st = s->streams[pkt->stream_index];
1355 if (st->skip_samples) {
1356 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1357 if (p) {
1358 AV_WL32(p, st->skip_samples);
1359 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1360 }
1361 st->skip_samples = 0;
1362 }
1363
1364 if (st->inject_global_side_data) {
1365 for (i = 0; i < st->nb_side_data; i++) {
1366 AVPacketSideData *src_sd = &st->side_data[i];
1367 uint8_t *dst_data;
1368
1369 if (av_packet_get_side_data(pkt, src_sd->type, NULL))
1370 continue;
1371
1372 dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
1373 if (!dst_data) {
1374 av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
1375 continue;
1376 }
1377
1378 memcpy(dst_data, src_sd->data, src_sd->size);
1379 }
1380 st->inject_global_side_data = 0;
1381 }
1382
1383 if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
1384 av_packet_merge_side_data(pkt);
1385 }
1386
1387 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1388 if (metadata) {
1389 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
1390 av_dict_copy(&s->metadata, metadata, 0);
1391 av_dict_free(&metadata);
1392 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
1393 }
1394
1395 if (s->debug & FF_FDEBUG_TS)
1396 av_log(s, AV_LOG_DEBUG,
1397 "read_frame_internal stream=%d, pts=%s, dts=%s, "
1398 "size=%d, duration=%d, flags=%d\n",
1399 pkt->stream_index,
1400 av_ts2str(pkt->pts),
1401 av_ts2str(pkt->dts),
1402 pkt->size, pkt->duration, pkt->flags);
1403
1404 return ret;
1405}
1406
1407int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1408{
1409 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1410 int eof = 0;
1411 int ret;
1412 AVStream *st;
1413
1414 if (!genpts) {
1415 ret = s->packet_buffer
1416 ? read_from_packet_buffer(&s->packet_buffer,
1417 &s->packet_buffer_end, pkt)
1418 : read_frame_internal(s, pkt);
1419 if (ret < 0)
1420 return ret;
1421 goto return_packet;
1422 }
1423
1424 for (;;) {
1425 AVPacketList *pktl = s->packet_buffer;
1426
1427 if (pktl) {
1428 AVPacket *next_pkt = &pktl->pkt;
1429
1430 if (next_pkt->dts != AV_NOPTS_VALUE) {
1431 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1432 // last dts seen for this stream. if any of the packets following
1433 // the current one has no dts, we will set this to AV_NOPTS_VALUE.
1434 int64_t last_dts = next_pkt->dts;
1435 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1436 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1437 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1438 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1439 // not B-frame
1440 next_pkt->pts = pktl->pkt.dts;
1441 }
1442 if (last_dts != AV_NOPTS_VALUE) {
1443 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1444 last_dts = pktl->pkt.dts;
1445 }
1446 }
1447 pktl = pktl->next;
1448 }
1449 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1450 // Fix the case where the last reference frame has no pts (for MXF etc.).
1451 // We only do this when
1452 // 1. we are at EOF,
1453 // 2. we are not able to resolve a pts value for the current packet, and
1454 // 3. the packets for this stream at the end of the file had a valid dts.
1455 next_pkt->pts = last_dts + next_pkt->duration;
1456 }
1457 pktl = s->packet_buffer;
1458 }
1459
1460 /* read packet from packet buffer, if there is data */
1461 st = s->streams[next_pkt->stream_index];
1462 if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
1463 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1464 ret = read_from_packet_buffer(&s->packet_buffer,
1465 &s->packet_buffer_end, pkt);
1466 goto return_packet;
1467 }
1468 }
1469
1470 ret = read_frame_internal(s, pkt);
1471 if (ret < 0) {
1472 if (pktl && ret != AVERROR(EAGAIN)) {
1473 eof = 1;
1474 continue;
1475 } else
1476 return ret;
1477 }
1478
1479 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1480 &s->packet_buffer_end)) < 0)
1481 return AVERROR(ENOMEM);
1482 }
1483
1484return_packet:
1485
1486 st = s->streams[pkt->stream_index];
1487 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1488 ff_reduce_index(s, st->index);
1489 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1490 }
1491
1492 if (is_relative(pkt->dts))
1493 pkt->dts -= RELATIVE_TS_BASE;
1494 if (is_relative(pkt->pts))
1495 pkt->pts -= RELATIVE_TS_BASE;
1496
1497 return ret;
1498}
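
/* Illustrative usage sketch (for exposition only; the example_* helper is
 * hypothetical): the typical demuxing loop built on top of av_read_frame()
 * for this API generation; the caller owns each returned packet and frees it
 * with av_free_packet(). */
#if 0
static void example_demux_loop(AVFormatContext *ic)
{
    AVPacket pkt;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects the stream; pkt.pts / pkt.dts are in
         * that stream's time_base */
        av_free_packet(&pkt);
    }
    /* a negative return value means EOF or a read error */
}
#endif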
1499
1500/* XXX: empty the packet queues */
1501static void flush_packet_queue(AVFormatContext *s)
1502{
1503 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1504 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1505 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1506
1507 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1508}
1509
1510/*******************************************************/
1511/* seek support */
1512
1513int av_find_default_stream_index(AVFormatContext *s)
1514{
1515 int i;
1516 AVStream *st;
1517 int best_stream = 0;
1518 int best_score = -1;
1519
1520 if (s->nb_streams <= 0)
1521 return -1;
1522 for (i = 0; i < s->nb_streams; i++) {
1523 int score = 0;
1524 st = s->streams[i];
1525 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1526 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1527 if (!st->codec->width && !st->codec->height && !st->codec_info_nb_frames)
1528 score += 25;
1529 else
1530 score += 100;
1531 }
1532 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1533 if (!st->codec->sample_rate && !st->codec_info_nb_frames)
1534 score += 12;
1535 else
1536 score += 50;
1537 }
1538
1539 if (score > best_score) {
1540 best_score = score;
1541 best_stream = i;
1542 }
1543 }
1544 return best_stream;
1545}
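
/* Worked example (illustrative, for exposition only): for a file with one
 * real video stream (known dimensions, score 100), one audio stream (known
 * sample rate, score 50) and a cover-art stream (attached picture, skipped
 * by the disposition check, score 0), the video stream index is returned. */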
1546
1547/** Flush the frame reader. */
1548void ff_read_frame_flush(AVFormatContext *s)
1549{
1550 AVStream *st;
1551 int i, j;
1552
1553 flush_packet_queue(s);
1554
1555 /* Reset read state for each stream. */
1556 for (i = 0; i < s->nb_streams; i++) {
1557 st = s->streams[i];
1558
1559 if (st->parser) {
1560 av_parser_close(st->parser);
1561 st->parser = NULL;
1562 }
1563 st->last_IP_pts = AV_NOPTS_VALUE;
1564 st->last_dts_for_order_check = AV_NOPTS_VALUE;
1565 if (st->first_dts == AV_NOPTS_VALUE)
1566 st->cur_dts = RELATIVE_TS_BASE;
1567 else
1568 /* We set the current DTS to an unspecified origin. */
1569 st->cur_dts = AV_NOPTS_VALUE;
1570
1571 st->probe_packets = MAX_PROBE_PACKETS;
1572
1573 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1574 st->pts_buffer[j] = AV_NOPTS_VALUE;
1575
1576 if (s->internal->inject_global_side_data)
1577 st->inject_global_side_data = 1;
1578 }
1579}
1580
1581void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1582{
1583 int i;
1584
1585 for (i = 0; i < s->nb_streams; i++) {
1586 AVStream *st = s->streams[i];
1587
1588 st->cur_dts =
1589 av_rescale(timestamp,
1590 st->time_base.den * (int64_t) ref_st->time_base.num,
1591 st->time_base.num * (int64_t) ref_st->time_base.den);
1592 }
1593}
1594
1595void ff_reduce_index(AVFormatContext *s, int stream_index)
1596{
1597 AVStream *st = s->streams[stream_index];
1598 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1599
1600 if ((unsigned) st->nb_index_entries >= max_entries) {
1601 int i;
1602 for (i = 0; 2 * i < st->nb_index_entries; i++)
1603 st->index_entries[i] = st->index_entries[2 * i];
1604 st->nb_index_entries = i;
1605 }
1606}
1607
1608int ff_add_index_entry(AVIndexEntry **index_entries,
1609 int *nb_index_entries,
1610 unsigned int *index_entries_allocated_size,
1611 int64_t pos, int64_t timestamp,
1612 int size, int distance, int flags)
1613{
1614 AVIndexEntry *entries, *ie;
1615 int index;
1616
1617 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1618 return -1;
1619
1620 if (timestamp == AV_NOPTS_VALUE)
1621 return AVERROR(EINVAL);
1622
1623 if (size < 0 || size > 0x3FFFFFFF)
1624 return AVERROR(EINVAL);
1625
1626 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1627 timestamp -= RELATIVE_TS_BASE;
1628
1629 entries = av_fast_realloc(*index_entries,
1630 index_entries_allocated_size,
1631 (*nb_index_entries + 1) *
1632 sizeof(AVIndexEntry));
1633 if (!entries)
1634 return -1;
1635
1636 *index_entries = entries;
1637
1638 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1639 timestamp, AVSEEK_FLAG_ANY);
1640
1641 if (index < 0) {
1642 index = (*nb_index_entries)++;
1643 ie = &entries[index];
1644 av_assert0(index == 0 || ie[-1].timestamp < timestamp);
1645 } else {
1646 ie = &entries[index];
1647 if (ie->timestamp != timestamp) {
1648 if (ie->timestamp <= timestamp)
1649 return -1;
1650 memmove(entries + index + 1, entries + index,
1651 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1652 (*nb_index_entries)++;
1653 } else if (ie->pos == pos && distance < ie->min_distance)
1654 // do not reduce the distance
1655 distance = ie->min_distance;
1656 }
1657
1658 ie->pos = pos;
1659 ie->timestamp = timestamp;
1660 ie->min_distance = distance;
1661 ie->size = size;
1662 ie->flags = flags;
1663
1664 return index;
1665}
1666
1667int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1668 int size, int distance, int flags)
1669{
1670 timestamp = wrap_timestamp(st, timestamp);
1671 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1672 &st->index_entries_allocated_size, pos,
1673 timestamp, size, distance, flags);
1674}
1675
1676int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1677 int64_t wanted_timestamp, int flags)
1678{
1679 int a, b, m;
1680 int64_t timestamp;
1681
1682 a = -1;
1683 b = nb_entries;
1684
1685 // Optimize appending index entries at the end.
1686 if (b && entries[b - 1].timestamp < wanted_timestamp)
1687 a = b - 1;
1688
1689 while (b - a > 1) {
1690 m = (a + b) >> 1;
1691 timestamp = entries[m].timestamp;
1692 if (timestamp >= wanted_timestamp)
1693 b = m;
1694 if (timestamp <= wanted_timestamp)
1695 a = m;
1696 }
1697 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1698
1699 if (!(flags & AVSEEK_FLAG_ANY))
1700 while (m >= 0 && m < nb_entries &&
1701 !(entries[m].flags & AVINDEX_KEYFRAME))
1702 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1703
1704 if (m == nb_entries)
1705 return -1;
1706 return m;
1707}
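
/* Illustrative sketch (for exposition only; the example_* helper is
 * hypothetical): with keyframe entries at timestamps 0, 100 and 200, a
 * wanted_timestamp of 150 resolves to the entry before it with
 * AVSEEK_FLAG_BACKWARD and to the entry after it otherwise;
 * AVSEEK_FLAG_ANY would also allow non-keyframe entries. */
#if 0
static void example_index_search(void)
{
    const AVIndexEntry entries[3] = {
        { .pos = 0,    .timestamp = 0,   .flags = AVINDEX_KEYFRAME },
        { .pos = 1000, .timestamp = 100, .flags = AVINDEX_KEYFRAME },
        { .pos = 2000, .timestamp = 200, .flags = AVINDEX_KEYFRAME },
    };
    av_assert0(ff_index_search_timestamp(entries, 3, 150, AVSEEK_FLAG_BACKWARD) == 1);
    av_assert0(ff_index_search_timestamp(entries, 3, 150, 0)                    == 2);
}
#endif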
1708
1709int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1710{
1711 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1712 wanted_timestamp, flags);
1713}
1714
1715static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1716 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1717{
1718 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1719 if (stream_index >= 0)
1720 ts = wrap_timestamp(s->streams[stream_index], ts);
1721 return ts;
1722}
1723
1724int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1725 int64_t target_ts, int flags)
1726{
1727 AVInputFormat *avif = s->iformat;
1728 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1729 int64_t ts_min, ts_max, ts;
1730 int index;
1731 int64_t ret;
1732 AVStream *st;
1733
1734 if (stream_index < 0)
1735 return -1;
1736
1737 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1738
1739 ts_max =
1740 ts_min = AV_NOPTS_VALUE;
1741 pos_limit = -1; // GCC falsely says it may be uninitialized.
1742
1743 st = s->streams[stream_index];
1744 if (st->index_entries) {
1745 AVIndexEntry *e;
1746
1747 /* FIXME: Whole function must be checked for non-keyframe entries in
1748 * index case, especially read_timestamp(). */
1749 index = av_index_search_timestamp(st, target_ts,
1750 flags | AVSEEK_FLAG_BACKWARD);
1751 index = FFMAX(index, 0);
1752 e = &st->index_entries[index];
1753
1754 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1755 pos_min = e->pos;
1756 ts_min = e->timestamp;
1757 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1758 pos_min, av_ts2str(ts_min));
1759 } else {
1760 av_assert1(index == 0);
1761 }
1762
1763 index = av_index_search_timestamp(st, target_ts,
1764 flags & ~AVSEEK_FLAG_BACKWARD);
1765 av_assert0(index < st->nb_index_entries);
1766 if (index >= 0) {
1767 e = &st->index_entries[index];
1768 av_assert1(e->timestamp >= target_ts);
1769 pos_max = e->pos;
1770 ts_max = e->timestamp;
1771 pos_limit = pos_max - e->min_distance;
1772 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1773 " dts_max=%s\n", pos_max, pos_limit, av_ts2str(ts_max));
1774 }
1775 }
1776
1777 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1778 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1779 if (pos < 0)
1780 return -1;
1781
1782 /* do the seek */
1783 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1784 return ret;
1785
1786 ff_read_frame_flush(s);
1787 ff_update_cur_dts(s, st, ts);
1788
1789 return 0;
1790}
1791
1792int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1793 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1794{
1795 int64_t step = 1024;
1796 int64_t limit, ts_max;
1797 int64_t filesize = avio_size(s->pb);
1798 int64_t pos_max = filesize - 1;
1799 do {
1800 limit = pos_max;
1801 pos_max = FFMAX(0, (pos_max) - step);
1802 ts_max = ff_read_timestamp(s, stream_index,
1803 &pos_max, limit, read_timestamp);
1804 step += step;
1805 } while (ts_max == AV_NOPTS_VALUE && 2*limit > step);
1806 if (ts_max == AV_NOPTS_VALUE)
1807 return -1;
1808
1809 for (;;) {
1810 int64_t tmp_pos = pos_max + 1;
1811 int64_t tmp_ts = ff_read_timestamp(s, stream_index,
1812 &tmp_pos, INT64_MAX, read_timestamp);
1813 if (tmp_ts == AV_NOPTS_VALUE)
1814 break;
1815 av_assert0(tmp_pos > pos_max);
1816 ts_max = tmp_ts;
1817 pos_max = tmp_pos;
1818 if (tmp_pos >= filesize)
1819 break;
1820 }
1821
1822 if (ts)
1823 *ts = ts_max;
1824 if (pos)
1825 *pos = pos_max;
1826
1827 return 0;
1828}
1829
1830int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1831 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1832 int64_t ts_min, int64_t ts_max,
1833 int flags, int64_t *ts_ret,
1834 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1835 int64_t *, int64_t))
1836{
1837 int64_t pos, ts;
1838 int64_t start_pos;
1839 int no_change;
1840 int ret;
1841
1842 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1843
1844 if (ts_min == AV_NOPTS_VALUE) {
1845 pos_min = s->data_offset;
1846 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1847 if (ts_min == AV_NOPTS_VALUE)
1848 return -1;
1849 }
1850
1851 if (ts_min >= target_ts) {
1852 *ts_ret = ts_min;
1853 return pos_min;
1854 }
1855
1856 if (ts_max == AV_NOPTS_VALUE) {
1857 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1858 return ret;
1859 pos_limit = pos_max;
1860 }
1861
1862 if (ts_max <= target_ts) {
1863 *ts_ret = ts_max;
1864 return pos_max;
1865 }
1866
1867 if (ts_min > ts_max)
1868 return -1;
1869 else if (ts_min == ts_max)
1870 pos_limit = pos_min;
1871
1872 no_change = 0;
1873 while (pos_min < pos_limit) {
1874 av_dlog(s,
1875 "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1876 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1877 assert(pos_limit <= pos_max);
1878
1879 if (no_change == 0) {
1880 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1881 // interpolate position (better than dichotomy)
1882 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1883 ts_max - ts_min) +
1884 pos_min - approximate_keyframe_distance;
1885 } else if (no_change == 1) {
1886 // bisection if interpolation did not change min / max pos last time
1887 pos = (pos_min + pos_limit) >> 1;
1888 } else {
1889 /* linear search if bisection failed, can only happen if there
1890 * are very few or no keyframes between min/max */
1891 pos = pos_min;
1892 }
1893 if (pos <= pos_min)
1894 pos = pos_min + 1;
1895 else if (pos > pos_limit)
1896 pos = pos_limit;
1897 start_pos = pos;
1898
1899 // May pass pos_limit instead of -1.
1900 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp);
1901 if (pos == pos_max)
1902 no_change++;
1903 else
1904 no_change = 0;
1905 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s"
1906 " target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1907 pos_min, pos, pos_max,
1908 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1909 pos_limit, start_pos, no_change);
1910 if (ts == AV_NOPTS_VALUE) {
1911 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1912 return -1;
1913 }
1914 if (target_ts <= ts) {
1915 pos_limit = start_pos - 1;
1916 pos_max = pos;
1917 ts_max = ts;
1918 }
1919 if (target_ts >= ts) {
1920 pos_min = pos;
1921 ts_min = ts;
1922 }
1923 }
1924
1925 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1926 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1927#if 0
1928 pos_min = pos;
1929 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1930 pos_min++;
1931 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1932 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1933 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1934#endif
1935 *ts_ret = ts;
1936 return pos;
1937}
1938
1939static int seek_frame_byte(AVFormatContext *s, int stream_index,
1940 int64_t pos, int flags)
1941{
1942 int64_t pos_min, pos_max;
1943
1944 pos_min = s->data_offset;
1945 pos_max = avio_size(s->pb) - 1;
1946
1947 if (pos < pos_min)
1948 pos = pos_min;
1949 else if (pos > pos_max)
1950 pos = pos_max;
1951
1952 avio_seek(s->pb, pos, SEEK_SET);
1953
1954 s->io_repositioned = 1;
1955
1956 return 0;
1957}
1958
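/**
 * Seek using the stream's index entries.
 *
 * If the target timestamp lies beyond the last index entry, packets are read
 * sequentially from the last known position until a keyframe past the target
 * is found (for most formats this also extends the index as a side effect),
 * and the index lookup is then retried.
 */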
1959static int seek_frame_generic(AVFormatContext *s, int stream_index,
1960 int64_t timestamp, int flags)
1961{
1962 int index;
1963 int64_t ret;
1964 AVStream *st;
1965 AVIndexEntry *ie;
1966
1967 st = s->streams[stream_index];
1968
1969 index = av_index_search_timestamp(st, timestamp, flags);
1970
1971 if (index < 0 && st->nb_index_entries &&
1972 timestamp < st->index_entries[0].timestamp)
1973 return -1;
1974
1975 if (index < 0 || index == st->nb_index_entries - 1) {
1976 AVPacket pkt;
1977 int nonkey = 0;
1978
1979 if (st->nb_index_entries) {
1980 av_assert0(st->index_entries);
1981 ie = &st->index_entries[st->nb_index_entries - 1];
1982 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1983 return ret;
1984 ff_update_cur_dts(s, st, ie->timestamp);
1985 } else {
1986 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1987 return ret;
1988 }
1989 for (;;) {
1990 int read_status;
1991 do {
1992 read_status = av_read_frame(s, &pkt);
1993 } while (read_status == AVERROR(EAGAIN));
1994 if (read_status < 0)
1995 break;
1996 av_free_packet(&pkt);
1997 if (stream_index == pkt.stream_index && pkt.dts > timestamp) {
1998 if (pkt.flags & AV_PKT_FLAG_KEY)
1999 break;
2000 if (nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS) {
2001                    av_log(s, AV_LOG_ERROR, "seek_frame_generic failed: this stream seems to contain no keyframes after the target timestamp, %d non-keyframes found\n", nonkey);
2002 break;
2003 }
2004 }
2005 }
2006 index = av_index_search_timestamp(st, timestamp, flags);
2007 }
2008 if (index < 0)
2009 return -1;
2010
2011 ff_read_frame_flush(s);
2012 if (s->iformat->read_seek)
2013 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2014 return 0;
2015 ie = &st->index_entries[index];
2016 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2017 return ret;
2018 ff_update_cur_dts(s, st, ie->timestamp);
2019
2020 return 0;
2021}
2022
2023static int seek_frame_internal(AVFormatContext *s, int stream_index,
2024 int64_t timestamp, int flags)
2025{
2026 int ret;
2027 AVStream *st;
2028
2029 if (flags & AVSEEK_FLAG_BYTE) {
2030 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2031 return -1;
2032 ff_read_frame_flush(s);
2033 return seek_frame_byte(s, stream_index, timestamp, flags);
2034 }
2035
2036 if (stream_index < 0) {
2037 stream_index = av_find_default_stream_index(s);
2038 if (stream_index < 0)
2039 return -1;
2040
2041 st = s->streams[stream_index];
2042 /* timestamp for default must be expressed in AV_TIME_BASE units */
2043 timestamp = av_rescale(timestamp, st->time_base.den,
2044 AV_TIME_BASE * (int64_t) st->time_base.num);
2045 }
2046
2047 /* first, we try the format specific seek */
2048 if (s->iformat->read_seek) {
2049 ff_read_frame_flush(s);
2050 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2051 } else
2052 ret = -1;
2053 if (ret >= 0)
2054 return 0;
2055
2056 if (s->iformat->read_timestamp &&
2057 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2058 ff_read_frame_flush(s);
2059 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2060 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2061 ff_read_frame_flush(s);
2062 return seek_frame_generic(s, stream_index, timestamp, flags);
2063 } else
2064 return -1;
2065}
2066
2067int av_seek_frame(AVFormatContext *s, int stream_index,
2068 int64_t timestamp, int flags)
2069{
2070 int ret;
2071
2072 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2073 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2074 if ((flags & AVSEEK_FLAG_BACKWARD))
2075 max_ts = timestamp;
2076 else
2077 min_ts = timestamp;
2078 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2079 flags & ~AVSEEK_FLAG_BACKWARD);
2080 }
2081
2082 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2083
2084 if (ret >= 0)
2085 ret = avformat_queue_attached_pictures(s);
2086
2087 return ret;
2088}
2089
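/**
 * avformat_seek_file() is the preferred public seeking entry point: the
 * demuxer may land anywhere in [min_ts, max_ts], with ts as the preferred
 * target. A minimal usage sketch (values are illustrative only, error
 * handling omitted) that seeks all streams to roughly 10 seconds:
 *
 *     int64_t t = 10 * AV_TIME_BASE;
 *     if (avformat_seek_file(s, -1, INT64_MIN, t, t, 0) < 0)
 *         av_log(s, AV_LOG_ERROR, "seek failed\n");
 *
 * With stream_index == -1 the timestamps are interpreted in AV_TIME_BASE
 * units.
 */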
2090int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
2091 int64_t ts, int64_t max_ts, int flags)
2092{
2093 if (min_ts > ts || max_ts < ts)
2094 return -1;
2095 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2096 return AVERROR(EINVAL);
2097
2098 if (s->seek2any>0)
2099 flags |= AVSEEK_FLAG_ANY;
2100 flags &= ~AVSEEK_FLAG_BACKWARD;
2101
2102 if (s->iformat->read_seek2) {
2103 int ret;
2104 ff_read_frame_flush(s);
2105
2106 if (stream_index == -1 && s->nb_streams == 1) {
2107 AVRational time_base = s->streams[0]->time_base;
2108 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2109 min_ts = av_rescale_rnd(min_ts, time_base.den,
2110 time_base.num * (int64_t)AV_TIME_BASE,
2111 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2112 max_ts = av_rescale_rnd(max_ts, time_base.den,
2113 time_base.num * (int64_t)AV_TIME_BASE,
2114 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2115 }
2116
2117 ret = s->iformat->read_seek2(s, stream_index, min_ts,
2118 ts, max_ts, flags);
2119
2120 if (ret >= 0)
2121 ret = avformat_queue_attached_pictures(s);
2122 return ret;
2123 }
2124
2125 if (s->iformat->read_timestamp) {
2126 // try to seek via read_timestamp()
2127 }
2128
2129 // Fall back on old API if new is not implemented but old is.
2130 // Note the old API has somewhat different semantics.
2131 if (s->iformat->read_seek || 1) {
2132 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2133 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2134 if (ret<0 && ts != min_ts && max_ts != ts) {
2135 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2136 if (ret >= 0)
2137 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2138 }
2139 return ret;
2140 }
2141
2142 // try some generic seek like seek_frame_generic() but with new ts semantics
2143 return -1; //unreachable
2144}
2145
2146/*******************************************************/
2147
2148/**
2149 * Return TRUE if the format context has a known duration.
2150 *
2151 * @return TRUE if at least one stream, or the context itself, has a valid duration.
2152 */
2153static int has_duration(AVFormatContext *ic)
2154{
2155 int i;
2156 AVStream *st;
2157
2158 for (i = 0; i < ic->nb_streams; i++) {
2159 st = ic->streams[i];
2160 if (st->duration != AV_NOPTS_VALUE)
2161 return 1;
2162 }
2163 if (ic->duration != AV_NOPTS_VALUE)
2164 return 1;
2165 return 0;
2166}
2167
2168/**
2169 * Estimate the global stream timings from those of the individual streams.
2170 *
2171 * Also computes the global bitrate if possible.
2172 */
2173static void update_stream_timings(AVFormatContext *ic)
2174{
2175 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2176 int64_t duration, duration1, filesize;
2177 int i;
2178 AVStream *st;
2179 AVProgram *p;
2180
2181 start_time = INT64_MAX;
2182 start_time_text = INT64_MAX;
2183 end_time = INT64_MIN;
2184 duration = INT64_MIN;
2185 for (i = 0; i < ic->nb_streams; i++) {
2186 st = ic->streams[i];
2187 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2188 start_time1 = av_rescale_q(st->start_time, st->time_base,
2189 AV_TIME_BASE_Q);
2190 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2191 if (start_time1 < start_time_text)
2192 start_time_text = start_time1;
2193 } else
2194 start_time = FFMIN(start_time, start_time1);
2195 end_time1 = AV_NOPTS_VALUE;
2196 if (st->duration != AV_NOPTS_VALUE) {
2197 end_time1 = start_time1 +
2198 av_rescale_q(st->duration, st->time_base,
2199 AV_TIME_BASE_Q);
2200 end_time = FFMAX(end_time, end_time1);
2201 }
2202 for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) {
2203 if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2204 p->start_time = start_time1;
2205 if (p->end_time < end_time1)
2206 p->end_time = end_time1;
2207 }
2208 }
2209 if (st->duration != AV_NOPTS_VALUE) {
2210 duration1 = av_rescale_q(st->duration, st->time_base,
2211 AV_TIME_BASE_Q);
2212 duration = FFMAX(duration, duration1);
2213 }
2214 }
2215 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2216 start_time = start_time_text;
2217 else if (start_time > start_time_text)
2218 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2219
2220 if (start_time != INT64_MAX) {
2221 ic->start_time = start_time;
2222 if (end_time != INT64_MIN) {
2223 if (ic->nb_programs) {
2224 for (i = 0; i < ic->nb_programs; i++) {
2225 p = ic->programs[i];
2226 if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2227 duration = FFMAX(duration, p->end_time - p->start_time);
2228 }
2229 } else
2230 duration = FFMAX(duration, end_time - start_time);
2231 }
2232 }
2233 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2234 ic->duration = duration;
2235 }
2236 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2237 /* compute the bitrate */
2238 double bitrate = (double) filesize * 8.0 * AV_TIME_BASE /
2239 (double) ic->duration;
2240 if (bitrate >= 0 && bitrate <= INT_MAX)
2241 ic->bit_rate = bitrate;
2242 }
2243}
2244
2245static void fill_all_stream_timings(AVFormatContext *ic)
2246{
2247 int i;
2248 AVStream *st;
2249
2250 update_stream_timings(ic);
2251 for (i = 0; i < ic->nb_streams; i++) {
2252 st = ic->streams[i];
2253 if (st->start_time == AV_NOPTS_VALUE) {
2254 if (ic->start_time != AV_NOPTS_VALUE)
2255 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
2256 st->time_base);
2257 if (ic->duration != AV_NOPTS_VALUE)
2258 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
2259 st->time_base);
2260 }
2261 }
2262}
2263
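/**
 * Estimate stream durations from the total bitrate and the file size.
 *
 * If the container reports no bitrate, the per-stream codec bitrates are
 * summed up first. Each missing stream duration is then approximated, in
 * stream time-base units, as
 *
 *     duration = 8 * (filesize - data_offset) / bit_rate
 *
 * so a 10 MB payload at 2 Mb/s yields roughly 40 seconds. This is only a
 * rough estimate, hence the warning emitted below.
 */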
2264static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2265{
2266 int64_t filesize, duration;
2267 int i, show_warning = 0;
2268 AVStream *st;
2269
2270 /* if bit_rate is already set, we believe it */
2271 if (ic->bit_rate <= 0) {
2272 int bit_rate = 0;
2273 for (i = 0; i < ic->nb_streams; i++) {
2274 st = ic->streams[i];
2275 if (st->codec->bit_rate > 0) {
2276 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2277 bit_rate = 0;
2278 break;
2279 }
2280 bit_rate += st->codec->bit_rate;
2281 }
2282 }
2283 ic->bit_rate = bit_rate;
2284 }
2285
2286 /* if duration is already set, we believe it */
2287 if (ic->duration == AV_NOPTS_VALUE &&
2288 ic->bit_rate != 0) {
2289 filesize = ic->pb ? avio_size(ic->pb) : 0;
2290 if (filesize > ic->data_offset) {
2291 filesize -= ic->data_offset;
2292 for (i = 0; i < ic->nb_streams; i++) {
2293 st = ic->streams[i];
2294 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2295 && st->duration == AV_NOPTS_VALUE) {
2296 duration = av_rescale(8 * filesize, st->time_base.den,
2297 ic->bit_rate *
2298 (int64_t) st->time_base.num);
2299 st->duration = duration;
2300 show_warning = 1;
2301 }
2302 }
2303 }
2304 }
2305 if (show_warning)
2306 av_log(ic, AV_LOG_WARNING,
2307 "Estimating duration from bitrate, this may be inaccurate\n");
2308}
2309
2310#define DURATION_MAX_READ_SIZE 250000LL
2311#define DURATION_MAX_RETRY 4
2312
2313/* only usable for MPEG-PS streams */
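/**
 * Estimate the duration by reading packet timestamps from the end of the
 * file.
 *
 * The last DURATION_MAX_READ_SIZE bytes are scanned first; if no usable PTS
 * is found, the window is doubled (up to DURATION_MAX_RETRY times) and the
 * scan repeated. Each stream's duration is taken as the last PTS seen (plus
 * that packet's duration) minus the stream's start time.
 */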
2314static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2315{
2316 AVPacket pkt1, *pkt = &pkt1;
2317 AVStream *st;
2318 int num, den, read_size, i, ret;
2319 int found_duration = 0;
2320 int is_end;
2321 int64_t filesize, offset, duration;
2322 int retry = 0;
2323
2324 /* flush packet queue */
2325 flush_packet_queue(ic);
2326
2327 for (i = 0; i < ic->nb_streams; i++) {
2328 st = ic->streams[i];
2329 if (st->start_time == AV_NOPTS_VALUE &&
2330 st->first_dts == AV_NOPTS_VALUE &&
2331 st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN)
2332 av_log(st->codec, AV_LOG_WARNING,
2333 "start time for stream %d is not set in estimate_timings_from_pts\n", i);
2334
2335 if (st->parser) {
2336 av_parser_close(st->parser);
2337 st->parser = NULL;
2338 }
2339 }
2340
2341 av_opt_set(ic, "skip_changes", "1", AV_OPT_SEARCH_CHILDREN);
2342 /* estimate the end time (duration) */
2343 /* XXX: may need to support wrapping */
2344 filesize = ic->pb ? avio_size(ic->pb) : 0;
2345 do {
2346 is_end = found_duration;
2347 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
2348 if (offset < 0)
2349 offset = 0;
2350
2351 avio_seek(ic->pb, offset, SEEK_SET);
2352 read_size = 0;
2353 for (;;) {
2354 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
2355 break;
2356
2357 do {
2358 ret = ff_read_packet(ic, pkt);
2359 } while (ret == AVERROR(EAGAIN));
2360 if (ret != 0)
2361 break;
2362 read_size += pkt->size;
2363 st = ic->streams[pkt->stream_index];
2364 if (pkt->pts != AV_NOPTS_VALUE &&
2365 (st->start_time != AV_NOPTS_VALUE ||
2366 st->first_dts != AV_NOPTS_VALUE)) {
2367 if (pkt->duration == 0) {
2368 ff_compute_frame_duration(&num, &den, st, st->parser, pkt);
2369 if (den && num) {
2370 pkt->duration = av_rescale_rnd(1,
2371 num * (int64_t) st->time_base.den,
2372 den * (int64_t) st->time_base.num,
2373 AV_ROUND_DOWN);
2374 }
2375 }
2376 duration = pkt->pts + pkt->duration;
2377 found_duration = 1;
2378 if (st->start_time != AV_NOPTS_VALUE)
2379 duration -= st->start_time;
2380 else
2381 duration -= st->first_dts;
2382 if (duration > 0) {
2383 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 ||
2384 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2385 st->duration = duration;
2386 st->info->last_duration = duration;
2387 }
2388 }
2389 av_free_packet(pkt);
2390 }
2391
2392 /* check if all audio/video streams have valid duration */
2393 if (!is_end) {
2394 is_end = 1;
2395 for (i = 0; i < ic->nb_streams; i++) {
2396 st = ic->streams[i];
2397 switch (st->codec->codec_type) {
2398 case AVMEDIA_TYPE_VIDEO:
2399 case AVMEDIA_TYPE_AUDIO:
2400 if (st->duration == AV_NOPTS_VALUE)
2401 is_end = 0;
2402 }
2403 }
2404 }
2405 } while (!is_end &&
2406 offset &&
2407 ++retry <= DURATION_MAX_RETRY);
2408
2409 av_opt_set(ic, "skip_changes", "0", AV_OPT_SEARCH_CHILDREN);
2410
2411    /* warn about audio/video streams whose duration could not be estimated */
2412 for (i = 0; i < ic->nb_streams; i++) {
2413 st = ic->streams[i];
2414 if (st->duration == AV_NOPTS_VALUE) {
2415 switch (st->codec->codec_type) {
2416 case AVMEDIA_TYPE_VIDEO:
2417 case AVMEDIA_TYPE_AUDIO:
2418 if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
2419 av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i);
2420 } else
2421 av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i);
2422 }
2423 }
2424 }
2425 fill_all_stream_timings(ic);
2426
2427 avio_seek(ic->pb, old_offset, SEEK_SET);
2428 for (i = 0; i < ic->nb_streams; i++) {
2429 int j;
2430
2431 st = ic->streams[i];
2432 st->cur_dts = st->first_dts;
2433 st->last_IP_pts = AV_NOPTS_VALUE;
2434 st->last_dts_for_order_check = AV_NOPTS_VALUE;
2435 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
2436 st->pts_buffer[j] = AV_NOPTS_VALUE;
2437 }
2438}
2439
2440static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2441{
2442 int64_t file_size;
2443
2444 /* get the file size, if possible */
2445 if (ic->iformat->flags & AVFMT_NOFILE) {
2446 file_size = 0;
2447 } else {
2448 file_size = avio_size(ic->pb);
2449 file_size = FFMAX(0, file_size);
2450 }
2451
2452 if ((!strcmp(ic->iformat->name, "mpeg") ||
2453 !strcmp(ic->iformat->name, "mpegts")) &&
2454 file_size && ic->pb->seekable) {
2455 /* get accurate estimate from the PTSes */
2456 estimate_timings_from_pts(ic, old_offset);
2457 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2458 } else if (has_duration(ic)) {
2459 /* at least one component has timings - we use them for all
2460 * the components */
2461 fill_all_stream_timings(ic);
2462 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2463 } else {
2464 /* less precise: use bitrate info */
2465 estimate_timings_from_bit_rate(ic);
2466 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2467 }
2468 update_stream_timings(ic);
2469
2470 {
2471 int i;
2472 AVStream av_unused *st;
2473 for (i = 0; i < ic->nb_streams; i++) {
2474 st = ic->streams[i];
2475 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2476 (double) st->start_time / AV_TIME_BASE,
2477 (double) st->duration / AV_TIME_BASE);
2478 }
2479 av_dlog(ic,
2480 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2481 (double) ic->start_time / AV_TIME_BASE,
2482 (double) ic->duration / AV_TIME_BASE,
2483 ic->bit_rate / 1000);
2484 }
2485}
2486
2487static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2488{
2489 AVCodecContext *avctx = st->codec;
2490
2491#define FAIL(errmsg) do { \
2492 if (errmsg_ptr) \
2493 *errmsg_ptr = errmsg; \
2494 return 0; \
2495 } while (0)
2496
2497 if ( avctx->codec_id == AV_CODEC_ID_NONE
2498 && avctx->codec_type != AVMEDIA_TYPE_DATA)
2499 FAIL("unknown codec");
2500 switch (avctx->codec_type) {
2501 case AVMEDIA_TYPE_AUDIO:
2502 if (!avctx->frame_size && determinable_frame_size(avctx))
2503 FAIL("unspecified frame size");
2504 if (st->info->found_decoder >= 0 &&
2505 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2506 FAIL("unspecified sample format");
2507 if (!avctx->sample_rate)
2508 FAIL("unspecified sample rate");
2509 if (!avctx->channels)
2510 FAIL("unspecified number of channels");
2511 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2512 FAIL("no decodable DTS frames");
2513 break;
2514 case AVMEDIA_TYPE_VIDEO:
2515 if (!avctx->width)
2516 FAIL("unspecified size");
2517 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2518 FAIL("unspecified pixel format");
2519 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2520 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2521 FAIL("no frame in rv30/40 and no sar");
2522 break;
2523 case AVMEDIA_TYPE_SUBTITLE:
2524 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2525 FAIL("unspecified size");
2526 break;
2527 case AVMEDIA_TYPE_DATA:
2528 if (avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2529 }
2530
2531 return 1;
2532}
2533
2534/* Return 1 if decoded data was returned, 0 if it was not, or a negative error code. */
2535static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
2536 AVDictionary **options)
2537{
2538 const AVCodec *codec;
2539 int got_picture = 1, ret = 0;
2540 AVFrame *frame = av_frame_alloc();
2541 AVSubtitle subtitle;
2542 AVPacket pkt = *avpkt;
2543
2544 if (!frame)
2545 return AVERROR(ENOMEM);
2546
2547 if (!avcodec_is_open(st->codec) &&
2548 st->info->found_decoder <= 0 &&
2549 (st->codec->codec_id != -st->info->found_decoder || !st->codec->codec_id)) {
2550 AVDictionary *thread_opt = NULL;
2551
2552 codec = find_decoder(s, st, st->codec->codec_id);
2553
2554 if (!codec) {
2555 st->info->found_decoder = -st->codec->codec_id;
2556 ret = -1;
2557 goto fail;
2558 }
2559
2560 /* Force thread count to 1 since the H.264 decoder will not extract
2561 * SPS and PPS to extradata during multi-threaded decoding. */
2562 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2563 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2564 if (!options)
2565 av_dict_free(&thread_opt);
2566 if (ret < 0) {
2567 st->info->found_decoder = -st->codec->codec_id;
2568 goto fail;
2569 }
2570 st->info->found_decoder = 1;
2571 } else if (!st->info->found_decoder)
2572 st->info->found_decoder = 1;
2573
2574 if (st->info->found_decoder < 0) {
2575 ret = -1;
2576 goto fail;
2577 }
2578
2579 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2580 ret >= 0 &&
2581 (!has_codec_parameters(st, NULL) || !has_decode_delay_been_guessed(st) ||
2582 (!st->codec_info_nb_frames &&
2583 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2584 got_picture = 0;
2585 switch (st->codec->codec_type) {
2586 case AVMEDIA_TYPE_VIDEO:
2587 ret = avcodec_decode_video2(st->codec, frame,
2588 &got_picture, &pkt);
2589 break;
2590 case AVMEDIA_TYPE_AUDIO:
2591 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2592 break;
2593 case AVMEDIA_TYPE_SUBTITLE:
2594 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2595 &got_picture, &pkt);
2596 ret = pkt.size;
2597 break;
2598 default:
2599 break;
2600 }
2601 if (ret >= 0) {
2602 if (got_picture)
2603 st->nb_decoded_frames++;
2604 pkt.data += ret;
2605 pkt.size -= ret;
2606 ret = got_picture;
2607 }
2608 }
2609
2610 if (!pkt.data && !got_picture)
2611 ret = -1;
2612
2613fail:
2614 av_frame_free(&frame);
2615 return ret;
2616}
2617
2618unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2619{
2620 while (tags->id != AV_CODEC_ID_NONE) {
2621 if (tags->id == id)
2622 return tags->tag;
2623 tags++;
2624 }
2625 return 0;
2626}
2627
2628enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2629{
2630 int i;
2631 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2632 if (tag == tags[i].tag)
2633 return tags[i].id;
2634 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2635 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2636 return tags[i].id;
2637 return AV_CODEC_ID_NONE;
2638}
2639
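/**
 * Map a raw PCM sample description to the matching codec ID.
 *
 * @param bps    bits per sample (rounded up to whole bytes)
 * @param flt    non-zero for floating-point samples
 * @param be     non-zero for big-endian byte order
 * @param sflags bitmask with bit (bytes_per_sample - 1) set for signed
 *               integer formats
 *
 * For example (illustrative call), 16-bit signed little-endian audio:
 *
 *     enum AVCodecID id = ff_get_pcm_codec_id(16, 0, 0, ~0);
 *     // id == AV_CODEC_ID_PCM_S16LE
 */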
2640enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2641{
2642 if (flt) {
2643 switch (bps) {
2644 case 32:
2645 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2646 case 64:
2647 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2648 default:
2649 return AV_CODEC_ID_NONE;
2650 }
2651 } else {
2652 bps += 7;
2653 bps >>= 3;
2654 if (sflags & (1 << (bps - 1))) {
2655 switch (bps) {
2656 case 1:
2657 return AV_CODEC_ID_PCM_S8;
2658 case 2:
2659 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2660 case 3:
2661 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2662 case 4:
2663 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2664 default:
2665 return AV_CODEC_ID_NONE;
2666 }
2667 } else {
2668 switch (bps) {
2669 case 1:
2670 return AV_CODEC_ID_PCM_U8;
2671 case 2:
2672 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2673 case 3:
2674 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2675 case 4:
2676 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2677 default:
2678 return AV_CODEC_ID_NONE;
2679 }
2680 }
2681 }
2682}
2683
2684unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2685{
2686 unsigned int tag;
2687 if (!av_codec_get_tag2(tags, id, &tag))
2688 return 0;
2689 return tag;
2690}
2691
2692int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2693 unsigned int *tag)
2694{
2695 int i;
2696 for (i = 0; tags && tags[i]; i++) {
2697 const AVCodecTag *codec_tags = tags[i];
2698 while (codec_tags->id != AV_CODEC_ID_NONE) {
2699 if (codec_tags->id == id) {
2700 *tag = codec_tags->tag;
2701 return 1;
2702 }
2703 codec_tags++;
2704 }
2705 }
2706 return 0;
2707}
2708
2709enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2710{
2711 int i;
2712 for (i = 0; tags && tags[i]; i++) {
2713 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2714 if (id != AV_CODEC_ID_NONE)
2715 return id;
2716 }
2717 return AV_CODEC_ID_NONE;
2718}
2719
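/**
 * Fill in missing chapter end times.
 *
 * A chapter without an explicit end is closed at the earliest following
 * chapter start, or at the end of the file (start time + duration) if no
 * later chapter exists; if neither is known, the end is set to the chapter's
 * own start.
 */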
2720static void compute_chapters_end(AVFormatContext *s)
2721{
2722 unsigned int i, j;
2723 int64_t max_time = s->duration +
2724 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2725
2726 for (i = 0; i < s->nb_chapters; i++)
2727 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2728 AVChapter *ch = s->chapters[i];
2729 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2730 ch->time_base)
2731 : INT64_MAX;
2732
2733 for (j = 0; j < s->nb_chapters; j++) {
2734 AVChapter *ch1 = s->chapters[j];
2735 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2736 ch->time_base);
2737 if (j != i && next_start > ch->start && next_start < end)
2738 end = next_start;
2739 }
2740 ch->end = (end == INT64_MAX) ? ch->start : end;
2741 }
2742}
2743
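/**
 * Return the i-th "standard" frame rate candidate, expressed in units of
 * 1/(12*1001) Hz so that both integer and NTSC-style rates are exact.
 *
 * For i < 720 this covers rates from 1/12 fps up to 60 fps in steps of
 * 1/12 fps; the remaining entries are 24000/1001, 30000/1001, 60000/1001,
 * 12000/1001, 15000/1001 and 48000/1001 fps.
 */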
2744static int get_std_framerate(int i)
2745{
2746 if (i < 60 * 12)
2747 return (i + 1) * 1001;
2748 else
2749 return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i - 60 * 12] * 1000 * 12;
2750}
2751
2752/* Is the time base unreliable?
2753 * This is a heuristic that balances quick acceptance of the values in
2754 * the headers against some extra checks.
2755 * Old DivX and Xvid often have nonsense timebases like 1 fps or 2 fps.
2756 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2757 * And there are "variable" fps files this needs to detect as well. */
2758static int tb_unreliable(AVCodecContext *c)
2759{
2760 if (c->time_base.den >= 101L * c->time_base.num ||
2761 c->time_base.den < 5L * c->time_base.num ||
2762 // c->codec_tag == AV_RL32("DIVX") ||
2763 // c->codec_tag == AV_RL32("XVID") ||
2764 c->codec_tag == AV_RL32("mp4v") ||
2765 c->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
2766 c->codec_id == AV_CODEC_ID_GIF ||
2767 c->codec_id == AV_CODEC_ID_H264)
2768 return 1;
2769 return 0;
2770}
2771
2772#if FF_API_FORMAT_PARAMETERS
2773int av_find_stream_info(AVFormatContext *ic)
2774{
2775 return avformat_find_stream_info(ic, NULL);
2776}
2777#endif
2778
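/**
 * Allocate a codec extradata buffer of the given size.
 *
 * The buffer is over-allocated by FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes
 * so that bitstream readers may safely read slightly past the end.
 *
 * @return 0 on success, AVERROR(EINVAL) for an invalid size,
 *         AVERROR(ENOMEM) on allocation failure
 */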
2779int ff_alloc_extradata(AVCodecContext *avctx, int size)
2780{
2781 int ret;
2782
2783 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2784 avctx->extradata_size = 0;
2785 return AVERROR(EINVAL);
2786 }
2787 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2788 if (avctx->extradata) {
2789 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2790 avctx->extradata_size = size;
2791 ret = 0;
2792 } else {
2793 avctx->extradata_size = 0;
2794 ret = AVERROR(ENOMEM);
2795 }
2796 return ret;
2797}
2798
2799int ff_get_extradata(AVCodecContext *avctx, AVIOContext *pb, int size)
2800{
2801 int ret = ff_alloc_extradata(avctx, size);
2802 if (ret < 0)
2803 return ret;
2804 ret = avio_read(pb, avctx->extradata, size);
2805 if (ret != size) {
2806 av_freep(&avctx->extradata);
2807 avctx->extradata_size = 0;
2808 av_log(avctx, AV_LOG_ERROR, "Failed to read extradata of size %d\n", size);
2809 return ret < 0 ? ret : AVERROR_INVALIDDATA;
2810 }
2811
2812 return ret;
2813}
2814
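/**
 * Accumulate frame rate statistics for one decoding timestamp.
 *
 * For every standard frame rate candidate the deviation between the observed
 * DTS and the nearest tick of that rate is accumulated, together with its
 * square, so that ff_rfps_calculate() can later derive a variance and pick
 * the best-fitting candidate. The GCD of consecutive DTS deltas is tracked
 * as well, as a fallback for unreliable time bases.
 */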
2815int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
2816{
2817 int i, j;
2818 int64_t last = st->info->last_dts;
2819
2820 if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
2821 && ts - (uint64_t)last < INT64_MAX) {
2822 double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
2823 int64_t duration = ts - last;
2824
2825 if (!st->info->duration_error)
2826 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2827 if (!st->info->duration_error)
2828 return AVERROR(ENOMEM);
2829
2830// if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2831// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2832 for (i = 0; i<MAX_STD_TIMEBASES; i++) {
2833 if (st->info->duration_error[0][1][i] < 1e10) {
2834 int framerate = get_std_framerate(i);
2835 double sdts = dts*framerate/(1001*12);
2836 for (j= 0; j<2; j++) {
2837 int64_t ticks = llrint(sdts+j*0.5);
2838 double error= sdts - ticks + j*0.5;
2839 st->info->duration_error[j][0][i] += error;
2840 st->info->duration_error[j][1][i] += error*error;
2841 }
2842 }
2843 }
2844 st->info->duration_count++;
2845 st->info->rfps_duration_sum += duration;
2846
2847 if (st->info->duration_count % 10 == 0) {
2848 int n = st->info->duration_count;
2849 for (i = 0; i<MAX_STD_TIMEBASES; i++) {
2850 if (st->info->duration_error[0][1][i] < 1e10) {
2851 double a0 = st->info->duration_error[0][0][i] / n;
2852 double error0 = st->info->duration_error[0][1][i] / n - a0*a0;
2853 double a1 = st->info->duration_error[1][0][i] / n;
2854 double error1 = st->info->duration_error[1][1][i] / n - a1*a1;
2855 if (error0 > 0.04 && error1 > 0.04) {
2856 st->info->duration_error[0][1][i] = 2e10;
2857 st->info->duration_error[1][1][i] = 2e10;
2858 }
2859 }
2860 }
2861 }
2862
2863 // ignore the first 4 values, they might have some random jitter
2864 if (st->info->duration_count > 3 && is_relative(ts) == is_relative(last))
2865 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2866 }
2867 if (ts != AV_NOPTS_VALUE)
2868 st->info->last_dts = ts;
2869
2870 return 0;
2871}
2872
2873void ff_rfps_calculate(AVFormatContext *ic)
2874{
2875 int i, j;
2876
2877 for (i = 0; i < ic->nb_streams; i++) {
2878 AVStream *st = ic->streams[i];
2879
2880 if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO)
2881 continue;
2882        // the check for tb_unreliable() is not completely correct, since this is not about handling
2883        // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2884        // ipmovie.c produces.
2885 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2886 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2887 if (st->info->duration_count>1 && !st->r_frame_rate.num
2888 && tb_unreliable(st->codec)) {
2889 int num = 0;
2890 double best_error= 0.01;
2891 AVRational ref_rate = st->r_frame_rate.num ? st->r_frame_rate : av_inv_q(st->time_base);
2892
2893 for (j= 0; j<MAX_STD_TIMEBASES; j++) {
2894 int k;
2895
2896 if (st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2897 continue;
2898 if (!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2899 continue;
2900
2901 if (av_q2d(st->time_base) * st->info->rfps_duration_sum / st->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j))
2902 continue;
2903
2904 for (k= 0; k<2; k++) {
2905 int n = st->info->duration_count;
2906 double a= st->info->duration_error[k][0][j] / n;
2907 double error= st->info->duration_error[k][1][j]/n - a*a;
2908
2909 if (error < best_error && best_error> 0.000000001) {
2910 best_error= error;
2911 num = get_std_framerate(j);
2912 }
2913 if (error < 0.02)
2914 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2915 }
2916 }
2917            // do not increase the frame rate by more than 1% in order to match a standard rate.
2918 if (num && (!ref_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(ref_rate)))
2919 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2920 }
2921 if ( !st->avg_frame_rate.num
2922 && st->r_frame_rate.num && st->info->rfps_duration_sum
2923 && st->info->codec_info_duration <= 0
2924 && st->info->duration_count > 2
2925 && fabs(1.0 / (av_q2d(st->r_frame_rate) * av_q2d(st->time_base)) - st->info->rfps_duration_sum / (double)st->info->duration_count) <= 1.0
2926 ) {
2927 av_log(ic, AV_LOG_DEBUG, "Setting avg frame rate based on r frame rate\n");
2928 st->avg_frame_rate = st->r_frame_rate;
2929 }
2930
2931 av_freep(&st->info->duration_error);
2932 st->info->last_dts = AV_NOPTS_VALUE;
2933 st->info->duration_count = 0;
2934 st->info->rfps_duration_sum = 0;
2935 }
2936}
2937
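/**
 * avformat_find_stream_info() probes the beginning of the input so that all
 * AVStream codec parameters are known before decoding starts. A minimal
 * usage sketch (error handling shortened, "input.mkv" is just a placeholder
 * name):
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ic, NULL) < 0) {
 *         avformat_close_input(&ic);
 *         return -1;
 *     }
 *     av_dump_format(ic, 0, "input.mkv", 0);
 *     avformat_close_input(&ic);
 */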
2938int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2939{
2940 int i, count, ret = 0, j;
2941 int64_t read_size;
2942 AVStream *st;
2943 AVPacket pkt1, *pkt;
2944 int64_t old_offset = avio_tell(ic->pb);
2945    // new streams might appear; there are no options set for those
2946 int orig_nb_streams = ic->nb_streams;
2947 int flush_codecs;
2948 int64_t max_analyze_duration = ic->max_analyze_duration2;
2949 int64_t probesize = ic->probesize2;
2950
2951 if (!max_analyze_duration)
2952 max_analyze_duration = ic->max_analyze_duration;
2953 if (ic->probesize)
2954 probesize = ic->probesize;
2955 flush_codecs = probesize > 0;
2956
2957 av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN);
2958
2959 if (!max_analyze_duration) {
2960 if (!strcmp(ic->iformat->name, "flv") && !(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2961 max_analyze_duration = 10*AV_TIME_BASE;
2962 } else
2963 max_analyze_duration = 5*AV_TIME_BASE;
2964 }
2965
2966 if (ic->pb)
2967 av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d\n",
2968 avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count);
2969
2970 for (i = 0; i < ic->nb_streams; i++) {
2971 const AVCodec *codec;
2972 AVDictionary *thread_opt = NULL;
2973 st = ic->streams[i];
2974
2975 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2976 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2977/* if (!st->time_base.num)
2978 st->time_base = */
2979 if (!st->codec->time_base.num)
2980 st->codec->time_base = st->time_base;
2981 }
2982 // only for the split stuff
2983 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2984 st->parser = av_parser_init(st->codec->codec_id);
2985 if (st->parser) {
2986 if (st->need_parsing == AVSTREAM_PARSE_HEADERS) {
2987 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2988 } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2989 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2990 }
2991 } else if (st->need_parsing) {
2992 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2993 "%s, packets or times may be invalid.\n",
2994 avcodec_get_name(st->codec->codec_id));
2995 }
2996 }
2997 codec = find_decoder(ic, st, st->codec->codec_id);
2998
2999 /* Force thread count to 1 since the H.264 decoder will not extract
3000 * SPS and PPS to extradata during multi-threaded decoding. */
3001 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
3002
3003 /* Ensure that subtitle_header is properly set. */
3004 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
3005 && codec && !st->codec->codec) {
3006 if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3007 av_log(ic, AV_LOG_WARNING,
3008 "Failed to open codec in av_find_stream_info\n");
3009 }
3010
3011 // Try to just open decoders, in case this is enough to get parameters.
3012 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
3013 if (codec && !st->codec->codec)
3014 if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3015 av_log(ic, AV_LOG_WARNING,
3016 "Failed to open codec in av_find_stream_info\n");
3017 }
3018 if (!options)
3019 av_dict_free(&thread_opt);
3020 }
3021
3022 for (i = 0; i < ic->nb_streams; i++) {
3023#if FF_API_R_FRAME_RATE
3024 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
3025#endif
3026 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
3027 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
3028 }
3029
3030 count = 0;
3031 read_size = 0;
3032 for (;;) {
3033 if (ff_check_interrupt(&ic->interrupt_callback)) {
3034 ret = AVERROR_EXIT;
3035 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
3036 break;
3037 }
3038
3039 /* check if one codec still needs to be handled */
3040 for (i = 0; i < ic->nb_streams; i++) {
3041 int fps_analyze_framecount = 20;
3042
3043 st = ic->streams[i];
3044 if (!has_codec_parameters(st, NULL))
3045 break;
3046 /* If the timebase is coarse (like the usual millisecond precision
3047 * of mkv), we need to analyze more frames to reliably arrive at
3048 * the correct fps. */
3049 if (av_q2d(st->time_base) > 0.0005)
3050 fps_analyze_framecount *= 2;
3051 if (!tb_unreliable(st->codec))
3052 fps_analyze_framecount = 0;
3053 if (ic->fps_probe_size >= 0)
3054 fps_analyze_framecount = ic->fps_probe_size;
3055 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
3056 fps_analyze_framecount = 0;
3057 /* variable fps and no guess at the real fps */
3058 if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&
3059 st->info->duration_count < fps_analyze_framecount &&
3060 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3061 break;
3062 if (st->parser && st->parser->parser->split &&
3063 !st->codec->extradata)
3064 break;
3065 if (st->first_dts == AV_NOPTS_VALUE &&
3066 !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
3067 st->codec_info_nb_frames < ic->max_ts_probe &&
3068 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3069 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
3070 break;
3071 }
3072 if (i == ic->nb_streams) {
3073 /* NOTE: If the format has no header, then we need to read some
3074 * packets to get most of the streams, so we cannot stop here. */
3075 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
3076 /* If we found the info for all the codecs, we can stop. */
3077 ret = count;
3078 av_log(ic, AV_LOG_DEBUG, "All info found\n");
3079 flush_codecs = 0;
3080 break;
3081 }
3082 }
3083 /* We did not get all the codec info, but we read too much data. */
3084 if (read_size >= probesize) {
3085 ret = count;
3086 av_log(ic, AV_LOG_DEBUG,
3087 "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);
3088 for (i = 0; i < ic->nb_streams; i++)
3089 if (!ic->streams[i]->r_frame_rate.num &&
3090 ic->streams[i]->info->duration_count <= 1 &&
3091 ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
3092 strcmp(ic->iformat->name, "image2"))
3093 av_log(ic, AV_LOG_WARNING,
3094 "Stream #%d: not enough frames to estimate rate; "
3095 "consider increasing probesize\n", i);
3096 break;
3097 }
3098
3099 /* NOTE: A new stream can be added there if no header in file
3100 * (AVFMTCTX_NOHEADER). */
3101 ret = read_frame_internal(ic, &pkt1);
3102 if (ret == AVERROR(EAGAIN))
3103 continue;
3104
3105 if (ret < 0) {
3106 /* EOF or error*/
3107 break;
3108 }
3109
3110 if (ic->flags & AVFMT_FLAG_NOBUFFER)
3111 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
3112 {
3113 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
3114 &ic->packet_buffer_end);
3115 if (!pkt) {
3116 ret = AVERROR(ENOMEM);
3117 goto find_stream_info_err;
3118 }
3119 if ((ret = av_dup_packet(pkt)) < 0)
3120 goto find_stream_info_err;
3121 }
3122
3123 st = ic->streams[pkt->stream_index];
3124 if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3125 read_size += pkt->size;
3126
3127 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
3128 /* check for non-increasing dts */
3129 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3130 st->info->fps_last_dts >= pkt->dts) {
3131 av_log(ic, AV_LOG_DEBUG,
3132 "Non-increasing DTS in stream %d: packet %d with DTS "
3133 "%"PRId64", packet %d with DTS %"PRId64"\n",
3134 st->index, st->info->fps_last_dts_idx,
3135 st->info->fps_last_dts, st->codec_info_nb_frames,
3136 pkt->dts);
3137 st->info->fps_first_dts =
3138 st->info->fps_last_dts = AV_NOPTS_VALUE;
3139 }
3140 /* Check for a discontinuity in dts. If the difference in dts
3141 * is more than 1000 times the average packet duration in the
3142 * sequence, we treat it as a discontinuity. */
3143 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3144 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
3145 (pkt->dts - st->info->fps_last_dts) / 1000 >
3146 (st->info->fps_last_dts - st->info->fps_first_dts) /
3147 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
3148 av_log(ic, AV_LOG_WARNING,
3149 "DTS discontinuity in stream %d: packet %d with DTS "
3150 "%"PRId64", packet %d with DTS %"PRId64"\n",
3151 st->index, st->info->fps_last_dts_idx,
3152 st->info->fps_last_dts, st->codec_info_nb_frames,
3153 pkt->dts);
3154 st->info->fps_first_dts =
3155 st->info->fps_last_dts = AV_NOPTS_VALUE;
3156 }
3157
3158 /* update stored dts values */
3159 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
3160 st->info->fps_first_dts = pkt->dts;
3161 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
3162 }
3163 st->info->fps_last_dts = pkt->dts;
3164 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
3165 }
3166 if (st->codec_info_nb_frames>1) {
3167 int64_t t = 0;
3168
3169 if (st->time_base.den > 0)
3170 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
3171 if (st->avg_frame_rate.num > 0)
3172 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
3173
3174 if ( t == 0
3175 && st->codec_info_nb_frames>30
3176 && st->info->fps_first_dts != AV_NOPTS_VALUE
3177 && st->info->fps_last_dts != AV_NOPTS_VALUE)
3178 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
3179
3180 if (t >= max_analyze_duration) {
3181 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds\n",
3182 max_analyze_duration,
3183 t);
3184 break;
3185 }
3186 if (pkt->duration) {
3187 st->info->codec_info_duration += pkt->duration;
3188                st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame == 2 ? st->parser->repeat_pict + 1 : 2;
3189 }
3190 }
3191#if FF_API_R_FRAME_RATE
3192 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3193 ff_rfps_add_frame(ic, st, pkt->dts);
3194#endif
3195 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
3196 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
3197 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
3198 if (ff_alloc_extradata(st->codec, i))
3199 return AVERROR(ENOMEM);
3200 memcpy(st->codec->extradata, pkt->data,
3201 st->codec->extradata_size);
3202 }
3203 }
3204
3205 /* If still no information, we try to open the codec and to
3206 * decompress the frame. We try to avoid that in most cases as
3207 * it takes longer and uses more memory. For MPEG-4, we need to
3208 * decompress for QuickTime.
3209 *
3210 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
3211 * least one frame of codec data, this makes sure the codec initializes
3212 * the channel configuration and does not only trust the values from
3213 * the container. */
3214 try_decode_frame(ic, st, pkt,
3215 (options && i < orig_nb_streams) ? &options[i] : NULL);
3216
3217 st->codec_info_nb_frames++;
3218 count++;
3219 }
3220
3221 if (flush_codecs) {
3222 AVPacket empty_pkt = { 0 };
3223 int err = 0;
3224 av_init_packet(&empty_pkt);
3225
3226 for (i = 0; i < ic->nb_streams; i++) {
3227
3228 st = ic->streams[i];
3229
3230 /* flush the decoders */
3231 if (st->info->found_decoder == 1) {
3232 do {
3233 err = try_decode_frame(ic, st, &empty_pkt,
3234 (options && i < orig_nb_streams)
3235 ? &options[i] : NULL);
3236 } while (err > 0 && !has_codec_parameters(st, NULL));
3237
3238 if (err < 0) {
3239 av_log(ic, AV_LOG_INFO,
3240 "decoding for stream %d failed\n", st->index);
3241 }
3242 }
3243 }
3244 }
3245 av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN);
3246
3247 // close codecs which were opened in try_decode_frame()
3248 for (i = 0; i < ic->nb_streams; i++) {
3249 st = ic->streams[i];
3250 avcodec_close(st->codec);
3251 }
3252
3253 ff_rfps_calculate(ic);
3254
3255 for (i = 0; i < ic->nb_streams; i++) {
3256 st = ic->streams[i];
3257 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3258 if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample) {
3259 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3260 if (avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), tag) == st->codec->pix_fmt)
3261 st->codec->codec_tag= tag;
3262 }
3263
3264 /* estimate average framerate if not set by demuxer */
3265 if (st->info->codec_info_duration_fields &&
3266 !st->avg_frame_rate.num &&
3267 st->info->codec_info_duration) {
3268 int best_fps = 0;
3269 double best_error = 0.01;
3270
3271 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3272 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3273 st->info->codec_info_duration < 0)
3274 continue;
3275 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3276 st->info->codec_info_duration_fields * (int64_t) st->time_base.den,
3277 st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000);
3278
3279 /* Round guessed framerate to a "standard" framerate if it's
3280 * within 1% of the original estimate. */
3281 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
3282 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
3283 double error = fabs(av_q2d(st->avg_frame_rate) /
3284 av_q2d(std_fps) - 1);
3285
3286 if (error < best_error) {
3287 best_error = error;
3288 best_fps = std_fps.num;
3289 }
3290 }
3291 if (best_fps)
3292 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3293 best_fps, 12 * 1001, INT_MAX);
3294 }
3295
3296 if (!st->r_frame_rate.num) {
3297 if ( st->codec->time_base.den * (int64_t) st->time_base.num
3298 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t) st->time_base.den) {
3299 st->r_frame_rate.num = st->codec->time_base.den;
3300 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3301 } else {
3302 st->r_frame_rate.num = st->time_base.den;
3303 st->r_frame_rate.den = st->time_base.num;
3304 }
3305 }
3306 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3307 if (!st->codec->bits_per_coded_sample)
3308 st->codec->bits_per_coded_sample =
3309 av_get_bits_per_sample(st->codec->codec_id);
3310 // set stream disposition based on audio service type
3311 switch (st->codec->audio_service_type) {
3312 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3313 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
3314 break;
3315 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3316 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
3317 break;
3318 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3319 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
3320 break;
3321 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3322 st->disposition = AV_DISPOSITION_COMMENT;
3323 break;
3324 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3325 st->disposition = AV_DISPOSITION_KARAOKE;
3326 break;
3327 }
3328 }
3329 }
3330
3331 if (probesize)
3332 estimate_timings(ic, old_offset);
3333
3334 if (ret >= 0 && ic->nb_streams)
3335 /* We could not have all the codec parameters before EOF. */
3336 ret = -1;
3337 for (i = 0; i < ic->nb_streams; i++) {
3338 const char *errmsg;
3339 st = ic->streams[i];
3340 if (!has_codec_parameters(st, &errmsg)) {
3341 char buf[256];
3342 avcodec_string(buf, sizeof(buf), st->codec, 0);
3343 av_log(ic, AV_LOG_WARNING,
3344 "Could not find codec parameters for stream %d (%s): %s\n"
3345 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3346 i, buf, errmsg);
3347 } else {
3348 ret = 0;
3349 }
3350 }
3351
3352 compute_chapters_end(ic);
3353
3354find_stream_info_err:
3355 for (i = 0; i < ic->nb_streams; i++) {
3356 st = ic->streams[i];
3357 if (ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3358 ic->streams[i]->codec->thread_count = 0;
3359 if (st->info)
3360 av_freep(&st->info->duration_error);
3361 av_freep(&ic->streams[i]->info);
3362 }
3363 if (ic->pb)
3364 av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",
3365 avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);
3366 return ret;
3367}
3368
3369AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3370{
3371 int i, j;
3372
3373 for (i = 0; i < ic->nb_programs; i++) {
3374 if (ic->programs[i] == last) {
3375 last = NULL;
3376 } else {
3377 if (!last)
3378 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3379 if (ic->programs[i]->stream_index[j] == s)
3380 return ic->programs[i];
3381 }
3382 }
3383 return NULL;
3384}
3385
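/**
 * av_find_best_stream() selects the "most useful" stream of a given type,
 * preferring streams with more probed frames and a higher bitrate, and can
 * return a suitable decoder at the same time. A minimal sketch for picking
 * the default video stream (error handling omitted):
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         av_log(ic, AV_LOG_ERROR, "no usable video stream found\n");
 */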
3386int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
3387 int wanted_stream_nb, int related_stream,
3388 AVCodec **decoder_ret, int flags)
3389{
3390 int i, nb_streams = ic->nb_streams;
3391 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3392 unsigned *program = NULL;
3393 const AVCodec *decoder = NULL, *best_decoder = NULL;
3394
3395 if (related_stream >= 0 && wanted_stream_nb < 0) {
3396 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3397 if (p) {
3398 program = p->stream_index;
3399 nb_streams = p->nb_stream_indexes;
3400 }
3401 }
3402 for (i = 0; i < nb_streams; i++) {
3403 int real_stream_index = program ? program[i] : i;
3404 AVStream *st = ic->streams[real_stream_index];
3405 AVCodecContext *avctx = st->codec;
3406 if (avctx->codec_type != type)
3407 continue;
3408 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3409 continue;
3410 if (wanted_stream_nb != real_stream_index &&
3411 st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
3412 AV_DISPOSITION_VISUAL_IMPAIRED))
3413 continue;
3414 if (type == AVMEDIA_TYPE_AUDIO && !avctx->channels)
3415 continue;
3416 if (decoder_ret) {
3417 decoder = find_decoder(ic, st, st->codec->codec_id);
3418 if (!decoder) {
3419 if (ret < 0)
3420 ret = AVERROR_DECODER_NOT_FOUND;
3421 continue;
3422 }
3423 }
3424 count = st->codec_info_nb_frames;
3425 bitrate = avctx->bit_rate;
3426 if (!bitrate)
3427 bitrate = avctx->rc_max_rate;
3428 multiframe = FFMIN(5, count);
3429 if ((best_multiframe > multiframe) ||
3430 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3431 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3432 continue;
3433 best_count = count;
3434 best_bitrate = bitrate;
3435 best_multiframe = multiframe;
3436 ret = real_stream_index;
3437 best_decoder = decoder;
3438 if (program && i == nb_streams - 1 && ret < 0) {
3439 program = NULL;
3440 nb_streams = ic->nb_streams;
3441 /* no related stream found, try again with everything */
3442 i = 0;
3443 }
3444 }
3445 if (decoder_ret)
3446 *decoder_ret = (AVCodec*)best_decoder;
3447 return ret;
3448}
3449
3450/*******************************************************/
3451
3452int av_read_play(AVFormatContext *s)
3453{
3454 if (s->iformat->read_play)
3455 return s->iformat->read_play(s);
3456 if (s->pb)
3457 return avio_pause(s->pb, 0);
3458 return AVERROR(ENOSYS);
3459}
3460
3461int av_read_pause(AVFormatContext *s)
3462{
3463 if (s->iformat->read_pause)
3464 return s->iformat->read_pause(s);
3465 if (s->pb)
3466 return avio_pause(s->pb, 1);
3467 return AVERROR(ENOSYS);
3468}
3469
3470void ff_free_stream(AVFormatContext *s, AVStream *st) {
3471 int j;
3472 av_assert0(s->nb_streams>0);
3473 av_assert0(s->streams[ s->nb_streams - 1 ] == st);
3474
3475 for (j = 0; j < st->nb_side_data; j++)
3476 av_freep(&st->side_data[j].data);
3477 av_freep(&st->side_data);
3478 st->nb_side_data = 0;
3479
3480 if (st->parser) {
3481 av_parser_close(st->parser);
3482 }
3483 if (st->attached_pic.data)
3484 av_free_packet(&st->attached_pic);
3485 av_dict_free(&st->metadata);
3486 av_freep(&st->probe_data.buf);
3487 av_freep(&st->index_entries);
3488 av_freep(&st->codec->extradata);
3489 av_freep(&st->codec->subtitle_header);
3490 av_freep(&st->codec);
3491 av_freep(&st->priv_data);
3492 if (st->info)
3493 av_freep(&st->info->duration_error);
3494 av_freep(&st->info);
3495 av_freep(&s->streams[ --s->nb_streams ]);
3496}
3497
3498void avformat_free_context(AVFormatContext *s)
3499{
3500 int i;
3501
3502 if (!s)
3503 return;
3504
3505 av_opt_free(s);
3506 if (s->iformat && s->iformat->priv_class && s->priv_data)
3507 av_opt_free(s->priv_data);
3508 if (s->oformat && s->oformat->priv_class && s->priv_data)
3509 av_opt_free(s->priv_data);
3510
3511 for (i = s->nb_streams - 1; i >= 0; i--) {
3512 ff_free_stream(s, s->streams[i]);
3513 }
3514 for (i = s->nb_programs - 1; i >= 0; i--) {
3515 av_dict_free(&s->programs[i]->metadata);
3516 av_freep(&s->programs[i]->stream_index);
3517 av_freep(&s->programs[i]);
3518 }
3519 av_freep(&s->programs);
3520 av_freep(&s->priv_data);
3521 while (s->nb_chapters--) {
3522 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3523 av_freep(&s->chapters[s->nb_chapters]);
3524 }
3525 av_freep(&s->chapters);
3526 av_dict_free(&s->metadata);
3527 av_freep(&s->streams);
3528 av_freep(&s->internal);
3529 flush_packet_queue(s);
3530 av_free(s);
3531}
3532
3533#if FF_API_CLOSE_INPUT_FILE
3534void av_close_input_file(AVFormatContext *s)
3535{
3536 avformat_close_input(&s);
3537}
3538#endif
3539
3540void avformat_close_input(AVFormatContext **ps)
3541{
3542 AVFormatContext *s;
3543 AVIOContext *pb;
3544
3545 if (!ps || !*ps)
3546 return;
3547
3548 s = *ps;
3549 pb = s->pb;
3550
3551 if ((s->iformat && strcmp(s->iformat->name, "image2") && s->iformat->flags & AVFMT_NOFILE) ||
3552 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3553 pb = NULL;
3554
3555 flush_packet_queue(s);
3556
3557 if (s->iformat)
3558 if (s->iformat->read_close)
3559 s->iformat->read_close(s);
3560
3561 avformat_free_context(s);
3562
3563 *ps = NULL;
3564
3565 avio_close(pb);
3566}
3567
3568#if FF_API_NEW_STREAM
3569AVStream *av_new_stream(AVFormatContext *s, int id)
3570{
3571 AVStream *st = avformat_new_stream(s, NULL);
3572 if (st)
3573 st->id = id;
3574 return st;
3575}
3576#endif
3577
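/**
 * avformat_new_stream() appends a stream to a format context: demuxers call
 * it when a new elementary stream is discovered, muxing applications call it
 * before avformat_write_header(). A minimal muxing-side sketch (the 1/25
 * time base is only an example value and acts as a hint that the muxer may
 * adjust):
 *
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->time_base = (AVRational){ 1, 25 };
 */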
3578AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3579{
3580 AVStream *st;
3581 int i;
3582 AVStream **streams;
3583
3584 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3585 return NULL;
3586 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3587 if (!streams)
3588 return NULL;
3589 s->streams = streams;
3590
3591 st = av_mallocz(sizeof(AVStream));
3592 if (!st)
3593 return NULL;
3594 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3595 av_free(st);
3596 return NULL;
3597 }
3598 st->info->last_dts = AV_NOPTS_VALUE;
3599
3600 st->codec = avcodec_alloc_context3(c);
3601 if (s->iformat) {
3602 /* no default bitrate if decoding */
3603 st->codec->bit_rate = 0;
3604
3605 /* default pts setting is MPEG-like */
3606 avpriv_set_pts_info(st, 33, 1, 90000);
3607 }
3608
3609 st->index = s->nb_streams;
3610 st->start_time = AV_NOPTS_VALUE;
3611 st->duration = AV_NOPTS_VALUE;
3612 /* We set the current DTS to 0 so that formats that have durations but
3613 * no timestamps at all still get some timestamps; formats with partly
3614 * unknown timestamps have their first few packets buffered and their
3615 * timestamps corrected before they are returned to the user. */
3616 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3617 st->first_dts = AV_NOPTS_VALUE;
3618 st->probe_packets = MAX_PROBE_PACKETS;
3619 st->pts_wrap_reference = AV_NOPTS_VALUE;
3620 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3621
3622 st->last_IP_pts = AV_NOPTS_VALUE;
3623 st->last_dts_for_order_check = AV_NOPTS_VALUE;
3624 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
3625 st->pts_buffer[i] = AV_NOPTS_VALUE;
3626
3627 st->sample_aspect_ratio = (AVRational) { 0, 1 };
3628
3629#if FF_API_R_FRAME_RATE
3630 st->info->last_dts = AV_NOPTS_VALUE;
3631#endif
3632 st->info->fps_first_dts = AV_NOPTS_VALUE;
3633 st->info->fps_last_dts = AV_NOPTS_VALUE;
3634
3635 st->inject_global_side_data = s->internal->inject_global_side_data;
3636
3637 s->streams[s->nb_streams++] = st;
3638 return st;
3639}
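/* Illustrative muxing-side usage (a sketch; the codec choice and the
 * parameter values are placeholders, not recommendations):
 *
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->id                     = oc->nb_streams - 1;
 *     st->codec->codec_type      = AVMEDIA_TYPE_VIDEO;
 *     st->codec->codec_id        = AV_CODEC_ID_H264;
 *     st->codec->width           = 1280;
 *     st->codec->height          = 720;
 *     st->codec->time_base       = (AVRational){ 1, 25 };
 */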
3640
3641AVProgram *av_new_program(AVFormatContext *ac, int id)
3642{
3643 AVProgram *program = NULL;
3644 int i;
3645
3646 av_dlog(ac, "new_program: id=0x%04x\n", id);
3647
3648 for (i = 0; i < ac->nb_programs; i++)
3649 if (ac->programs[i]->id == id)
3650 program = ac->programs[i];
3651
3652 if (!program) {
3653 program = av_mallocz(sizeof(AVProgram));
3654 if (!program)
3655 return NULL;
3656 dynarray_add(&ac->programs, &ac->nb_programs, program);
3657 program->discard = AVDISCARD_NONE;
3658 }
3659 program->id = id;
3660 program->pts_wrap_reference = AV_NOPTS_VALUE;
3661 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3662
3663 program->start_time =
3664 program->end_time = AV_NOPTS_VALUE;
3665
3666 return program;
3667}
3668
3669AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
3670 int64_t start, int64_t end, const char *title)
3671{
3672 AVChapter *chapter = NULL;
3673 int i;
3674
3675 if (end != AV_NOPTS_VALUE && start > end) {
3676 av_log(s, AV_LOG_ERROR, "Chapter end time %"PRId64" before start %"PRId64"\n", end, start);
3677 return NULL;
3678 }
3679
3680 for (i = 0; i < s->nb_chapters; i++)
3681 if (s->chapters[i]->id == id)
3682 chapter = s->chapters[i];
3683
3684 if (!chapter) {
3685 chapter = av_mallocz(sizeof(AVChapter));
3686 if (!chapter)
3687 return NULL;
3688 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3689 }
3690 av_dict_set(&chapter->metadata, "title", title, 0);
3691 chapter->id = id;
3692 chapter->time_base = time_base;
3693 chapter->start = start;
3694 chapter->end = end;
3695
3696 return chapter;
3697}
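/* Illustrative call (id, timestamps and title are placeholders): create or
 * update chapter 1 with a millisecond time base, running from 0 to 60 s:
 *
 *     avpriv_new_chapter(s, 1, (AVRational){ 1, 1000 }, 0, 60000, "Intro");
 */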
3698
3699void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
3700{
3701 int i, j;
3702 AVProgram *program = NULL;
3703 void *tmp;
3704
3705 if (idx >= ac->nb_streams) {
3706 av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
3707 return;
3708 }
3709
3710 for (i = 0; i < ac->nb_programs; i++) {
3711 if (ac->programs[i]->id != progid)
3712 continue;
3713 program = ac->programs[i];
3714 for (j = 0; j < program->nb_stream_indexes; j++)
3715 if (program->stream_index[j] == idx)
3716 return;
3717
3718 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3719 if (!tmp)
3720 return;
3721 program->stream_index = tmp;
3722 program->stream_index[program->nb_stream_indexes++] = idx;
3723 return;
3724 }
3725}
3726
3727uint64_t ff_ntp_time(void)
3728{
3729 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; /* truncate to whole ms, shift from the Unix (1970) to the NTP (1900) epoch */
3730}
3731
3732int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
3733{
3734 const char *p;
3735 char *q, buf1[20], c;
3736 int nd, len, percentd_found;
3737
3738 q = buf;
3739 p = path;
3740 percentd_found = 0;
3741 for (;;) {
3742 c = *p++;
3743 if (c == '\0')
3744 break;
3745 if (c == '%') {
3746 do {
3747 nd = 0;
3748 while (av_isdigit(*p))
3749 nd = nd * 10 + *p++ - '0';
3750 c = *p++;
3751 } while (av_isdigit(c));
3752
3753 switch (c) {
3754 case '%':
3755 goto addchar;
3756 case 'd':
3757 if (percentd_found)
3758 goto fail;
3759 percentd_found = 1;
3760 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3761 len = strlen(buf1);
3762 if ((q - buf + len) > buf_size - 1)
3763 goto fail;
3764 memcpy(q, buf1, len);
3765 q += len;
3766 break;
3767 default:
3768 goto fail;
3769 }
3770 } else {
3771addchar:
3772 if ((q - buf) < buf_size - 1)
3773 *q++ = c;
3774 }
3775 }
3776 if (!percentd_found)
3777 goto fail;
3778 *q = '\0';
3779 return 0;
3780fail:
3781 *q = '\0';
3782 return -1;
3783}
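/* Example (illustrative): with path "frame%05d.png" and number 7 this
 * produces "frame00007.png"; a path with no "%d" (or more than one) makes
 * the call fail and return -1:
 *
 *     char name[1024];
 *     if (av_get_frame_filename(name, sizeof(name), "frame%05d.png", 7) < 0)
 *         // the pattern was invalid
 */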
3784
3785void av_url_split(char *proto, int proto_size,
3786 char *authorization, int authorization_size,
3787 char *hostname, int hostname_size,
3788 int *port_ptr, char *path, int path_size, const char *url)
3789{
3790 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3791
3792 if (port_ptr)
3793 *port_ptr = -1;
3794 if (proto_size > 0)
3795 proto[0] = 0;
3796 if (authorization_size > 0)
3797 authorization[0] = 0;
3798 if (hostname_size > 0)
3799 hostname[0] = 0;
3800 if (path_size > 0)
3801 path[0] = 0;
3802
3803 /* parse protocol */
3804 if ((p = strchr(url, ':'))) {
3805 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3806 p++; /* skip ':' */
3807 if (*p == '/')
3808 p++;
3809 if (*p == '/')
3810 p++;
3811 } else {
3812 /* no protocol means plain filename */
3813 av_strlcpy(path, url, path_size);
3814 return;
3815 }
3816
3817 /* separate path from hostname */
3818 ls = strchr(p, '/');
3819 ls2 = strchr(p, '?');
3820 if (!ls)
3821 ls = ls2;
3822 else if (ls && ls2)
3823 ls = FFMIN(ls, ls2);
3824 if (ls)
3825 av_strlcpy(path, ls, path_size);
3826 else
3827 ls = &p[strlen(p)]; /* no path or query string: point at the end of the string so the hostname parsing below covers the rest */
3828
3829 /* the rest is hostname, use that to parse auth/port */
3830 if (ls != p) {
3831 /* authorization (user[:pass]@hostname) */
3832 at2 = p;
3833 while ((at = strchr(p, '@')) && at < ls) {
3834 av_strlcpy(authorization, at2,
3835 FFMIN(authorization_size, at + 1 - at2));
3836 p = at + 1; /* skip '@' */
3837 }
3838
3839 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3840 /* [host]:port */
3841 av_strlcpy(hostname, p + 1,
3842 FFMIN(hostname_size, brk - p));
3843 if (brk[1] == ':' && port_ptr)
3844 *port_ptr = atoi(brk + 2);
3845 } else if ((col = strchr(p, ':')) && col < ls) {
3846 av_strlcpy(hostname, p,
3847 FFMIN(col + 1 - p, hostname_size));
3848 if (port_ptr)
3849 *port_ptr = atoi(col + 1);
3850 } else
3851 av_strlcpy(hostname, p,
3852 FFMIN(ls + 1 - p, hostname_size));
3853 }
3854}
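/* Example decomposition (illustrative values):
 *
 *     "rtsp://user:pass@example.com:554/live/stream?foo=bar"
 *       proto         -> "rtsp"
 *       authorization -> "user:pass"
 *       hostname      -> "example.com"
 *       port          -> 554
 *       path          -> "/live/stream?foo=bar"
 *
 * The query string is kept as part of the path, and a URL without a "proto:"
 * prefix is treated as a plain filename and copied to path.
 */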
3855
3856char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3857{
3858 int i;
3859 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3860 '4', '5', '6', '7',
3861 '8', '9', 'A', 'B',
3862 'C', 'D', 'E', 'F' };
3863 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3864 '4', '5', '6', '7',
3865 '8', '9', 'a', 'b',
3866 'c', 'd', 'e', 'f' };
3867 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3868
3869 for (i = 0; i < s; i++) {
3870 buff[i * 2] = hex_table[src[i] >> 4];
3871 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3872 }
3873
3874 return buff;
3875}
3876
3877int ff_hex_to_data(uint8_t *data, const char *p)
3878{
3879 int c, len, v;
3880
3881 len = 0;
3882 v = 1;
3883 for (;;) {
3884 p += strspn(p, SPACE_CHARS);
3885 if (*p == '\0')
3886 break;
3887 c = av_toupper((unsigned char) *p++);
3888 if (c >= '0' && c <= '9')
3889 c = c - '0';
3890 else if (c >= 'A' && c <= 'F')
3891 c = c - 'A' + 10;
3892 else
3893 break;
3894 v = (v << 4) | c;
3895 if (v & 0x100) {
3896 if (data)
3897 data[len] = v;
3898 len++;
3899 v = 1;
3900 }
3901 }
3902 return len;
3903}
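/* Round-trip example (illustrative): ff_data_to_hex() writes 2*s characters
 * and does not NUL-terminate, so the caller terminates the buffer; with a
 * NULL data pointer ff_hex_to_data() only counts the decodable bytes:
 *
 *     uint8_t bin[] = { 0xde, 0xad, 0xbe, 0xef };
 *     char hex[2 * sizeof(bin) + 1];
 *     ff_data_to_hex(hex, bin, sizeof(bin), 1);
 *     hex[2 * sizeof(bin)] = '\0';            // "deadbeef"
 *     int n = ff_hex_to_data(NULL, hex);      // n == 4
 */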
3904
3905#if FF_API_SET_PTS_INFO
3906void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3907 unsigned int pts_num, unsigned int pts_den)
3908{
3909 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
3910}
3911#endif
3912
3913void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3914 unsigned int pts_num, unsigned int pts_den)
3915{
3916 AVRational new_tb;
3917 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
3918 if (new_tb.num != pts_num)
3919 av_log(NULL, AV_LOG_DEBUG,
3920 "st:%d removing common factor %d from timebase\n",
3921 s->index, pts_num / new_tb.num);
3922 } else
3923 av_log(NULL, AV_LOG_WARNING,
3924 "st:%d has too large timebase, reducing\n", s->index);
3925
3926 if (new_tb.num <= 0 || new_tb.den <= 0) {
3927 av_log(NULL, AV_LOG_ERROR,
3928 "Ignoring attempt to set invalid timebase %d/%d for st:%d\n",
3929 new_tb.num, new_tb.den,
3930 s->index);
3931 return;
3932 }
3933 s->time_base = new_tb;
3934 av_codec_set_pkt_timebase(s->codec, new_tb);
3935 s->pts_wrap_bits = pts_wrap_bits;
3936}
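/* Example (illustrative): an MPEG-TS-style 90 kHz clock with 33-bit
 * timestamps, as installed by default for demuxed streams in
 * avformat_new_stream() above:
 *
 *     avpriv_set_pts_info(st, 33, 1, 90000);   // st->time_base becomes 1/90000
 *
 * A call such as avpriv_set_pts_info(st, 64, 10, 300000) is reduced to
 * 1/30000, with the removed common factor reported at debug level.
 */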
3937
3938void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3939 void *context)
3940{
3941 const char *ptr = str;
3942
3943 /* Parse key=value pairs. */
3944 for (;;) {
3945 const char *key;
3946 char *dest = NULL, *dest_end;
3947 int key_len, dest_len = 0;
3948
3949 /* Skip whitespace and potential commas. */
3950 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3951 ptr++;
3952 if (!*ptr)
3953 break;
3954
3955 key = ptr;
3956
3957 if (!(ptr = strchr(key, '=')))
3958 break;
3959 ptr++;
3960 key_len = ptr - key;
3961
3962 callback_get_buf(context, key, key_len, &dest, &dest_len);
3963 dest_end = dest + dest_len - 1;
3964
3965 if (*ptr == '\"') {
3966 ptr++;
3967 while (*ptr && *ptr != '\"') {
3968 if (*ptr == '\\') {
3969 if (!ptr[1])
3970 break;
3971 if (dest && dest < dest_end)
3972 *dest++ = ptr[1];
3973 ptr += 2;
3974 } else {
3975 if (dest && dest < dest_end)
3976 *dest++ = *ptr;
3977 ptr++;
3978 }
3979 }
3980 if (*ptr == '\"')
3981 ptr++;
3982 } else {
3983 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3984 if (dest && dest < dest_end)
3985 *dest++ = *ptr;
3986 }
3987 if (dest)
3988 *dest = 0;
3989 }
3990}
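/* Callback sketch (illustrative; the struct and field names are made up).
 * The callback sees each key (key_len includes the trailing '=') and points
 * *dest at a buffer of *dest_len bytes for the value; leaving *dest NULL
 * skips that value:
 *
 *     typedef struct Creds { char user[64]; char pass[64]; } Creds;
 *
 *     static void get_buf(void *ctx, const char *key, int key_len,
 *                         char **dest, int *dest_len)
 *     {
 *         Creds *c = ctx;
 *         if (!strncmp(key, "user=", key_len)) {
 *             *dest     = c->user;
 *             *dest_len = sizeof(c->user);
 *         } else if (!strncmp(key, "pass=", key_len)) {
 *             *dest     = c->pass;
 *             *dest_len = sizeof(c->pass);
 *         }
 *     }
 *
 *     // ff_parse_key_value("user=\"alice\", pass=secret", get_buf, &creds);
 */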
3991
3992int ff_find_stream_index(AVFormatContext *s, int id)
3993{
3994 int i;
3995 for (i = 0; i < s->nb_streams; i++)
3996 if (s->streams[i]->id == id)
3997 return i;
3998 return -1;
3999}
4000
4001int64_t ff_iso8601_to_unix_time(const char *datestr)
4002{
4003 struct tm time1 = { 0 }, time2 = { 0 };
4004 char *ret1, *ret2;
4005 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
4006 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
4007 if (ret2 && !ret1)
4008 return av_timegm(&time2);
4009 else
4010 return av_timegm(&time1);
4011}
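/* Note: both patterns are tried so that "2014-02-03 12:34:56" and the ISO
 * "2014-02-03T12:34:56" form are accepted; the spaces around the '-' signs
 * are intentional, since whitespace in the format lets av_small_strptime()
 * skip optional whitespace in the input. The result is the POSIX timestamp
 * of the date interpreted as UTC, e.g. (illustrative):
 *
 *     int64_t t = ff_iso8601_to_unix_time("1970-01-01T00:00:10");  // t == 10
 */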
4012
4013int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
4014 int std_compliance)
4015{
4016 if (ofmt) {
4017 if (ofmt->query_codec)
4018 return ofmt->query_codec(codec_id, std_compliance);
4019 else if (ofmt->codec_tag)
4020 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4021 else if (codec_id == ofmt->video_codec ||
4022 codec_id == ofmt->audio_codec ||
4023 codec_id == ofmt->subtitle_codec)
4024 return 1;
4025 }
4026 return AVERROR_PATCHWELCOME;
4027}
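/* Illustrative use: ask a muxer whether it can store a given codec before
 * adding the stream ("mp4" is only an example name):
 *
 *     AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 *     if (ofmt && avformat_query_codec(ofmt, AV_CODEC_ID_H264,
 *                                      FF_COMPLIANCE_NORMAL) == 1)
 *         // the container can store H.264
 */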
4028
4029int avformat_network_init(void)
4030{
4031#if CONFIG_NETWORK
4032 int ret;
4033 ff_network_inited_globally = 1;
4034 if ((ret = ff_network_init()) < 0)
4035 return ret;
4036 ff_tls_init();
4037#endif
4038 return 0;
4039}
4040
4041int avformat_network_deinit(void)
4042{
4043#if CONFIG_NETWORK
4044 ff_network_close();
4045 ff_tls_deinit();
4046#endif
4047 return 0;
4048}
4049
4050int ff_add_param_change(AVPacket *pkt, int32_t channels,
4051 uint64_t channel_layout, int32_t sample_rate,
4052 int32_t width, int32_t height)
4053{
4054 uint32_t flags = 0;
4055 int size = 4;
4056 uint8_t *data;
4057 if (!pkt)
4058 return AVERROR(EINVAL);
4059 if (channels) {
4060 size += 4;
4061 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4062 }
4063 if (channel_layout) {
4064 size += 8;
4065 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4066 }
4067 if (sample_rate) {
4068 size += 4;
4069 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4070 }
4071 if (width || height) {
4072 size += 8;
4073 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4074 }
4075 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4076 if (!data)
4077 return AVERROR(ENOMEM);
4078 bytestream_put_le32(&data, flags);
4079 if (channels)
4080 bytestream_put_le32(&data, channels);
4081 if (channel_layout)
4082 bytestream_put_le64(&data, channel_layout);
4083 if (sample_rate)
4084 bytestream_put_le32(&data, sample_rate);
4085 if (width || height) {
4086 bytestream_put_le32(&data, width);
4087 bytestream_put_le32(&data, height);
4088 }
4089 return 0;
4090}
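/* The resulting AV_PKT_DATA_PARAM_CHANGE payload is laid out as follows
 * (all fields little-endian, each optional field present only when the
 * corresponding flag is set):
 *
 *     u32 flags
 *     u32 channel count       (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
 *     u64 channel layout      (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
 *     u32 sample rate         (AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
 *     u32 width, u32 height   (AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
 */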
4091
4092AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4093{
4094 AVRational undef = {0, 1};
4095 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4096 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4097 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4098
4099 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4100 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4101 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4102 stream_sample_aspect_ratio = undef;
4103
4104 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4105 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4106 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4107 frame_sample_aspect_ratio = undef;
4108
4109 if (stream_sample_aspect_ratio.num)
4110 return stream_sample_aspect_ratio;
4111 else
4112 return frame_sample_aspect_ratio;
4113}
4114
4115AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4116{
4117 AVRational fr = st->r_frame_rate;
4118 AVRational codec_fr = av_inv_q(st->codec->time_base);
4119 AVRational avg_fr = st->avg_frame_rate;
4120
4121 if (avg_fr.num > 0 && avg_fr.den > 0 && fr.num > 0 && fr.den > 0 &&
4122 av_q2d(avg_fr) < 70 && av_q2d(fr) > 210) {
4123 fr = avg_fr;
4124 }
4125
4127 if (st->codec->ticks_per_frame > 1) {
4128 codec_fr.den *= st->codec->ticks_per_frame;
4129 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4130 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4131 fr = codec_fr;
4132 }
4133
4134 return fr;
4135}
4136
4137int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4138 const char *spec)
4139{
4140 if (*spec <= '9' && *spec >= '0') /* opt:index */
4141 return strtol(spec, NULL, 0) == st->index;
4142 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4143 *spec == 't') { /* opt:[vasdt] */
4144 enum AVMediaType type;
4145
4146 switch (*spec++) {
4147 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4148 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4149 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4150 case 'd': type = AVMEDIA_TYPE_DATA; break;
4151 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4152 default: av_assert0(0);
4153 }
4154 if (type != st->codec->codec_type)
4155 return 0;
4156 if (*spec++ == ':') { /* possibly followed by :index */
4157 int i, index = strtol(spec, NULL, 0);
4158 for (i = 0; i < s->nb_streams; i++)
4159 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4160 return i == st->index;
4161 return 0;
4162 }
4163 return 1;
4164 } else if (*spec == 'p' && *(spec + 1) == ':') {
4165 int prog_id, i, j;
4166 char *endptr;
4167 spec += 2;
4168 prog_id = strtol(spec, &endptr, 0);
4169 for (i = 0; i < s->nb_programs; i++) {
4170 if (s->programs[i]->id != prog_id)
4171 continue;
4172
4173 if (*endptr++ == ':') {
4174 int stream_idx = strtol(endptr, NULL, 0);
4175 return stream_idx >= 0 &&
4176 stream_idx < s->programs[i]->nb_stream_indexes &&
4177 st->index == s->programs[i]->stream_index[stream_idx];
4178 }
4179
4180 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4181 if (st->index == s->programs[i]->stream_index[j])
4182 return 1;
4183 }
4184 return 0;
4185 } else if (*spec == '#' ||
4186 (*spec == 'i' && *(spec + 1) == ':')) {
4187 int stream_id;
4188 char *endptr;
4189 spec += 1 + (*spec == 'i');
4190 stream_id = strtol(spec, &endptr, 0);
4191 if (!*endptr)
4192 return stream_id == st->id;
4193 } else if (*spec == 'm' && *(spec + 1) == ':') {
4194 AVDictionaryEntry *tag;
4195 char *key, *val;
4196 int ret;
4197
4198 spec += 2;
4199 val = strchr(spec, ':');
4200
4201 key = val ? av_strndup(spec, val - spec) : av_strdup(spec);
4202 if (!key)
4203 return AVERROR(ENOMEM);
4204
4205 tag = av_dict_get(st->metadata, key, NULL, 0);
4206 if (tag) {
4207 if (!val || !strcmp(tag->value, val + 1))
4208 ret = 1;
4209 else
4210 ret = 0;
4211 } else
4212 ret = 0;
4213
4214 av_freep(&key);
4215 return ret;
4216 } else if (!*spec) /* empty specifier, matches everything */
4217 return 1;
4218
4219 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4220 return AVERROR(EINVAL);
4221}
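/* Stream specifier examples handled above (illustrative):
 *     ""                matches every stream
 *     "2"               the stream with index 2
 *     "a" / "v:0"       all audio streams / the first video stream
 *     "p:451"           streams of the program with id 451; "p:451:0" its first stream
 *     "#0x2ab"          the stream with id 0x2ab (e.g. an MPEG-TS PID); "i:0x2ab" likewise
 *     "m:language:eng"  streams whose metadata tag "language" equals "eng"
 *                       ("m:language" alone matches any stream carrying that tag)
 */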
4222
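/* The tables below are pre-encoded H.264 SPS/PPS (parameter set) blobs for
 * the AVC-Intra 50/100 classes. Such essences typically carry no in-band
 * parameter sets, so demuxers call this helper to synthesize extradata,
 * selected by coded width and field order. */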
4223int ff_generate_avci_extradata(AVStream *st)
4224{
4225 static const uint8_t avci100_1080p_extradata[] = {
4226 // SPS
4227 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4228 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4229 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4230 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4231 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4232 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4233 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4234 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4235 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4236 // PPS
4237 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4238 0xd0
4239 };
4240 static const uint8_t avci100_1080i_extradata[] = {
4241 // SPS
4242 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4243 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4244 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4245 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4246 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4247 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4248 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4249 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4250 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4251 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4252 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4253 // PPS
4254 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4255 0xd0
4256 };
4257 static const uint8_t avci50_1080i_extradata[] = {
4258 // SPS
4259 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4260 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4261 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4262 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4263 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4264 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4265 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4266 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4267 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4268 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4269 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4270 // PPS
4271 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4272 0x11
4273 };
4274 static const uint8_t avci100_720p_extradata[] = {
4275 // SPS
4276 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4277 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4278 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4279 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4280 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4281 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4282 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4283 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4284 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4285 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4286 // PPS
4287 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4288 0x11
4289 };
4290
4291 const uint8_t *data = NULL;
4292 int size = 0;
4293
4294 if (st->codec->width == 1920) {
4295 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4296 data = avci100_1080p_extradata;
4297 size = sizeof(avci100_1080p_extradata);
4298 } else {
4299 data = avci100_1080i_extradata;
4300 size = sizeof(avci100_1080i_extradata);
4301 }
4302 } else if (st->codec->width == 1440) {
4303 data = avci50_1080i_extradata;
4304 size = sizeof(avci50_1080i_extradata);
4305 } else if (st->codec->width == 1280) {
4306 data = avci100_720p_extradata;
4307 size = sizeof(avci100_720p_extradata);
4308 }
4309
4310 if (!size)
4311 return 0;
4312
4313 av_freep(&st->codec->extradata);
4314 if (ff_alloc_extradata(st->codec, size))
4315 return AVERROR(ENOMEM);
4316 memcpy(st->codec->extradata, data, size);
4317
4318 return 0;
4319}
4320
4321uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
4322 int *size)
4323{
4324 int i;
4325
4326 for (i = 0; i < st->nb_side_data; i++) {
4327 if (st->side_data[i].type == type) {
4328 if (size)
4329 *size = st->side_data[i].size;
4330 return st->side_data[i].data;
4331 }
4332 }
4333 return NULL;
4334}