Commit | Line | Data |
---|---|---|
2ba45a60 DM |
1 | /* |
2 | * Copyright (c) 2010 Stefano Sabatini | |
3 | * Copyright (c) 2008 Victor Paesa | |
4 | * | |
5 | * This file is part of FFmpeg. | |
6 | * | |
7 | * FFmpeg is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU Lesser General Public | |
9 | * License as published by the Free Software Foundation; either | |
10 | * version 2.1 of the License, or (at your option) any later version. | |
11 | * | |
12 | * FFmpeg is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * Lesser General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU Lesser General Public | |
18 | * License along with FFmpeg; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
20 | */ | |
21 | ||
22 | /** | |
23 | * @file | |
24 | * video presentation timestamp (PTS) modification filter | |
25 | */ | |
26 | ||
27 | #include <inttypes.h> | |
28 | ||
29 | #include "libavutil/eval.h" | |
30 | #include "libavutil/internal.h" | |
31 | #include "libavutil/mathematics.h" | |
32 | #include "libavutil/opt.h" | |
33 | #include "libavutil/time.h" | |
34 | #include "audio.h" | |
35 | #include "avfilter.h" | |
36 | #include "internal.h" | |
37 | #include "video.h" | |
38 | ||
/* Names of the variables usable in the user expression.
 * NOTE: the order of this table must stay in sync with enum var_name below —
 * av_expr_parse() maps each name to its index in this array. */
static const char *const var_names[] = {
    "FRAME_RATE",          ///< defined only for constant frame-rate video
    "INTERLACED",          ///< tell if the current frame is interlaced
    "N",                   ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",          ///< number of samples in the current frame (only audio)
    "POS",                 ///< original position in the file of the frame
    "PREV_INPTS",          ///< previous input PTS
    "PREV_INT",            ///< previous input time in seconds
    "PREV_OUTPTS",         ///< previous output PTS
    "PREV_OUTT",           ///< previous output time in seconds
    "PTS",                 ///< original pts in the file of the frame
    "SAMPLE_RATE",         ///< sample rate (only audio)
    "STARTPTS",            ///< PTS at start of movie
    "STARTT",              ///< time at start of movie
    "T",                   ///< original time in the file of the frame
    "TB",                  ///< timebase
    "RTCTIME",             ///< wallclock (RTC) time in micro seconds
    "RTCSTART",            ///< wallclock (RTC) time at the start of the movie in micro seconds
    "S",                   ///< number of samples in the current frame (shorthand, only audio)
    "SR",                  ///< audio sample rate (shorthand, only audio)
    NULL
};
62 | ||
/* Indices into SetPTSContext.var_values; each entry must match the position
 * of the corresponding string in var_names above. */
enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_VARS_NB            ///< number of variables; sizes var_values[]
};
86 | ||
typedef struct SetPTSContext {
    const AVClass *class;               ///< required first member for AVOptions
    char *expr_str;                     ///< user-supplied expression (the "expr" option)
    AVExpr *expr;                       ///< parsed form of expr_str, built in init()
    double var_values[VAR_VARS_NB];     ///< current values of the expression variables
    enum AVMediaType type;              ///< media type of the input link (audio or video)
} SetPTSContext;
94 | ||
95 | static av_cold int init(AVFilterContext *ctx) | |
96 | { | |
97 | SetPTSContext *setpts = ctx->priv; | |
98 | int ret; | |
99 | ||
100 | if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str, | |
101 | var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) { | |
102 | av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str); | |
103 | return ret; | |
104 | } | |
105 | ||
106 | setpts->var_values[VAR_N] = 0.0; | |
107 | setpts->var_values[VAR_S] = 0.0; | |
108 | setpts->var_values[VAR_PREV_INPTS] = NAN; | |
109 | setpts->var_values[VAR_PREV_INT] = NAN; | |
110 | setpts->var_values[VAR_PREV_OUTPTS] = NAN; | |
111 | setpts->var_values[VAR_PREV_OUTT] = NAN; | |
112 | setpts->var_values[VAR_STARTPTS] = NAN; | |
113 | setpts->var_values[VAR_STARTT] = NAN; | |
114 | return 0; | |
115 | } | |
116 | ||
117 | static int config_input(AVFilterLink *inlink) | |
118 | { | |
119 | AVFilterContext *ctx = inlink->dst; | |
120 | SetPTSContext *setpts = ctx->priv; | |
121 | ||
122 | setpts->type = inlink->type; | |
123 | setpts->var_values[VAR_TB] = av_q2d(inlink->time_base); | |
124 | setpts->var_values[VAR_RTCSTART] = av_gettime(); | |
125 | ||
126 | setpts->var_values[VAR_SR] = | |
127 | setpts->var_values[VAR_SAMPLE_RATE] = | |
128 | setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN; | |
129 | ||
130 | setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ? | |
131 | av_q2d(inlink->frame_rate) : NAN; | |
132 | ||
133 | av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n", | |
134 | setpts->var_values[VAR_TB], | |
135 | setpts->var_values[VAR_FRAME_RATE], | |
136 | setpts->var_values[VAR_SAMPLE_RATE]); | |
137 | return 0; | |
138 | } | |
139 | ||
/* Timestamp conversion helpers: NAN and AV_NOPTS_VALUE represent "no
 * timestamp" in the double and integer domains respectively. */
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))

#define BUF_SIZE 64

/**
 * Render v into buf as a decimal int64 (truncated), or "nan" when v is NaN.
 * buf must hold at least BUF_SIZE bytes.
 *
 * @return buf, for use inside printf-style argument lists
 */
static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) {
        snprintf(buf, BUF_SIZE, "nan");
    } else {
        snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    }
    return buf;
}

/* Formats v into an anonymous zeroed stack buffer (compound literal),
 * valid until the end of the enclosing full expression. */
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
154 | ||
/**
 * Per-frame worker: publish the frame's properties as expression variables,
 * evaluate the user expression, and rewrite frame->pts with the result.
 *
 * The update order matters: STARTPTS/STARTT latch on the first frame only,
 * the expression is evaluated before any PREV_* variable is refreshed, and
 * N / NB_CONSUMED_SAMPLES advance only after evaluation.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;   /* keep the input PTS; frame->pts is overwritten below */
    double d;

    /* First frame seen on this link: record the stream start point. */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
    /* -1 means "unknown byte position"; expose it as NAN to the expression. */
    setpts->var_values[VAR_POS       ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    setpts->var_values[VAR_RTCTIME   ] = av_gettime();

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
        /* S is the short alias of NB_SAMPLES. */
        setpts->var_values[VAR_S] = frame->nb_samples;
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
    }

    /* Evaluate the user expression; NAN maps back to AV_NOPTS_VALUE. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    frame->pts = D2TS(d);

    av_dlog(inlink->dst,
            "N:%"PRId64" PTS:%s T:%f POS:%s",
            (int64_t)setpts->var_values[VAR_N],
            d2istr(setpts->var_values[VAR_PTS]),
            setpts->var_values[VAR_T],
            d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_dlog(inlink->dst, " INTERLACED:%"PRId64,
                (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_dlog(inlink->dst, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
                (int64_t)setpts->var_values[VAR_NB_SAMPLES],
                (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_dlog(inlink->dst, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    /* N counts frames for video but individual samples for audio. */
    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    /* Refresh the PREV_* variables for the next evaluation, using the
     * saved input PTS (frame->pts now holds the output PTS). */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT]   = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
214 | ||
215 | static av_cold void uninit(AVFilterContext *ctx) | |
216 | { | |
217 | SetPTSContext *setpts = ctx->priv; | |
218 | av_expr_free(setpts->expr); | |
219 | setpts->expr = NULL; | |
220 | } | |
221 | ||
#define OFFSET(x) offsetof(SetPTSContext, x)
/* The filter is usable on both audio and video links. */
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/* Shared option table for both the setpts and asetpts variants;
 * the default expression "PTS" makes the filter a pass-through. */
static const AVOption options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
    { NULL }
};
228 | ||
#if CONFIG_SETPTS_FILTER
/* Video variant: reuse the shared option table under the "setpts" class. */
#define setpts_options options
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_setpts = {
    .name      = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(SetPTSContext),
    .priv_class = &setpts_class,

    .inputs    = avfilter_vf_setpts_inputs,
    .outputs   = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */
264 | ||
#if CONFIG_ASETPTS_FILTER

/* Audio variant: same implementation, registered under the "asetpts" class
 * with audio-typed pads. */
#define asetpts_options options
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = asetpts_inputs,
    .outputs     = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */