Commit | Line | Data |
---|---|---|
2ba45a60 DM |
1 | /* |
2 | * This file is part of FFmpeg. | |
3 | * | |
4 | * FFmpeg is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU Lesser General Public | |
6 | * License as published by the Free Software Foundation; either | |
7 | * version 2.1 of the License, or (at your option) any later version. | |
8 | * | |
9 | * FFmpeg is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
12 | * Lesser General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU Lesser General Public | |
15 | * License along with FFmpeg; if not, write to the Free Software | |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
17 | */ | |
18 | ||
19 | /** | |
20 | * @file | |
21 | * sample format and channel layout conversion audio filter | |
22 | */ | |
23 | ||
24 | #include "libavutil/avassert.h" | |
25 | #include "libavutil/avstring.h" | |
26 | #include "libavutil/common.h" | |
27 | #include "libavutil/dict.h" | |
28 | #include "libavutil/mathematics.h" | |
29 | #include "libavutil/opt.h" | |
30 | ||
31 | #include "libavresample/avresample.h" | |
32 | ||
33 | #include "audio.h" | |
34 | #include "avfilter.h" | |
35 | #include "formats.h" | |
36 | #include "internal.h" | |
37 | ||
/* Private state for the resample filter. */
typedef struct ResampleContext {
    const AVClass *class;
    AVAudioResampleContext *avr;    /* lavr context; NULL when conversion is a pass-through */
    AVDictionary *options;          /* user-supplied lavr options, applied in config_output() */

    int64_t next_pts;               /* expected pts of the next output frame (outlink time base) */
    int64_t next_in_pts;            /* expected pts of the next input frame (inlink time base) */

    /* set by filter_frame() to signal an output frame to request_frame() */
    int got_output;
} ResampleContext;
49 | ||
50 | static av_cold int init(AVFilterContext *ctx, AVDictionary **opts) | |
51 | { | |
52 | ResampleContext *s = ctx->priv; | |
53 | const AVClass *avr_class = avresample_get_class(); | |
54 | AVDictionaryEntry *e = NULL; | |
55 | ||
56 | while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) { | |
57 | if (av_opt_find(&avr_class, e->key, NULL, 0, | |
58 | AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN)) | |
59 | av_dict_set(&s->options, e->key, e->value, 0); | |
60 | } | |
61 | ||
62 | e = NULL; | |
63 | while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX))) | |
64 | av_dict_set(opts, e->key, NULL, 0); | |
65 | ||
66 | /* do not allow the user to override basic format options */ | |
67 | av_dict_set(&s->options, "in_channel_layout", NULL, 0); | |
68 | av_dict_set(&s->options, "out_channel_layout", NULL, 0); | |
69 | av_dict_set(&s->options, "in_sample_fmt", NULL, 0); | |
70 | av_dict_set(&s->options, "out_sample_fmt", NULL, 0); | |
71 | av_dict_set(&s->options, "in_sample_rate", NULL, 0); | |
72 | av_dict_set(&s->options, "out_sample_rate", NULL, 0); | |
73 | ||
74 | return 0; | |
75 | } | |
76 | ||
77 | static av_cold void uninit(AVFilterContext *ctx) | |
78 | { | |
79 | ResampleContext *s = ctx->priv; | |
80 | ||
81 | if (s->avr) { | |
82 | avresample_close(s->avr); | |
83 | avresample_free(&s->avr); | |
84 | } | |
85 | av_dict_free(&s->options); | |
86 | } | |
87 | ||
88 | static int query_formats(AVFilterContext *ctx) | |
89 | { | |
90 | AVFilterLink *inlink = ctx->inputs[0]; | |
91 | AVFilterLink *outlink = ctx->outputs[0]; | |
92 | ||
93 | AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); | |
94 | AVFilterFormats *out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); | |
95 | AVFilterFormats *in_samplerates = ff_all_samplerates(); | |
96 | AVFilterFormats *out_samplerates = ff_all_samplerates(); | |
97 | AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts(); | |
98 | AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts(); | |
99 | ||
100 | ff_formats_ref(in_formats, &inlink->out_formats); | |
101 | ff_formats_ref(out_formats, &outlink->in_formats); | |
102 | ||
103 | ff_formats_ref(in_samplerates, &inlink->out_samplerates); | |
104 | ff_formats_ref(out_samplerates, &outlink->in_samplerates); | |
105 | ||
106 | ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts); | |
107 | ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts); | |
108 | ||
109 | return 0; | |
110 | } | |
111 | ||
/**
 * (Re)configure the lavr context for the formats negotiated on the links.
 *
 * If input and output are effectively identical — same channel layout and
 * sample rate, and either the same sample format or both mono with formats
 * differing only in planarity — no context is created (s->avr stays NULL)
 * and filter_frame() passes frames through untouched.
 *
 * @return 0 on success, a negative AVERROR on allocation or open failure.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;

    /* config_output() may run again after a reconfiguration; drop the old context */
    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    /* pass-through shortcut: nothing to convert */
    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate == outlink->sample_rate &&
        (inlink->format == outlink->format ||
        (av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 &&
         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
         av_get_planar_sample_fmt(inlink->format) ==
         av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    /* apply the user options collected in init(); av_opt_set_dict() consumes
     * the applied entries from s->options */
    if (s->options) {
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);

        av_opt_set_dict(s->avr, &s->options);
    }

    /* format parameters always come from link negotiation (see init()) */
    av_opt_set_int(s->avr, "in_channel_layout",  inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr, "in_sample_fmt",      inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr, "in_sample_rate",     inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    /* output timestamps are counted in samples at the output rate */
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;
    s->next_in_pts     = AV_NOPTS_VALUE;

    av_get_channel_layout_string(buf1, sizeof(buf1),
                                 -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}
170 | ||
171 | static int request_frame(AVFilterLink *outlink) | |
172 | { | |
173 | AVFilterContext *ctx = outlink->src; | |
174 | ResampleContext *s = ctx->priv; | |
175 | int ret = 0; | |
176 | ||
177 | s->got_output = 0; | |
178 | while (ret >= 0 && !s->got_output) | |
179 | ret = ff_request_frame(ctx->inputs[0]); | |
180 | ||
181 | /* flush the lavr delay buffer */ | |
182 | if (ret == AVERROR_EOF && s->avr) { | |
183 | AVFrame *frame; | |
184 | int nb_samples = avresample_get_out_samples(s->avr, 0); | |
185 | ||
186 | if (!nb_samples) | |
187 | return ret; | |
188 | ||
189 | frame = ff_get_audio_buffer(outlink, nb_samples); | |
190 | if (!frame) | |
191 | return AVERROR(ENOMEM); | |
192 | ||
193 | ret = avresample_convert(s->avr, frame->extended_data, | |
194 | frame->linesize[0], nb_samples, | |
195 | NULL, 0, 0); | |
196 | if (ret <= 0) { | |
197 | av_frame_free(&frame); | |
198 | return (ret == 0) ? AVERROR_EOF : ret; | |
199 | } | |
200 | ||
201 | frame->pts = s->next_pts; | |
202 | return ff_filter_frame(outlink, frame); | |
203 | } | |
204 | return ret; | |
205 | } | |
206 | ||
/**
 * Convert one input frame through lavr and send the result downstream.
 * When no conversion is needed (s->avr == NULL, see config_output()), the
 * frame is forwarded as-is with only the format field rewritten to the
 * (planarity-equivalent) negotiated output format.
 *
 * Takes ownership of @p in (freed on every converting path).
 *
 * @return >= 0 on success, a negative AVERROR on failure.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ResampleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    if (s->avr) {
        AVFrame *out;
        int delay, nb_samples;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = avresample_get_out_samples(s->avr, in->nb_samples);

        out = ff_get_audio_buffer(outlink, nb_samples);
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
                                 nb_samples, in->extended_data, in->linesize[0],
                                 in->nb_samples);
        if (ret <= 0) {
            av_frame_free(&out);
            if (ret < 0)
                goto fail;
            /* ret == 0: no output this time; fall through so the pts
             * bookkeeping below still runs, then free the input */
        }

        /* the filter requested all available samples, so lavr must not be
         * holding any converted samples back */
        av_assert0(!avresample_available(s->avr));

        if (s->next_pts == AV_NOPTS_VALUE) {
            if (in->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            /* ret is the number of samples actually converted */
            out->nb_samples = ret;

            ret = av_frame_copy_props(out, in);
            if (ret < 0) {
                av_frame_free(&out);
                goto fail;
            }

            out->sample_rate = outlink->sample_rate;
            /* Only convert in->pts if there is a discontinuous jump.
               This ensures that out->pts tracks the number of samples actually
               output by the resampler in the absence of such a jump.
               Otherwise, the rounding in av_rescale_q() and av_rescale()
               causes off-by-1 errors. */
            if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
                out->pts = av_rescale_q(in->pts, inlink->time_base,
                                        outlink->time_base) -
                           av_rescale(delay, outlink->sample_rate,
                                      inlink->sample_rate);
            } else
                out->pts = s->next_pts;

            /* outlink->time_base is 1/sample_rate, so pts advances by one
             * per output sample (see config_output()) */
            s->next_pts = out->pts + out->nb_samples;
            s->next_in_pts = in->pts + in->nb_samples;

            ret = ff_filter_frame(outlink, out);
            s->got_output = 1;
        }

fail:
        av_frame_free(&in);
    } else {
        /* pass-through: formats differ at most in planarity (config_output()
         * only skips conversion for mono with equal planar formats) */
        in->format = outlink->format;
        ret = ff_filter_frame(outlink, in);
        s->got_output = 1;
    }

    return ret;
}
289 | ||
290 | static const AVClass *resample_child_class_next(const AVClass *prev) | |
291 | { | |
292 | return prev ? NULL : avresample_get_class(); | |
293 | } | |
294 | ||
295 | static void *resample_child_next(void *obj, void *prev) | |
296 | { | |
297 | ResampleContext *s = obj; | |
298 | return prev ? NULL : s->avr; | |
299 | } | |
300 | ||
/* AVClass for the filter; the child callbacks chain libavresample's options
 * into this filter's option namespace. */
static const AVClass resample_class = {
    .class_name       = "resample",
    .item_name        = av_default_item_name,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};
308 | ||
/* Single audio input; frames are consumed by filter_frame(). */
static const AVFilterPad avfilter_af_resample_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
317 | ||
/* Single audio output; config_output() sets up lavr after negotiation and
 * request_frame() drives pulling/flushing. */
static const AVFilterPad avfilter_af_resample_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};
327 | ||
/* Filter registration: sample format, sample rate and channel layout
 * conversion backed by libavresample. */
AVFilter ff_af_resample = {
    .name          = "resample",
    .description   = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
    .priv_size     = sizeof(ResampleContext),
    .priv_class    = &resample_class,
    .init_dict     = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_af_resample_inputs,
    .outputs       = avfilter_af_resample_outputs,
};