| 1 | /* |
| 2 | * Copyright (c) 2012 Stefano Sabatini |
| 3 | * |
| 4 | * This file is part of FFmpeg. |
| 5 | * |
| 6 | * FFmpeg is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU Lesser General Public |
| 8 | * License as published by the Free Software Foundation; either |
| 9 | * version 2.1 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | * FFmpeg is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * Lesser General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU Lesser General Public |
| 17 | * License along with FFmpeg; if not, write to the Free Software |
| 18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 19 | */ |
| 20 | |
| 21 | /** |
| 22 | * @file |
| 23 | * audio to video multimedia filter |
| 24 | */ |
| 25 | |
| 26 | #include "libavutil/channel_layout.h" |
| 27 | #include "libavutil/opt.h" |
| 28 | #include "libavutil/parseutils.h" |
| 29 | #include "avfilter.h" |
| 30 | #include "formats.h" |
| 31 | #include "audio.h" |
| 32 | #include "video.h" |
| 33 | #include "internal.h" |
| 34 | |
/* Display modes; selected via the "mode" option and mapped to a draw_sample
 * callback in init(). Order matters: option i64 values reference these. */
enum ShowWavesMode {
    MODE_POINT,          /* one point per sample */
    MODE_LINE,           /* vertical line from center to the sample value */
    MODE_P2P,            /* point per sample, joined to the previous sample */
    MODE_CENTERED_LINE,  /* vertically centered line, length ~ |sample| */
    MODE_NB,             /* number of modes, keep last */
};
| 42 | |
typedef struct {
    const AVClass *class;
    int w, h;                 /* output video size (set via "size" option) */
    AVRational rate;          /* target output frame rate */
    int buf_idx;              /* current x column in the frame being drawn */
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;       /* partially drawn output frame, NULL when none */
    int req_fullfilled;       /* set when a frame was pushed during ff_request_frame */
    int n;                    /* number of input samples aggregated per output column */
    int sample_count_mod;     /* samples drawn so far in the current column (mod n) */
    enum ShowWavesMode mode;  /* selected display mode */
    int split_channels;       /* nonzero: draw each channel in its own horizontal band */
    /* per-mode drawing callback, chosen in init() */
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t sample, int16_t *prev_y, int intensity);
} ShowWavesContext;
| 58 | |
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-settable options; "s"/"r" are short aliases of "size"/"rate".
 * Note: AV_OPT_TYPE_IMAGE_SIZE writes both w and h starting at OFFSET(w). */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);
| 78 | |
| 79 | static av_cold void uninit(AVFilterContext *ctx) |
| 80 | { |
| 81 | ShowWavesContext *showwaves = ctx->priv; |
| 82 | |
| 83 | av_frame_free(&showwaves->outpicref); |
| 84 | av_freep(&showwaves->buf_idy); |
| 85 | } |
| 86 | |
/* Constrain the input to interleaved s16 audio (any layout/rate) and the
 * output to 8-bit grayscale video. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    /* set input audio formats */
    /* NOTE(review): ff_formats_ref()/ff_channel_layouts_ref() results are not
     * checked here — presumably void in this API version; if the project has
     * migrated to the int-returning variants, these should be checked. */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}
| 120 | |
| 121 | static int config_output(AVFilterLink *outlink) |
| 122 | { |
| 123 | AVFilterContext *ctx = outlink->src; |
| 124 | AVFilterLink *inlink = ctx->inputs[0]; |
| 125 | ShowWavesContext *showwaves = ctx->priv; |
| 126 | int nb_channels = inlink->channels; |
| 127 | |
| 128 | if (!showwaves->n) |
| 129 | showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5); |
| 130 | |
| 131 | showwaves->buf_idx = 0; |
| 132 | if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) { |
| 133 | av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n"); |
| 134 | return AVERROR(ENOMEM); |
| 135 | } |
| 136 | outlink->w = showwaves->w; |
| 137 | outlink->h = showwaves->h; |
| 138 | outlink->sample_aspect_ratio = (AVRational){1,1}; |
| 139 | |
| 140 | outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n}, |
| 141 | (AVRational){showwaves->w,1}); |
| 142 | |
| 143 | av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n", |
| 144 | showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n); |
| 145 | return 0; |
| 146 | } |
| 147 | |
| 148 | inline static int push_frame(AVFilterLink *outlink) |
| 149 | { |
| 150 | AVFilterContext *ctx = outlink->src; |
| 151 | AVFilterLink *inlink = ctx->inputs[0]; |
| 152 | ShowWavesContext *showwaves = outlink->src->priv; |
| 153 | int nb_channels = inlink->channels; |
| 154 | int ret, i; |
| 155 | |
| 156 | if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0) |
| 157 | showwaves->req_fullfilled = 1; |
| 158 | showwaves->outpicref = NULL; |
| 159 | showwaves->buf_idx = 0; |
| 160 | for (i = 0; i <= nb_channels; i++) |
| 161 | showwaves->buf_idy[i] = 0; |
| 162 | return ret; |
| 163 | } |
| 164 | |
/* Pull audio from the input until filter_frame() has pushed at least one
 * video frame (signalled via req_fullfilled) or the input errors/EOFs.
 * On EOF, flush the partially drawn frame, if any. */
static int request_frame(AVFilterLink *outlink)
{
    ShowWavesContext *showwaves = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    showwaves->req_fullfilled = 0;
    do {
        ret = ff_request_frame(inlink);
    } while (!showwaves->req_fullfilled && ret >= 0);

    /* NOTE(review): push_frame()'s return value is deliberately dropped here
     * so that AVERROR_EOF is still propagated to the caller after the flush */
    if (ret == AVERROR_EOF && showwaves->outpicref)
        push_frame(outlink);
    return ret;
}
| 180 | |
#define MAX_INT16 ((1<<15) -1)

/* MODE_POINT: light up the single pixel at the sample's y position.
 * prev_y is unused; the signature is shared by all draw callbacks. */
static void draw_sample_point(uint8_t *buf, int height, int linesize,
                              int16_t sample, int16_t *prev_y, int intensity)
{
    /* map [-32767,32767] to [height, 0] around the vertical center */
    const int y = height/2 - av_rescale(sample, height/2, MAX_INT16);

    if (y < 0 || y >= height)
        return;
    buf[y * linesize] += intensity;
}
| 190 | |
| 191 | static void draw_sample_line(uint8_t *buf, int height, int linesize, |
| 192 | int16_t sample, int16_t *prev_y, int intensity) |
| 193 | { |
| 194 | int k; |
| 195 | const int h = height/2 - av_rescale(sample, height/2, MAX_INT16); |
| 196 | int start = height/2; |
| 197 | int end = av_clip(h, 0, height-1); |
| 198 | if (start > end) |
| 199 | FFSWAP(int16_t, start, end); |
| 200 | for (k = start; k < end; k++) |
| 201 | buf[k * linesize] += intensity; |
| 202 | } |
| 203 | |
| 204 | static void draw_sample_p2p(uint8_t *buf, int height, int linesize, |
| 205 | int16_t sample, int16_t *prev_y, int intensity) |
| 206 | { |
| 207 | int k; |
| 208 | const int h = height/2 - av_rescale(sample, height/2, MAX_INT16); |
| 209 | if (h >= 0 && h < height) { |
| 210 | buf[h * linesize] += intensity; |
| 211 | if (*prev_y && h != *prev_y) { |
| 212 | int start = *prev_y; |
| 213 | int end = av_clip(h, 0, height-1); |
| 214 | if (start > end) |
| 215 | FFSWAP(int16_t, start, end); |
| 216 | for (k = start + 1; k < end; k++) |
| 217 | buf[k * linesize] += intensity; |
| 218 | } |
| 219 | } |
| 220 | *prev_y = h; |
| 221 | } |
| 222 | |
/* MODE_CENTERED_LINE: draw a vertically centered line whose length is
 * proportional to |sample| (full scale spans height/2, since the divisor is
 * UINT16_MAX). prev_y is unused; the signature is shared by all callbacks. */
static void draw_sample_cline(uint8_t *buf, int height, int linesize,
                              int16_t sample, int16_t *prev_y, int intensity)
{
    const int len   = av_rescale(abs(sample), height, UINT16_MAX);
    const int first = (height - len) / 2;
    const int last  = first + len;
    int y;

    for (y = first; y < last; y++)
        buf[y * linesize] += intensity;
}
| 233 | |
/* Consume an audio frame: draw each interleaved s16 sample into the current
 * output picture (allocating/clearing one on demand), advancing one column
 * every n samples and pushing the picture when w columns are filled.
 * Takes ownership of insamples. Returns 0 or a negative AVERROR. */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int linesize = outpicref ? outpicref->linesize[0] : 0;
    int16_t *p = (int16_t *)insamples->data[0];  /* interleaved s16 sample cursor */
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int n = showwaves->n;
    /* NOTE: x becomes 0 when nb_channels*n > 255, making the drawing invisible */
    const int x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {
        if (!showwaves->outpicref) {
            /* start a new output picture, black-initialized */
            showwaves->outpicref = outpicref =
                ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!outpicref)
                return AVERROR(ENOMEM);
            outpicref->width  = outlink->w;
            outpicref->height = outlink->h;
            /* pts = input pts + offset of the first sample drawn into this
             * picture, rescaled from the sample-rate time base */
            outpicref->pts = insamples->pts +
                             av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
                                          (AVRational){ 1, inlink->sample_rate },
                                          outlink->time_base);
            linesize = outpicref->linesize[0];
            for (j = 0; j < outlink->h; j++)
                memset(outpicref->data[0] + j * linesize, 0, outlink->w);
        }
        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx;
            if (showwaves->split_channels)
                buf += j*ch_height*linesize;  /* each channel gets its own band */
            showwaves->draw_sample(buf, ch_height, linesize, *p++,
                                   &showwaves->buf_idy[j], x);
        }

        /* advance to the next column every n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        /* push_frame() may have cleared outpicref; reload for the next sample */
        outpicref = showwaves->outpicref;
    }

    av_frame_free(&insamples);
    return ret;
}
| 288 | |
| 289 | static av_cold int init(AVFilterContext *ctx) |
| 290 | { |
| 291 | ShowWavesContext *showwaves = ctx->priv; |
| 292 | |
| 293 | switch (showwaves->mode) { |
| 294 | case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; |
| 295 | case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; |
| 296 | case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; |
| 297 | case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; |
| 298 | default: |
| 299 | return AVERROR_BUG; |
| 300 | } |
| 301 | return 0; |
| 302 | } |
| 303 | |
/* Single audio input pad; samples are consumed by filter_frame(). */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
| 312 | |
/* Single video output pad; geometry/rate set in config_output(),
 * frames pulled via request_frame(). */
static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
| 322 | |
/* Filter registration: audio in, waveform video out. */
AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};