Commit | Line | Data |
---|---|---|
2ba45a60 DM |
1 | /* |
2 | * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de> | |
3 | * | |
4 | * This file is part of FFmpeg. | |
5 | * | |
6 | * FFmpeg is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2.1 of the License, or (at your option) any later version. | |
10 | * | |
11 | * FFmpeg is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with FFmpeg; if not, write to the Free Software | |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
19 | */ | |
20 | ||
21 | #define DEFAULT_RESULT_NAME "transforms.trf" | |
22 | ||
23 | #include <vid.stab/libvidstab.h> | |
24 | ||
25 | #include "libavutil/common.h" | |
26 | #include "libavutil/opt.h" | |
27 | #include "libavutil/imgutils.h" | |
28 | #include "avfilter.h" | |
29 | #include "internal.h" | |
30 | ||
31 | #include "vidstabutils.h" | |
32 | ||
typedef struct {
    const AVClass *class;       // must be the first member (required by av_log()/AVOption machinery)

    VSMotionDetect md;          // vid.stab motion-detection state, initialized in config_input()
    VSMotionDetectConfig conf;  // detection parameters; filled from the AVOption table via OFFSETC()

    char *result;               // path of the transforms output file (the "result" option)
    FILE *f;                    // open handle to the transforms file; owned here, closed in uninit()
} StabData;
42 | ||
43 | ||
/* Offset of a field stored directly in StabData, for AVOption entries. */
#define OFFSET(x) offsetof(StabData, x)
/* Offset of a field inside the embedded VSMotionDetectConfig, so options
 * are written straight into the vid.stab configuration struct. */
#define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
47 | ||
/* User-visible options. Entries using OFFSETC() map directly into the
 * VSMotionDetectConfig consumed by libvidstab; "result" is the output path. */
static const AVOption vidstabdetect_options[] = {
    {"result", "path to the file used to write the transforms", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}, .flags = FLAGS},
    {"shakiness", "how shaky is the video and how quick is the camera?"
                  " 1: little (fast) 10: very strong/quick (slow)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
    {"accuracy", "(>=shakiness) 1: low 15: high (slow)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 15}, 1, 15, FLAGS},
    {"stepsize", "region around minimum is scanned with 1 pixel resolution", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
    {"mincontrast", "below this contrast a field is discarded (0-1)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
    {"show", "0: draw nothing; 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
    {"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
               " reference frame (frame # is the value)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(vidstabdetect);
62 | ||
63 | static av_cold int init(AVFilterContext *ctx) | |
64 | { | |
65 | StabData *sd = ctx->priv; | |
66 | ff_vs_init(); | |
67 | sd->class = &vidstabdetect_class; | |
68 | av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION); | |
69 | return 0; | |
70 | } | |
71 | ||
72 | static av_cold void uninit(AVFilterContext *ctx) | |
73 | { | |
74 | StabData *sd = ctx->priv; | |
75 | VSMotionDetect *md = &(sd->md); | |
76 | ||
77 | if (sd->f) { | |
78 | fclose(sd->f); | |
79 | sd->f = NULL; | |
80 | } | |
81 | ||
82 | vsMotionDetectionCleanup(md); | |
83 | } | |
84 | ||
85 | static int query_formats(AVFilterContext *ctx) | |
86 | { | |
87 | // If you add something here also add it in vidstabutils.c | |
88 | static const enum AVPixelFormat pix_fmts[] = { | |
89 | AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, | |
90 | AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, | |
91 | AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8, | |
92 | AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA, | |
93 | AV_PIX_FMT_NONE | |
94 | }; | |
95 | ||
96 | ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); | |
97 | return 0; | |
98 | } | |
99 | ||
100 | static int config_input(AVFilterLink *inlink) | |
101 | { | |
102 | AVFilterContext *ctx = inlink->dst; | |
103 | StabData *sd = ctx->priv; | |
104 | ||
105 | VSMotionDetect* md = &(sd->md); | |
106 | VSFrameInfo fi; | |
107 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); | |
108 | ||
109 | vsFrameInfoInit(&fi, inlink->w, inlink->h, | |
110 | ff_av2vs_pixfmt(ctx, inlink->format)); | |
111 | if (fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) { | |
112 | av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits/per/pixel, please report a BUG"); | |
113 | return AVERROR(EINVAL); | |
114 | } | |
115 | if (fi.log2ChromaW != desc->log2_chroma_w) { | |
116 | av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG"); | |
117 | return AVERROR(EINVAL); | |
118 | } | |
119 | ||
120 | if (fi.log2ChromaH != desc->log2_chroma_h) { | |
121 | av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG"); | |
122 | return AVERROR(EINVAL); | |
123 | } | |
124 | ||
125 | // set values that are not initialized by the options | |
126 | sd->conf.algo = 1; | |
127 | sd->conf.modName = "vidstabdetect"; | |
128 | if (vsMotionDetectInit(md, &sd->conf, &fi) != VS_OK) { | |
129 | av_log(ctx, AV_LOG_ERROR, "initialization of Motion Detection failed, please report a BUG"); | |
130 | return AVERROR(EINVAL); | |
131 | } | |
132 | ||
133 | vsMotionDetectGetConfig(&sd->conf, md); | |
134 | av_log(ctx, AV_LOG_INFO, "Video stabilization settings (pass 1/2):\n"); | |
135 | av_log(ctx, AV_LOG_INFO, " shakiness = %d\n", sd->conf.shakiness); | |
136 | av_log(ctx, AV_LOG_INFO, " accuracy = %d\n", sd->conf.accuracy); | |
137 | av_log(ctx, AV_LOG_INFO, " stepsize = %d\n", sd->conf.stepSize); | |
138 | av_log(ctx, AV_LOG_INFO, " mincontrast = %f\n", sd->conf.contrastThreshold); | |
139 | av_log(ctx, AV_LOG_INFO, " tripod = %d\n", sd->conf.virtualTripod); | |
140 | av_log(ctx, AV_LOG_INFO, " show = %d\n", sd->conf.show); | |
141 | av_log(ctx, AV_LOG_INFO, " result = %s\n", sd->result); | |
142 | ||
143 | sd->f = fopen(sd->result, "w"); | |
144 | if (sd->f == NULL) { | |
145 | av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", sd->result); | |
146 | return AVERROR(EINVAL); | |
147 | } else { | |
148 | if (vsPrepareFile(md, sd->f) != VS_OK) { | |
149 | av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", sd->result); | |
150 | return AVERROR(EINVAL); | |
151 | } | |
152 | } | |
153 | return 0; | |
154 | } | |
155 | ||
156 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |
157 | { | |
158 | AVFilterContext *ctx = inlink->dst; | |
159 | StabData *sd = ctx->priv; | |
160 | VSMotionDetect *md = &(sd->md); | |
161 | LocalMotions localmotions; | |
162 | ||
163 | AVFilterLink *outlink = inlink->dst->outputs[0]; | |
164 | VSFrame frame; | |
165 | int plane; | |
166 | ||
167 | if (sd->conf.show > 0 && !av_frame_is_writable(in)) | |
168 | av_frame_make_writable(in); | |
169 | ||
170 | for (plane = 0; plane < md->fi.planes; plane++) { | |
171 | frame.data[plane] = in->data[plane]; | |
172 | frame.linesize[plane] = in->linesize[plane]; | |
173 | } | |
174 | if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) { | |
175 | av_log(ctx, AV_LOG_ERROR, "motion detection failed"); | |
176 | return AVERROR(AVERROR_EXTERNAL); | |
177 | } else { | |
178 | if (vsWriteToFile(md, sd->f, &localmotions) != VS_OK) { | |
f6fa7814 | 179 | int ret = AVERROR(errno); |
2ba45a60 | 180 | av_log(ctx, AV_LOG_ERROR, "cannot write to transform file"); |
f6fa7814 | 181 | return ret; |
2ba45a60 DM |
182 | } |
183 | vs_vector_del(&localmotions); | |
184 | } | |
185 | ||
186 | return ff_filter_frame(outlink, in); | |
187 | } | |
188 | ||
/* Single video input: frames are analyzed in filter_frame() after the
 * link geometry/format is validated in config_input(). */
static const AVFilterPad avfilter_vf_vidstabdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
198 | ||
/* Single video output: frames pass through (optionally with debug overlays). */
static const AVFilterPad avfilter_vf_vidstabdetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
206 | ||
/* Filter registration: pass 1 of the vid.stab two-pass stabilization;
 * writes per-frame motion data for the vidstabtransform filter (pass 2). */
AVFilter ff_vf_vidstabdetect = {
    .name          = "vidstabdetect",
    .description   = NULL_IF_CONFIG_SMALL("Extract relative transformations, "
                     "pass 1 of 2 for stabilization "
                     "(see vidstabtransform for pass 2)."),
    .priv_size     = sizeof(StabData),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_vidstabdetect_inputs,
    .outputs       = avfilter_vf_vidstabdetect_outputs,
    .priv_class    = &vidstabdetect_class,
};