2 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
4 * FFmpeg is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along
15 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 * Motion Compensation Deinterlacer
22 * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
26 * The motion estimation is somewhat at the mercy of the input, if the
27 * input frames are created purely based on spatial interpolation then
 * for example a thin black line or another random and not
 * interpolatable pattern will cause problems.
30 * Note: completely ignoring the "unavailable" lines during motion
31 * estimation did not look any better, so the most obvious solution
32 * would be to improve tfields or penalize problematic motion vectors.
34 * If non iterative ME is used then snow currently ignores the OBMC
35 * window and as a result sometimes creates artifacts.
37 * Only past frames are used, we should ideally use future frames too,
38 * something like filtering the whole movie in forward and then
 * backward direction seems like an interesting idea but the current
 * filter framework is FAR from supporting such things.
42 * Combining the motion compensated image with the input image also is
43 * not as trivial as it seems, simple blindly taking even lines from
44 * one and odd ones from the other does not work at all as ME/MC
45 * sometimes has nothing in the previous frames which matches the
46 * current. The current algorithm has been found by trial and error
47 * and almost certainly can be improved...
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h"

#include "avfilter.h"
#include "internal.h"
#include "video.h"
66 PARITY_TFF
= 0, ///< top field first
67 PARITY_BFF
= 1, ///< bottom field first
72 enum MCDeintMode mode
;
73 enum MCDeintParity parity
;
75 AVCodecContext
*enc_ctx
;
/* Shorthand for AVOption offsets into the filter's private context. */
#define OFFSET(x) offsetof(MCDeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Declare a named constant value belonging to an option "unit". */
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
82 static const AVOption mcdeint_options
[] = {
83 { "mode", "set mode", OFFSET(mode
), AV_OPT_TYPE_INT
, {.i64
=MODE_FAST
}, 0, MODE_NB
-1, FLAGS
, .unit
="mode" },
84 CONST("fast", NULL
, MODE_FAST
, "mode"),
85 CONST("medium", NULL
, MODE_MEDIUM
, "mode"),
86 CONST("slow", NULL
, MODE_SLOW
, "mode"),
87 CONST("extra_slow", NULL
, MODE_EXTRA_SLOW
, "mode"),
89 { "parity", "set the assumed picture field parity", OFFSET(parity
), AV_OPT_TYPE_INT
, {.i64
=PARITY_BFF
}, -1, 1, FLAGS
, "parity" },
90 CONST("tff", "assume top field first", PARITY_TFF
, "parity"),
91 CONST("bff", "assume bottom field first", PARITY_BFF
, "parity"),
93 { "qp", "set qp", OFFSET(qp
), AV_OPT_TYPE_INT
, {.i64
=1}, INT_MIN
, INT_MAX
, FLAGS
},
/* Defines mcdeint_class, the AVClass backing the filter's AVOptions. */
AVFILTER_DEFINE_CLASS(mcdeint);
99 static int config_props(AVFilterLink
*inlink
)
101 AVFilterContext
*ctx
= inlink
->dst
;
102 MCDeintContext
*mcdeint
= ctx
->priv
;
104 AVCodecContext
*enc_ctx
;
105 AVDictionary
*opts
= NULL
;
108 if (!(enc
= avcodec_find_encoder(AV_CODEC_ID_SNOW
))) {
109 av_log(ctx
, AV_LOG_ERROR
, "Snow encoder is not enabled in libavcodec\n");
110 return AVERROR(EINVAL
);
113 mcdeint
->enc_ctx
= avcodec_alloc_context3(enc
);
114 if (!mcdeint
->enc_ctx
)
115 return AVERROR(ENOMEM
);
116 enc_ctx
= mcdeint
->enc_ctx
;
117 enc_ctx
->width
= inlink
->w
;
118 enc_ctx
->height
= inlink
->h
;
119 enc_ctx
->time_base
= (AVRational
){1,25}; // meaningless
120 enc_ctx
->gop_size
= 300;
121 enc_ctx
->max_b_frames
= 0;
122 enc_ctx
->pix_fmt
= AV_PIX_FMT_YUV420P
;
123 enc_ctx
->flags
= CODEC_FLAG_QSCALE
| CODEC_FLAG_LOW_DELAY
;
124 enc_ctx
->strict_std_compliance
= FF_COMPLIANCE_EXPERIMENTAL
;
125 enc_ctx
->global_quality
= 1;
126 enc_ctx
->me_cmp
= enc_ctx
->me_sub_cmp
= FF_CMP_SAD
;
127 enc_ctx
->mb_cmp
= FF_CMP_SSE
;
128 av_dict_set(&opts
, "memc_only", "1", 0);
130 switch (mcdeint
->mode
) {
131 case MODE_EXTRA_SLOW
:
134 enc_ctx
->me_method
= ME_ITER
;
136 enc_ctx
->flags
|= CODEC_FLAG_4MV
;
137 enc_ctx
->dia_size
= 2;
139 enc_ctx
->flags
|= CODEC_FLAG_QPEL
;
142 ret
= avcodec_open2(enc_ctx
, enc
, &opts
);
150 static av_cold
void uninit(AVFilterContext
*ctx
)
152 MCDeintContext
*mcdeint
= ctx
->priv
;
154 if (mcdeint
->enc_ctx
) {
155 avcodec_close(mcdeint
->enc_ctx
);
156 av_freep(&mcdeint
->enc_ctx
);
160 static int query_formats(AVFilterContext
*ctx
)
162 static const enum PixelFormat pix_fmts
[] = {
163 AV_PIX_FMT_YUV420P
, AV_PIX_FMT_NONE
166 ff_set_common_formats(ctx
, ff_make_format_list(pix_fmts
));
171 static int filter_frame(AVFilterLink
*inlink
, AVFrame
*inpic
)
173 MCDeintContext
*mcdeint
= inlink
->dst
->priv
;
174 AVFilterLink
*outlink
= inlink
->dst
->outputs
[0];
175 AVFrame
*outpic
, *frame_dec
;
177 int x
, y
, i
, ret
, got_frame
= 0;
179 outpic
= ff_get_video_buffer(outlink
, outlink
->w
, outlink
->h
);
181 av_frame_free(&inpic
);
182 return AVERROR(ENOMEM
);
184 av_frame_copy_props(outpic
, inpic
);
185 inpic
->quality
= mcdeint
->qp
* FF_QP2LAMBDA
;
187 av_init_packet(&pkt
);
188 pkt
.data
= NULL
; // packet data will be allocated by the encoder
191 ret
= avcodec_encode_video2(mcdeint
->enc_ctx
, &pkt
, inpic
, &got_frame
);
195 frame_dec
= mcdeint
->enc_ctx
->coded_frame
;
197 for (i
= 0; i
< 3; i
++) {
199 int w
= FF_CEIL_RSHIFT(inlink
->w
, is_chroma
);
200 int h
= FF_CEIL_RSHIFT(inlink
->h
, is_chroma
);
201 int fils
= frame_dec
->linesize
[i
];
202 int srcs
= inpic
->linesize
[i
];
203 int dsts
= outpic
->linesize
[i
];
205 for (y
= 0; y
< h
; y
++) {
206 if ((y
^ mcdeint
->parity
) & 1) {
207 for (x
= 0; x
< w
; x
++) {
208 uint8_t *filp
= &frame_dec
->data
[i
][x
+ y
*fils
];
209 uint8_t *srcp
= &inpic
->data
[i
][x
+ y
*srcs
];
210 uint8_t *dstp
= &outpic
->data
[i
][x
+ y
*dsts
];
212 if (y
> 0 && y
< h
-1){
213 int is_edge
= x
< 3 || x
> w
-4;
214 int diff0
= filp
[-fils
] - srcp
[-srcs
];
215 int diff1
= filp
[+fils
] - srcp
[+srcs
];
218 #define DELTA(j) av_clip(j, -x, w-1-x)
220 #define GET_SCORE_EDGE(j)\
221 FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
222 FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
223 FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
225 #define GET_SCORE(j)\
226 FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
227 FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
228 FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
230 #define CHECK_EDGE(j)\
231 { int score = GET_SCORE_EDGE(j);\
232 if (score < spatial_score){\
233 spatial_score = score;\
234 diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
235 diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
238 { int score = GET_SCORE(j);\
239 if (score < spatial_score){\
240 spatial_score= score;\
241 diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
242 diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
245 int spatial_score
= GET_SCORE_EDGE(0) - 1;
246 CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
247 CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
249 int spatial_score
= GET_SCORE(0) - 1;
250 CHECK(-1) CHECK(-2) }} }}
251 CHECK( 1) CHECK( 2) }} }}
255 if (diff0
+ diff1
> 0)
256 temp
-= (diff0
+ diff1
- FFABS(FFABS(diff0
) - FFABS(diff1
)) / 2) / 2;
258 temp
-= (diff0
+ diff1
+ FFABS(FFABS(diff0
) - FFABS(diff1
)) / 2) / 2;
259 *filp
= *dstp
= temp
> 255U ? ~(temp
>>31) : temp
;
267 for (y
= 0; y
< h
; y
++) {
268 if (!((y
^ mcdeint
->parity
) & 1)) {
269 for (x
= 0; x
< w
; x
++) {
270 frame_dec
->data
[i
][x
+ y
*fils
] =
271 outpic
->data
[i
][x
+ y
*dsts
] = inpic
->data
[i
][x
+ y
*srcs
];
276 mcdeint
->parity
^= 1;
279 av_free_packet(&pkt
);
280 av_frame_free(&inpic
);
282 av_frame_free(&outpic
);
285 return ff_filter_frame(outlink
, outpic
);
288 static const AVFilterPad mcdeint_inputs
[] = {
291 .type
= AVMEDIA_TYPE_VIDEO
,
292 .filter_frame
= filter_frame
,
293 .config_props
= config_props
,
298 static const AVFilterPad mcdeint_outputs
[] = {
301 .type
= AVMEDIA_TYPE_VIDEO
,
306 AVFilter ff_vf_mcdeint
= {
308 .description
= NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
309 .priv_size
= sizeof(MCDeintContext
),
311 .query_formats
= query_formats
,
312 .inputs
= mcdeint_inputs
,
313 .outputs
= mcdeint_outputs
,
314 .priv_class
= &mcdeint_class
,