/*
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 #include "libavutil/avassert.h"
21 #include "libavutil/cpu.h"
22 #include "libavutil/common.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixdesc.h"
25 #include "libavutil/imgutils.h"
32 typedef struct ThreadData
{
/* Spatial-prediction probe used inside FILTER: tries the edge direction at
 * offset (j) and keeps it if its score (sum of absolute differences along
 * three diagonals) beats the current best.  Deliberately leaves two braces
 * open; FILTER closes them with the literal "}} }}" after the CHECK calls.
 * NOTE(review): the "#define CHECK(j)" header line was lost in extraction and
 * is restored from upstream. */
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\
/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3.
 * Core yadif kernel: for each pixel, blend a spatial (edge-directed) and a
 * temporal prediction, clamped by the temporal difference.  Works on int
 * samples so it serves both 8-bit and 16-bit entry points.
 * NOTE(review): the lines declaring c/e, the is_not_edge guard, the mode&2
 * guard and the trailing pointer increments were lost in extraction and are
 * restored from upstream — verify against the original file. */
#define FILTER(start, end, is_not_edge) \
    for (x = start;  x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            /* CHECK opens braces that the "}} }}" below close on purpose */ \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { /* spatial interlacing check enabled */ \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
           spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
           spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
93 static void filter_line_c(void *dst1
,
94 void *prev1
, void *cur1
, void *next1
,
95 int w
, int prefs
, int mrefs
, int parity
, int mode
)
98 uint8_t *prev
= prev1
;
100 uint8_t *next
= next1
;
102 uint8_t *prev2
= parity
? prev
: cur
;
103 uint8_t *next2
= parity
? cur
: next
;
105 /* The function is called with the pointers already pointing to data[3] and
106 * with 6 subtracted from the width. This allows the FILTER macro to be
107 * called so that it processes all the pixels normally. A constant value of
108 * true for is_not_edge lets the compiler ignore the if statement. */
113 static void filter_edges(void *dst1
, void *prev1
, void *cur1
, void *next1
,
114 int w
, int prefs
, int mrefs
, int parity
, int mode
)
117 uint8_t *prev
= prev1
;
119 uint8_t *next
= next1
;
121 uint8_t *prev2
= parity
? prev
: cur
;
122 uint8_t *next2
= parity
? cur
: next
;
124 /* Only edge pixels need to be processed here. A constant value of false
125 * for is_not_edge should let the compiler ignore the whole branch. */
128 dst
= (uint8_t*)dst1
+ w
- (MAX_ALIGN
-1);
129 prev
= (uint8_t*)prev1
+ w
- (MAX_ALIGN
-1);
130 cur
= (uint8_t*)cur1
+ w
- (MAX_ALIGN
-1);
131 next
= (uint8_t*)next1
+ w
- (MAX_ALIGN
-1);
132 prev2
= (uint8_t*)(parity
? prev
: cur
);
133 next2
= (uint8_t*)(parity
? cur
: next
);
135 FILTER(w
- (MAX_ALIGN
-1), w
- 3, 1)
140 static void filter_line_c_16bit(void *dst1
,
141 void *prev1
, void *cur1
, void *next1
,
142 int w
, int prefs
, int mrefs
, int parity
,
145 uint16_t *dst
= dst1
;
146 uint16_t *prev
= prev1
;
147 uint16_t *cur
= cur1
;
148 uint16_t *next
= next1
;
150 uint16_t *prev2
= parity
? prev
: cur
;
151 uint16_t *next2
= parity
? cur
: next
;
158 static void filter_edges_16bit(void *dst1
, void *prev1
, void *cur1
, void *next1
,
159 int w
, int prefs
, int mrefs
, int parity
, int mode
)
161 uint16_t *dst
= dst1
;
162 uint16_t *prev
= prev1
;
163 uint16_t *cur
= cur1
;
164 uint16_t *next
= next1
;
166 uint16_t *prev2
= parity
? prev
: cur
;
167 uint16_t *next2
= parity
? cur
: next
;
173 dst
= (uint16_t*)dst1
+ w
- (MAX_ALIGN
/2-1);
174 prev
= (uint16_t*)prev1
+ w
- (MAX_ALIGN
/2-1);
175 cur
= (uint16_t*)cur1
+ w
- (MAX_ALIGN
/2-1);
176 next
= (uint16_t*)next1
+ w
- (MAX_ALIGN
/2-1);
177 prev2
= (uint16_t*)(parity
? prev
: cur
);
178 next2
= (uint16_t*)(parity
? cur
: next
);
180 FILTER(w
- (MAX_ALIGN
/2-1), w
- 3, 1)
184 static int filter_slice(AVFilterContext
*ctx
, void *arg
, int jobnr
, int nb_jobs
)
186 YADIFContext
*s
= ctx
->priv
;
187 ThreadData
*td
= arg
;
188 int refs
= s
->cur
->linesize
[td
->plane
];
189 int df
= (s
->csp
->comp
[td
->plane
].depth_minus1
+ 8) / 8;
191 int slice_start
= (td
->h
* jobnr
) / nb_jobs
;
192 int slice_end
= (td
->h
* (jobnr
+1)) / nb_jobs
;
195 /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
196 * we need to call the c variant which avoids this for border pixels
198 for (y
= slice_start
; y
< slice_end
; y
++) {
199 if ((y
^ td
->parity
) & 1) {
200 uint8_t *prev
= &s
->prev
->data
[td
->plane
][y
* refs
];
201 uint8_t *cur
= &s
->cur
->data
[td
->plane
][y
* refs
];
202 uint8_t *next
= &s
->next
->data
[td
->plane
][y
* refs
];
203 uint8_t *dst
= &td
->frame
->data
[td
->plane
][y
* td
->frame
->linesize
[td
->plane
]];
204 int mode
= y
== 1 || y
+ 2 == td
->h
? 2 : s
->mode
;
205 s
->filter_line(dst
+ pix_3
, prev
+ pix_3
, cur
+ pix_3
,
206 next
+ pix_3
, td
->w
- (3 + MAX_ALIGN
/df
-1),
207 y
+ 1 < td
->h
? refs
: -refs
,
209 td
->parity
^ td
->tff
, mode
);
210 s
->filter_edges(dst
, prev
, cur
, next
, td
->w
,
211 y
+ 1 < td
->h
? refs
: -refs
,
213 td
->parity
^ td
->tff
, mode
);
215 memcpy(&td
->frame
->data
[td
->plane
][y
* td
->frame
->linesize
[td
->plane
]],
216 &s
->cur
->data
[td
->plane
][y
* refs
], td
->w
* df
);
222 static void filter(AVFilterContext
*ctx
, AVFrame
*dstpic
,
225 YADIFContext
*yadif
= ctx
->priv
;
226 ThreadData td
= { .frame
= dstpic
, .parity
= parity
, .tff
= tff
};
229 for (i
= 0; i
< yadif
->csp
->nb_components
; i
++) {
230 int w
= dstpic
->width
;
231 int h
= dstpic
->height
;
233 if (i
== 1 || i
== 2) {
234 w
= FF_CEIL_RSHIFT(w
, yadif
->csp
->log2_chroma_w
);
235 h
= FF_CEIL_RSHIFT(h
, yadif
->csp
->log2_chroma_h
);
243 ctx
->internal
->execute(ctx
, filter_slice
, &td
, NULL
, FFMIN(h
, ctx
->graph
->nb_threads
));
249 static int return_frame(AVFilterContext
*ctx
, int is_second
)
251 YADIFContext
*yadif
= ctx
->priv
;
252 AVFilterLink
*link
= ctx
->outputs
[0];
255 if (yadif
->parity
== -1) {
256 tff
= yadif
->cur
->interlaced_frame
?
257 yadif
->cur
->top_field_first
: 1;
259 tff
= yadif
->parity
^ 1;
263 yadif
->out
= ff_get_video_buffer(link
, link
->w
, link
->h
);
265 return AVERROR(ENOMEM
);
267 av_frame_copy_props(yadif
->out
, yadif
->cur
);
268 yadif
->out
->interlaced_frame
= 0;
271 filter(ctx
, yadif
->out
, tff
^ !is_second
, tff
);
274 int64_t cur_pts
= yadif
->cur
->pts
;
275 int64_t next_pts
= yadif
->next
->pts
;
277 if (next_pts
!= AV_NOPTS_VALUE
&& cur_pts
!= AV_NOPTS_VALUE
) {
278 yadif
->out
->pts
= cur_pts
+ next_pts
;
280 yadif
->out
->pts
= AV_NOPTS_VALUE
;
283 ret
= ff_filter_frame(ctx
->outputs
[0], yadif
->out
);
285 yadif
->frame_pending
= (yadif
->mode
&1) && !is_second
;
289 static int checkstride(YADIFContext
*yadif
, const AVFrame
*a
, const AVFrame
*b
)
292 for (i
= 0; i
< yadif
->csp
->nb_components
; i
++)
293 if (a
->linesize
[i
] != b
->linesize
[i
])
298 static void fixstride(AVFilterLink
*link
, AVFrame
*f
)
300 AVFrame
*dst
= ff_default_get_video_buffer(link
, f
->width
, f
->height
);
303 av_frame_copy_props(dst
, f
);
304 av_image_copy(dst
->data
, dst
->linesize
,
305 (const uint8_t **)f
->data
, f
->linesize
,
306 dst
->format
, dst
->width
, dst
->height
);
308 av_frame_move_ref(f
, dst
);
312 static int filter_frame(AVFilterLink
*link
, AVFrame
*frame
)
314 AVFilterContext
*ctx
= link
->dst
;
315 YADIFContext
*yadif
= ctx
->priv
;
319 if (yadif
->frame_pending
)
320 return_frame(ctx
, 1);
323 av_frame_free(&yadif
->prev
);
324 yadif
->prev
= yadif
->cur
;
325 yadif
->cur
= yadif
->next
;
329 !(yadif
->cur
= av_frame_clone(yadif
->next
)))
330 return AVERROR(ENOMEM
);
332 if (checkstride(yadif
, yadif
->next
, yadif
->cur
)) {
333 av_log(ctx
, AV_LOG_VERBOSE
, "Reallocating frame due to differing stride\n");
334 fixstride(link
, yadif
->next
);
336 if (checkstride(yadif
, yadif
->next
, yadif
->cur
))
337 fixstride(link
, yadif
->cur
);
338 if (yadif
->prev
&& checkstride(yadif
, yadif
->next
, yadif
->prev
))
339 fixstride(link
, yadif
->prev
);
340 if (checkstride(yadif
, yadif
->next
, yadif
->cur
) || (yadif
->prev
&& checkstride(yadif
, yadif
->next
, yadif
->prev
))) {
341 av_log(ctx
, AV_LOG_ERROR
, "Failed to reallocate frame\n");
345 if ((yadif
->deint
&& !yadif
->cur
->interlaced_frame
) || ctx
->is_disabled
) {
346 yadif
->out
= av_frame_clone(yadif
->cur
);
348 return AVERROR(ENOMEM
);
350 av_frame_free(&yadif
->prev
);
351 if (yadif
->out
->pts
!= AV_NOPTS_VALUE
)
352 yadif
->out
->pts
*= 2;
353 return ff_filter_frame(ctx
->outputs
[0], yadif
->out
);
359 yadif
->out
= ff_get_video_buffer(ctx
->outputs
[0], link
->w
, link
->h
);
361 return AVERROR(ENOMEM
);
363 av_frame_copy_props(yadif
->out
, yadif
->cur
);
364 yadif
->out
->interlaced_frame
= 0;
366 if (yadif
->out
->pts
!= AV_NOPTS_VALUE
)
367 yadif
->out
->pts
*= 2;
369 return return_frame(ctx
, 0);
372 static int request_frame(AVFilterLink
*link
)
374 AVFilterContext
*ctx
= link
->src
;
375 YADIFContext
*yadif
= ctx
->priv
;
377 if (yadif
->frame_pending
) {
378 return_frame(ctx
, 1);
388 ret
= ff_request_frame(link
->src
->inputs
[0]);
390 if (ret
== AVERROR_EOF
&& yadif
->cur
) {
391 AVFrame
*next
= av_frame_clone(yadif
->next
);
394 return AVERROR(ENOMEM
);
396 next
->pts
= yadif
->next
->pts
* 2 - yadif
->cur
->pts
;
398 filter_frame(link
->src
->inputs
[0], next
);
400 } else if (ret
< 0) {
403 } while (!yadif
->prev
);
408 static av_cold
void uninit(AVFilterContext
*ctx
)
410 YADIFContext
*yadif
= ctx
->priv
;
412 av_frame_free(&yadif
->prev
);
413 av_frame_free(&yadif
->cur
);
414 av_frame_free(&yadif
->next
);
417 static int query_formats(AVFilterContext
*ctx
)
419 static const enum AVPixelFormat pix_fmts
[] = {
435 AV_PIX_FMT_YUV420P10
,
436 AV_PIX_FMT_YUV422P10
,
437 AV_PIX_FMT_YUV444P10
,
438 AV_PIX_FMT_YUV420P12
,
439 AV_PIX_FMT_YUV422P12
,
440 AV_PIX_FMT_YUV444P12
,
441 AV_PIX_FMT_YUV420P14
,
442 AV_PIX_FMT_YUV422P14
,
443 AV_PIX_FMT_YUV444P14
,
444 AV_PIX_FMT_YUV420P16
,
445 AV_PIX_FMT_YUV422P16
,
446 AV_PIX_FMT_YUV444P16
,
455 ff_set_common_formats(ctx
, ff_make_format_list(pix_fmts
));
460 static int config_props(AVFilterLink
*link
)
462 AVFilterContext
*ctx
= link
->src
;
463 YADIFContext
*s
= link
->src
->priv
;
465 link
->time_base
.num
= link
->src
->inputs
[0]->time_base
.num
;
466 link
->time_base
.den
= link
->src
->inputs
[0]->time_base
.den
* 2;
467 link
->w
= link
->src
->inputs
[0]->w
;
468 link
->h
= link
->src
->inputs
[0]->h
;
471 link
->frame_rate
= av_mul_q(link
->src
->inputs
[0]->frame_rate
, (AVRational
){2,1});
473 if (link
->w
< 3 || link
->h
< 3) {
474 av_log(ctx
, AV_LOG_ERROR
, "Video of less than 3 columns or lines is not supported\n");
475 return AVERROR(EINVAL
);
478 s
->csp
= av_pix_fmt_desc_get(link
->format
);
479 if (s
->csp
->comp
[0].depth_minus1
/ 8 == 1) {
480 s
->filter_line
= filter_line_c_16bit
;
481 s
->filter_edges
= filter_edges_16bit
;
483 s
->filter_line
= filter_line_c
;
484 s
->filter_edges
= filter_edges
;
488 ff_yadif_init_x86(s
);
494 #define OFFSET(x) offsetof(YADIFContext, x)
495 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
497 #define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
499 static const AVOption yadif_options
[] = {
500 { "mode", "specify the interlacing mode", OFFSET(mode
), AV_OPT_TYPE_INT
, {.i64
=YADIF_MODE_SEND_FRAME
}, 0, 3, FLAGS
, "mode"},
501 CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME
, "mode"),
502 CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD
, "mode"),
503 CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL
, "mode"),
504 CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL
, "mode"),
506 { "parity", "specify the assumed picture field parity", OFFSET(parity
), AV_OPT_TYPE_INT
, {.i64
=YADIF_PARITY_AUTO
}, -1, 1, FLAGS
, "parity" },
507 CONST("tff", "assume top field first", YADIF_PARITY_TFF
, "parity"),
508 CONST("bff", "assume bottom field first", YADIF_PARITY_BFF
, "parity"),
509 CONST("auto", "auto detect parity", YADIF_PARITY_AUTO
, "parity"),
511 { "deint", "specify which frames to deinterlace", OFFSET(deint
), AV_OPT_TYPE_INT
, {.i64
=YADIF_DEINT_ALL
}, 0, 1, FLAGS
, "deint" },
512 CONST("all", "deinterlace all frames", YADIF_DEINT_ALL
, "deint"),
513 CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED
, "deint"),
518 AVFILTER_DEFINE_CLASS(yadif
);
520 static const AVFilterPad avfilter_vf_yadif_inputs
[] = {
523 .type
= AVMEDIA_TYPE_VIDEO
,
524 .filter_frame
= filter_frame
,
529 static const AVFilterPad avfilter_vf_yadif_outputs
[] = {
532 .type
= AVMEDIA_TYPE_VIDEO
,
533 .request_frame
= request_frame
,
534 .config_props
= config_props
,
539 AVFilter ff_vf_yadif
= {
541 .description
= NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
542 .priv_size
= sizeof(YADIFContext
),
543 .priv_class
= &yadif_class
,
545 .query_formats
= query_formats
,
546 .inputs
= avfilter_vf_yadif_inputs
,
547 .outputs
= avfilter_vf_yadif_outputs
,
548 .flags
= AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
| AVFILTER_FLAG_SLICE_THREADS
,