2 * Copyright (c) 2011 Stefano Sabatini
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * Compute a look-up table for binding the input value to the output
24 * value, and apply it to input video.
27 #include "libavutil/attributes.h"
28 #include "libavutil/common.h"
29 #include "libavutil/eval.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
33 #include "drawutils.h"
/* Names of the variables available in the per-component expressions.
 * Order must match the VAR_* enum used to index var_values[]. */
static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "val",      ///< input value for the pixel
    "maxval",   ///< max value for the pixel
    "minval",   ///< min value for the pixel
    "negval",   ///< negated value
    "clipval",  ///< input value clipped to the minval-maxval range
    NULL        /* terminator required by av_expr_parse() */
};
60 typedef struct LutContext
{
62 uint8_t lut
[4][256]; ///< lookup table for each component
63 char *comp_expr_str
[4];
66 double var_values
[VAR_VARS_NB
];
69 int negate_alpha
; /* only used by negate */
/* Helpers for the AVOption tables below: field offset into LutContext,
 * and the flag set common to all of this file's options. */
#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
83 static const AVOption options
[] = {
84 { "c0", "set component #0 expression", OFFSET(comp_expr_str
[0]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
85 { "c1", "set component #1 expression", OFFSET(comp_expr_str
[1]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
86 { "c2", "set component #2 expression", OFFSET(comp_expr_str
[2]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
87 { "c3", "set component #3 expression", OFFSET(comp_expr_str
[3]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
88 { "y", "set Y expression", OFFSET(comp_expr_str
[Y
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
89 { "u", "set U expression", OFFSET(comp_expr_str
[U
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
90 { "v", "set V expression", OFFSET(comp_expr_str
[V
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
91 { "r", "set R expression", OFFSET(comp_expr_str
[R
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
92 { "g", "set G expression", OFFSET(comp_expr_str
[G
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
93 { "b", "set B expression", OFFSET(comp_expr_str
[B
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
94 { "a", "set A expression", OFFSET(comp_expr_str
[A
]), AV_OPT_TYPE_STRING
, { .str
= "val" }, .flags
= FLAGS
},
98 static av_cold
void uninit(AVFilterContext
*ctx
)
100 LutContext
*s
= ctx
->priv
;
103 for (i
= 0; i
< 4; i
++) {
104 av_expr_free(s
->comp_expr
[i
]);
105 s
->comp_expr
[i
] = NULL
;
106 av_freep(&s
->comp_expr_str
[i
]);
/* Planar 8-bit YUV(A) formats supported by the YUV code path.
 * NOTE(review): the visible list ended in a line continuation; the trailing
 * AV_PIX_FMT_YUVJ440P entry is restored to complete the J-range set. */
#define YUV_FORMATS                                                     \
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,       \
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,       \
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,      \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,      \
    AV_PIX_FMT_YUVJ440P

/* Packed 8-bit RGB(A) formats supported by the RGB code path. */
#define RGB_FORMATS                             \
    AV_PIX_FMT_ARGB,  AV_PIX_FMT_RGBA,          \
    AV_PIX_FMT_ABGR,  AV_PIX_FMT_BGRA,          \
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24
/* Format lists used by query_formats(): lutyuv accepts only YUV, lutrgb
 * only RGB, and the generic lut/negate filters accept both. */
static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE };
126 static int query_formats(AVFilterContext
*ctx
)
128 LutContext
*s
= ctx
->priv
;
130 const enum AVPixelFormat
*pix_fmts
= s
->is_rgb
? rgb_pix_fmts
:
131 s
->is_yuv
? yuv_pix_fmts
:
134 ff_set_common_formats(ctx
, ff_make_format_list(pix_fmts
));
139 * Clip value val in the minval - maxval range.
141 static double clip(void *opaque
, double val
)
143 LutContext
*s
= opaque
;
144 double minval
= s
->var_values
[VAR_MINVAL
];
145 double maxval
= s
->var_values
[VAR_MAXVAL
];
147 return av_clip(val
, minval
, maxval
);
151 * Compute gamma correction for value val, assuming the minval-maxval
152 * range, val is clipped to a value contained in the same interval.
154 static double compute_gammaval(void *opaque
, double gamma
)
156 LutContext
*s
= opaque
;
157 double val
= s
->var_values
[VAR_CLIPVAL
];
158 double minval
= s
->var_values
[VAR_MINVAL
];
159 double maxval
= s
->var_values
[VAR_MAXVAL
];
161 return pow((val
-minval
)/(maxval
-minval
), gamma
) * (maxval
-minval
)+minval
;
165 * Compute Rec.709 gama correction of value val
167 static double compute_gammaval709(void *opaque
, double gamma
)
169 LutContext
*s
= opaque
;
170 double val
= s
->var_values
[VAR_CLIPVAL
];
171 double minval
= s
->var_values
[VAR_MINVAL
];
172 double maxval
= s
->var_values
[VAR_MAXVAL
];
173 double level
= (val
- minval
) / (maxval
- minval
);
174 level
= level
< 0.018 ? 4.5 * level
175 : 1.099 * pow(level
, 1.0 / gamma
) - 0.099;
176 return level
* (maxval
- minval
) + minval
;
179 static double (* const funcs1
[])(void *, double) = {
181 (void *)compute_gammaval
,
182 (void *)compute_gammaval709
,
186 static const char * const funcs1_names
[] = {
193 static int config_props(AVFilterLink
*inlink
)
195 AVFilterContext
*ctx
= inlink
->dst
;
196 LutContext
*s
= ctx
->priv
;
197 const AVPixFmtDescriptor
*desc
= av_pix_fmt_desc_get(inlink
->format
);
198 uint8_t rgba_map
[4]; /* component index -> RGBA color index map */
202 s
->hsub
= desc
->log2_chroma_w
;
203 s
->vsub
= desc
->log2_chroma_h
;
205 s
->var_values
[VAR_W
] = inlink
->w
;
206 s
->var_values
[VAR_H
] = inlink
->h
;
208 switch (inlink
->format
) {
209 case AV_PIX_FMT_YUV410P
:
210 case AV_PIX_FMT_YUV411P
:
211 case AV_PIX_FMT_YUV420P
:
212 case AV_PIX_FMT_YUV422P
:
213 case AV_PIX_FMT_YUV440P
:
214 case AV_PIX_FMT_YUV444P
:
215 case AV_PIX_FMT_YUVA420P
:
216 case AV_PIX_FMT_YUVA422P
:
217 case AV_PIX_FMT_YUVA444P
:
218 min
[Y
] = min
[U
] = min
[V
] = 16;
220 max
[U
] = max
[V
] = 240;
221 min
[A
] = 0; max
[A
] = 255;
224 min
[0] = min
[1] = min
[2] = min
[3] = 0;
225 max
[0] = max
[1] = max
[2] = max
[3] = 255;
228 s
->is_yuv
= s
->is_rgb
= 0;
229 if (ff_fmt_is_in(inlink
->format
, yuv_pix_fmts
)) s
->is_yuv
= 1;
230 else if (ff_fmt_is_in(inlink
->format
, rgb_pix_fmts
)) s
->is_rgb
= 1;
233 ff_fill_rgba_map(rgba_map
, inlink
->format
);
234 s
->step
= av_get_bits_per_pixel(desc
) >> 3;
237 for (color
= 0; color
< desc
->nb_components
; color
++) {
239 int comp
= s
->is_rgb
? rgba_map
[color
] : color
;
241 /* create the parsed expression */
242 av_expr_free(s
->comp_expr
[color
]);
243 s
->comp_expr
[color
] = NULL
;
244 ret
= av_expr_parse(&s
->comp_expr
[color
], s
->comp_expr_str
[color
],
245 var_names
, funcs1_names
, funcs1
, NULL
, NULL
, 0, ctx
);
247 av_log(ctx
, AV_LOG_ERROR
,
248 "Error when parsing the expression '%s' for the component %d and color %d.\n",
249 s
->comp_expr_str
[comp
], comp
, color
);
250 return AVERROR(EINVAL
);
253 /* compute the lut */
254 s
->var_values
[VAR_MAXVAL
] = max
[color
];
255 s
->var_values
[VAR_MINVAL
] = min
[color
];
257 for (val
= 0; val
< 256; val
++) {
258 s
->var_values
[VAR_VAL
] = val
;
259 s
->var_values
[VAR_CLIPVAL
] = av_clip(val
, min
[color
], max
[color
]);
260 s
->var_values
[VAR_NEGVAL
] =
261 av_clip(min
[color
] + max
[color
] - s
->var_values
[VAR_VAL
],
262 min
[color
], max
[color
]);
264 res
= av_expr_eval(s
->comp_expr
[color
], s
->var_values
, s
);
266 av_log(ctx
, AV_LOG_ERROR
,
267 "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
268 s
->comp_expr_str
[color
], val
, comp
);
269 return AVERROR(EINVAL
);
271 s
->lut
[comp
][val
] = av_clip((int)res
, min
[color
], max
[color
]);
272 av_log(ctx
, AV_LOG_DEBUG
, "val[%d][%d] = %d\n", comp
, val
, s
->lut
[comp
][val
]);
279 static int filter_frame(AVFilterLink
*inlink
, AVFrame
*in
)
281 AVFilterContext
*ctx
= inlink
->dst
;
282 LutContext
*s
= ctx
->priv
;
283 AVFilterLink
*outlink
= ctx
->outputs
[0];
285 uint8_t *inrow
, *outrow
, *inrow0
, *outrow0
;
286 int i
, j
, plane
, direct
= 0;
288 if (av_frame_is_writable(in
)) {
292 out
= ff_get_video_buffer(outlink
, outlink
->w
, outlink
->h
);
295 return AVERROR(ENOMEM
);
297 av_frame_copy_props(out
, in
);
302 inrow0
= in
->data
[0];
303 outrow0
= out
->data
[0];
305 for (i
= 0; i
< in
->height
; i
++) {
307 const uint8_t (*tab
)[256] = (const uint8_t (*)[256])s
->lut
;
310 for (j
= 0; j
< w
; j
++) {
312 case 4: outrow
[3] = tab
[3][inrow
[3]]; // Fall-through
313 case 3: outrow
[2] = tab
[2][inrow
[2]]; // Fall-through
314 case 2: outrow
[1] = tab
[1][inrow
[1]]; // Fall-through
315 default: outrow
[0] = tab
[0][inrow
[0]];
320 inrow0
+= in
->linesize
[0];
321 outrow0
+= out
->linesize
[0];
325 for (plane
= 0; plane
< 4 && in
->data
[plane
] && in
->linesize
[plane
]; plane
++) {
326 int vsub
= plane
== 1 || plane
== 2 ? s
->vsub
: 0;
327 int hsub
= plane
== 1 || plane
== 2 ? s
->hsub
: 0;
328 int h
= FF_CEIL_RSHIFT(inlink
->h
, vsub
);
329 int w
= FF_CEIL_RSHIFT(inlink
->w
, hsub
);
331 inrow
= in
->data
[plane
];
332 outrow
= out
->data
[plane
];
334 for (i
= 0; i
< h
; i
++) {
335 const uint8_t *tab
= s
->lut
[plane
];
336 for (j
= 0; j
< w
; j
++)
337 outrow
[j
] = tab
[inrow
[j
]];
338 inrow
+= in
->linesize
[plane
];
339 outrow
+= out
->linesize
[plane
];
347 return ff_filter_frame(outlink
, out
);
350 static const AVFilterPad inputs
[] = {
352 .type
= AVMEDIA_TYPE_VIDEO
,
353 .filter_frame
= filter_frame
,
354 .config_props
= config_props
,
358 static const AVFilterPad outputs
[] = {
360 .type
= AVMEDIA_TYPE_VIDEO
,
/* Stamp out one AVFilter definition per flavor (lut, lutrgb, lutyuv,
 * negate). Each flavor supplies its own name_##_init and name_##_class;
 * uninit/query_formats/pads are shared. */
#define DEFINE_LUT_FILTER(name_, description_)                          \
    AVFilter ff_vf_##name_ = {                                          \
        .name          = #name_,                                        \
        .description   = NULL_IF_CONFIG_SMALL(description_),            \
        .priv_size     = sizeof(LutContext),                            \
        .priv_class    = &name_ ## _class,                              \
        .init          = name_##_init,                                  \
        .uninit        = uninit,                                        \
        .query_formats = query_formats,                                 \
        .inputs        = inputs,                                        \
        .outputs       = outputs,                                       \
        .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,        \
    }
#if CONFIG_LUT_FILTER

#define lut_options options
AVFILTER_DEFINE_CLASS(lut);

/* Generic lut filter: no format-class restriction, nothing to preset. */
static int lut_init(AVFilterContext *ctx)
{
    return 0;
}

DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif
#if CONFIG_LUTYUV_FILTER

#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);

/* lutyuv flavor: restrict format negotiation to the YUV list. */
static av_cold int lutyuv_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_yuv = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
#if CONFIG_LUTRGB_FILTER

#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);

/* lutrgb flavor: restrict format negotiation to the RGB list. */
static av_cold int lutrgb_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_rgb = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
#if CONFIG_NEGATE_FILTER

static const AVOption negate_options[] = {
    { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL } /* table terminator required by the AVOption API */
};

AVFILTER_DEFINE_CLASS(negate);

/* negate flavor: implemented on top of the lut machinery by presetting
 * every component expression to "negval" ("val" for alpha unless
 * negate_alpha is set). */
static av_cold int negate_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;
    int i;

    av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);

    for (i = 0; i < 4; i++) {
        s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
                                        "val" : "negval");
        if (!s->comp_expr_str[i]) {
            uninit(ctx); /* free the strings allocated so far */
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

DEFINE_LUT_FILTER(negate, "Negate input video.");
#endif