From f4891ca38470893d29864821cd1988bb026cf160 Mon Sep 17 00:00:00 2001
From: Mina <minasamy_@hotmail.com>
Date: Tue, 4 Dec 2018 00:22:23 +0200
Subject: [PATCH] lavfi/vf_colorconstancy: add weighted_greyedge
Signed-off-by: Mina <minasamy_@hotmail.com>
---
doc/filters.texi | 39 ++++
libavfilter/Makefile | 1 +
libavfilter/allfilters.c | 1 +
libavfilter/vf_colorconstancy.c | 322 ++++++++++++++++++++++++--------
4 files changed, 288 insertions(+), 75 deletions(-)
@@ -18038,6 +18038,45 @@ separatefields,select=eq(mod(n,4),0)+eq(mod(n,4),3),weave
@end example
@end itemize
+@section weighted_greyedge
+A color constancy variation filter which estimates scene illumination via the
+weighted grey edge algorithm and corrects the scene colors accordingly. The
+weighting scheme emphasizes specular edges, which carry more information about
+the color of the illuminant than material edges do.
+
+See: @url{http://refbase.cvc.uab.es/files/GGW2012.pdf}
+
+The filter accepts the following options:
+
+@table @option
+@item minknorm
+The Minkowski parameter to be used for calculating the Minkowski distance. Must
+be chosen in the range [1,20] and default value is 1.
+
+@item sigma
+The standard deviation of Gaussian blur to be applied to the scene. Must be
+chosen in the range [0.2,1024.0] and default value is 1.
+
+@item wmap_k
+The power applied to the weight map to emphasize higher weights. Must be chosen
+in the range [1,20] and default value is 2.
+
+@item iters
+The maximum number of iterations for performing the algorithm. Must be chosen in the
+range [1,20] and default value is 5.
+
+@item angerr
+The angular error threshold in degrees for stopping the algorithm. Must be chosen
+in the range [0,360] and default value is 0.001.
+@end table
+
+@subsection Examples
+@itemize
+
+@item
+Apply the weighted grey edge algorithm with Minkowski norm 5, sigma 2, weight
+map power 10, at most 10 iterations and an angular error threshold of 0.0005
+degrees:
+@example
+weighted_greyedge=minknorm=5:sigma=2:wmap_k=10:iters=10:angerr=0.0005
+@end example
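+
+@item
+Run the filter with its default parameters (equivalent to
+weighted_greyedge=minknorm=1:sigma=1:wmap_k=2:iters=5:angerr=0.001):
+@example
+ffmpeg -i INPUT -vf weighted_greyedge OUTPUT
+@end example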
+@end itemize
+
@section xbr
Apply the xBR high-quality magnification filter which is designed for pixel
art. It follows a set of edge-detection rules, see
@@ -414,6 +414,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER) += vf_colorconstancy.o
OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o
OBJS-$(CONFIG_XSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o yadif_common.o
@@ -392,6 +392,7 @@ extern AVFilter ff_vf_vstack;
extern AVFilter ff_vf_w3fdif;
extern AVFilter ff_vf_waveform;
extern AVFilter ff_vf_weave;
+extern AVFilter ff_vf_weighted_greyedge;
extern AVFilter ff_vf_xbr;
extern AVFilter ff_vf_xstack;
extern AVFilter ff_vf_yadif;
@@ -26,6 +26,14 @@
*
* @cite
* J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
+ *
+ * @cite
+ * J. van de Weijer, Th. Gevers, and J. Geusebroek,
+ * “Edge and corner detection by photometric quasi-invariants”.
+ *
+ * @cite
+ * A. Gijsenij, Th. Gevers, J. van de Weijer,
+ * "Improving Color Constancy by Photometric Edge Weighting".
*/
#include "libavutil/imgutils.h"
@@ -39,7 +47,8 @@
#include <math.h>
-#define GREY_EDGE "greyedge"
+#define GREY_EDGE "greyedge"
+#define WEIGHTED_GREY_EDGE "weighted_greyedge"
#define SQRT3 1.73205080757
@@ -79,6 +88,10 @@ typedef struct ColorConstancyContext {
int minknorm; /**< @minknorm = 0 : getMax instead */
double sigma;
+ int wmap_k;
+ int iters;
+ double angerr_thresh;
+
int nb_threads;
int planeheight[4];
int planewidth[4];
@@ -477,53 +490,73 @@ static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, in
}
/**
- * Main control function for grey edge algorithm.
+ * Slice function for the weighted grey edge algorithm, called iteratively to
+ * calculate and apply the weighting scheme.
*
* @param ctx the filter context.
- * @param in frame to perfrom grey edge on.
+ * @param arg data to be passed between threads.
+ * @param jobnr current job number.
+ * @param nb_jobs total number of jobs.
*
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
+ * @return 0.
*/
-static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
+static int filter_slice_weighted_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
{
ColorConstancyContext *s = ctx->priv;
- ThreadData td;
- int minknorm = s->minknorm;
- int difford = s->difford;
- double *white = s->white;
- int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
- int plane, job, ret;
-
- td.in = in;
- ret = setup_derivative_buffers(ctx, &td);
- if (ret) {
- return ret;
- }
- get_derivative(ctx, &td);
- if (difford > 0) {
- ctx->internal->execute(ctx, slice_normalize, &td, NULL, nb_jobs);
- }
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ int minknorm = s->minknorm;
+ int wmap_k = s->wmap_k;
+ const uint8_t thresh = 255;
- ctx->internal->execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
- if (!minknorm) {
- for (plane = 0; plane < NUM_PLANES; ++plane) {
- white[plane] = 0; // All values are absolute
- for (job = 0; job < nb_jobs; ++job) {
- white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
+ const int height = s->planeheight[1];
+ const int width = s->planewidth[1];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ double dx, dy, d, dw[NUM_PLANES];
+ double o3_x, o3_y;
+ double spec_var, weight;
+ double result[NUM_PLANES];
+ int r, c, p;
+
+ memset(result, 0, NUM_PLANES * sizeof(result[0]));
+
+ for (r = slice_start; r < slice_end; ++r) {
+ for (c = 0; c < width; ++c) {
+ d = 0;
+ o3_x = 0;
+ o3_y = 0;
+ for (p = 0; p < NUM_PLANES; ++p) {
+ dx = td->data[INDEX_DX][p][INDX2D(r, c, width)];
+ dy = td->data[INDEX_DY][p][INDX2D(r, c, width)];
+ dw[p] = sqrt(dx * dx + dy * dy);
+
+ d += dw[p] * dw[p];
+ o3_x += dx;
+ o3_y += dy;
}
- }
- } else {
- for (plane = 0; plane < NUM_PLANES; ++plane) {
- white[plane] = 0;
- for (job = 0; job < nb_jobs; ++job) {
- white[plane] += td.data[INDEX_DST][plane][job];
+ d = sqrt(d);
+ o3_x /= SQRT3;
+ o3_y /= SQRT3;
+
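+            /* Edge weight from the specular variant of the derivative: the
+             * fraction of the gradient energy lying along the achromatic
+             * direction o3 = (1,1,1)/sqrt(3), clipped to [0,1] and raised to
+             * the power wmap_k so that specular edges dominate the estimate. */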
+            spec_var = sqrt(o3_x * o3_x + o3_y * o3_y);
+            /* Guard against flat regions where d == 0 to avoid NaN weights. */
+            weight = d > 0.0 ? spec_var / d : 0.0;
+            if (weight > 1) {
+                weight = 1;
+            }
+            weight = pow(weight, wmap_k);
+
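+            /* Accumulate the weighted Minkowski sum per plane, skipping
+             * saturated pixels (value of 255) which carry no reliable
+             * illuminant information. */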
+ for (p = 0; p < NUM_PLANES; ++p) {
+                result[p] += pow(dw[p] * weight, minknorm)
+                             * (in->data[p][INDX2D(r, c, in->linesize[p])] < thresh);
}
- white[plane] = pow(white[plane], 1./minknorm);
}
}
- cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
+ for (p = 0; p < NUM_PLANES; ++p) {
+        /* Store this slice's partial sum at a row index (slice_start) that no
+         * other slice writes to, so the control thread can collect it later. */
+ td->data[INDEX_DST][p][slice_start] = result[p];
+ }
return 0;
}
@@ -554,32 +587,6 @@ static void normalize_light(double *light)
}
}
-/**
- * Redirects to corresponding algorithm estimation function and performs normalization
- * after estimation.
- *
- * @param ctx the filter context.
- * @param in frame to perfrom estimation on.
- *
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
- */
-static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
-{
- ColorConstancyContext *s = ctx->priv;
- int ret;
-
- ret = filter_grey_edge(ctx, in);
-
- av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
- s->white[0], s->white[1], s->white[2]);
- normalize_light(s->white);
- av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
- s->white[0], s->white[1], s->white[2]);
-
- return ret;
-}
-
/**
* Performs simple correction via diagonal transformation model.
*
@@ -636,6 +643,146 @@ static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out
ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
}
+/**
+ * Main control function for the grey edge algorithm: estimates the scene
+ * illumination, normalizes it and applies the chromatic adaptation to the
+ * output frame.
+ *
+ * @param ctx the filter context.
+ * @param in  frame to perform grey edge on.
+ * @param out pointer that receives the corrected frame: the input frame itself
+ *            when it is writable, a newly allocated buffer otherwise.
+ *
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code in case of failure.
+ */
+static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in, AVFrame **out)
+{
+ ColorConstancyContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int minknorm = s->minknorm;
+ int difford = s->difford;
+ double *white = s->white;
+ int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
+ ThreadData td;
+ int plane, job, ret;
+
+ td.in = in;
+ ret = setup_derivative_buffers(ctx, &td);
+ if (ret) {
+ return ret;
+ }
+ get_derivative(ctx, &td);
+ if (difford > 0) {
+ ctx->internal->execute(ctx, slice_normalize, &td, NULL, nb_jobs);
+ }
+
+ ctx->internal->execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
+ if (!minknorm) {
+ for (plane = 0; plane < NUM_PLANES; ++plane) {
+ white[plane] = 0; // All values are absolute
+ for (job = 0; job < nb_jobs; ++job) {
+ white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
+ }
+ }
+ } else {
+ for (plane = 0; plane < NUM_PLANES; ++plane) {
+ white[plane] = 0;
+ for (job = 0; job < nb_jobs; ++job) {
+ white[plane] += td.data[INDEX_DST][plane][job];
+ }
+ white[plane] = pow(white[plane], 1./minknorm);
+ }
+ }
+
+ cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
+
+ av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
+ s->white[0], s->white[1], s->white[2]);
+ normalize_light(s->white);
+ av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
+ s->white[0], s->white[1], s->white[2]);
+
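+    /* Reuse the input frame as output when it is writable, otherwise allocate
+     * a new buffer and copy the frame properties before correcting. */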
+ if (av_frame_is_writable(in)) {
+ *out = in;
+ } else {
+ *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+        if (!*out) {
+ av_log(ctx, AV_LOG_ERROR, "Out of memory while allocating output video buffer.\n");
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(*out, in);
+ }
+ chromatic_adaptation(ctx, in, *out);
+
+ return 0;
+}
+
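+/**
+ * Angular error: the angle (in radians) between the normalized illuminant
+ * estimate w and the achromatic "white" direction (1,1,1)/sqrt(3). An error of
+ * 0 means the corrected scene is estimated to be neutrally lit.
+ */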
+#define ANG_ERR(w) acos(((w)[0] + (w)[1] + (w)[2]) / sqrt(3))
+#define RAD2DEG(r) ((r) * 180.0 / M_PI)
+
+/**
+ * Main control function for the weighted grey edge algorithm: starts from a
+ * plain grey edge estimate and iteratively refines it using the weight map
+ * until the angular error threshold or the iteration limit is reached.
+ *
+ * @param ctx the filter context.
+ * @param in  frame to perform weighted grey edge on.
+ * @param out pointer that receives the corrected frame.
+ *
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code in case of failure.
+ */
+static int filter_weighted_grey_edge(AVFilterContext *ctx, AVFrame *in, AVFrame **out)
+{
+ ColorConstancyContext *s = ctx->priv;
+ const double angerr_thresh = s->angerr_thresh;
+ double *curr_estim_illum = s->white;
+ int difford = s->difford;
+ int minknorm = s->minknorm;
+ int it = s->iters;
+ int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
+ const int height = s->planeheight[1];
+ double final_estim_illum[NUM_PLANES];
+ ThreadData td;
+ int ret, plane, job;
+
+ ret = filter_grey_edge(ctx, in, out);
+ if (ret) {
+ return ret;
+ }
+ memcpy(final_estim_illum, curr_estim_illum, NUM_PLANES * sizeof(*curr_estim_illum));
+
+ td.in = in;
+ ret = setup_derivative_buffers(ctx, &td);
+ if (ret) {
+ return ret;
+ }
+
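+    /* Iteratively refine the estimate: each pass computes a weighted grey edge
+     * estimate of the progressively corrected frame, applies it in place via
+     * chromatic_adaptation() and folds it into the accumulated illuminant
+     * estimate, until the angular error drops below the threshold or the
+     * iteration limit is reached. */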
+ while (it-- && RAD2DEG(ANG_ERR(curr_estim_illum)) > angerr_thresh) {
+ get_derivative(ctx, &td);
+ ctx->internal->execute(ctx, filter_slice_weighted_grey_edge, &td, NULL, nb_jobs);
+
+ for (plane = 0; plane < NUM_PLANES; ++plane) {
+ curr_estim_illum[plane] = 0;
+ for (job = 0; job < nb_jobs; ++job) {
+ curr_estim_illum[plane] += td.data[INDEX_DST][plane][(height * job) / nb_jobs];
+ }
+ curr_estim_illum[plane] = pow(curr_estim_illum[plane], 1./minknorm);
+ }
+
+ normalize_light(curr_estim_illum);
+ chromatic_adaptation(ctx, td.in, td.in);
+
+ for (plane = 0; plane < NUM_PLANES; ++plane) {
+ final_estim_illum[plane] *= curr_estim_illum[plane];
+ }
+ normalize_light(final_estim_illum);
+
+ av_log(ctx, AV_LOG_DEBUG, "Current scene illumination=%f %f %f\n", curr_estim_illum[0],
+ curr_estim_illum[1], curr_estim_illum[2]);
+ av_log(ctx, AV_LOG_DEBUG, "Angular error = %.16f degrees\n", RAD2DEG(ANG_ERR(curr_estim_illum)) );
+ av_log(ctx, AV_LOG_DEBUG, "Iterative estimated illumination after normalization= %f %f %f\n",
+ final_estim_illum[0], final_estim_illum[1], final_estim_illum[2]);
+ }
+
+ cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
+ return 0;
+}
+
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
@@ -657,6 +804,11 @@ static int config_props(AVFilterLink *inlink)
double sigma = s->sigma;
int ret;
+ if (!strcmp(ctx->filter->name, WEIGHTED_GREY_EDGE)) {
+        /* difford is fixed to 1 for weighted_greyedge since it is not exposed
+           as a user option */
+ s->difford = 1;
+ }
+
if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
return AVERROR(EINVAL);
@@ -683,22 +835,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFrame *out;
int ret;
- ret = illumination_estimation(ctx, in);
- if (ret) {
- return ret;
+    if (!strcmp(ctx->filter->name, GREY_EDGE)) {
+        ret = filter_grey_edge(ctx, in, &out);
+    } else { /* WEIGHTED_GREY_EDGE */
+        ret = filter_weighted_grey_edge(ctx, in, &out);
}
- if (av_frame_is_writable(in)) {
- out = in;
- } else {
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_log(ctx, AV_LOG_ERROR, "Out of memory while allocating output video buffer.\n");
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out, in);
+ if (ret) {
+ return ret;
}
- chromatic_adaptation(ctx, in, out);
return ff_filter_frame(outlink, out);
}
@@ -756,3 +901,30 @@ AVFilter ff_vf_greyedge = {
};
#endif /* CONFIG_GREY_EDGE_FILTER */
+
+#if CONFIG_WEIGHTED_GREYEDGE_FILTER
+
+static const AVOption weighted_greyedge_options[] = {
+ { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 1, 20, FLAGS },
+ { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.2, 1024.0, FLAGS },
+ { "wmap_k", "set power of weight map", OFFSET(wmap_k), AV_OPT_TYPE_INT, {.i64=2}, 1, 20, FLAGS },
+ { "iters", "set max number of iterations", OFFSET(iters), AV_OPT_TYPE_INT, {.i64=5}, 1, 20, FLAGS },
+ { "angerr", "set angular error threshold", OFFSET(angerr_thresh), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, 360, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(weighted_greyedge);
+
+AVFilter ff_vf_weighted_greyedge = {
+ .name = WEIGHTED_GREY_EDGE,
+ .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by weighted grey edge assumption."),
+ .priv_size = sizeof(ColorConstancyContext),
+ .priv_class = &weighted_greyedge_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = colorconstancy_inputs,
+ .outputs = colorconstancy_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif /* CONFIG_WEIGHTED_GREYEDGE_FILTER */
\ No newline at end of file
--
2.17.1