diff mbox

[FFmpeg-devel] avfilter: add midequalizer filter

Message ID 20170202134009.7967-1-onemda@gmail.com
State Accepted
Headers show

Commit Message

Paul B Mahol Feb. 2, 2017, 1:40 p.m. UTC
Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 doc/filters.texi              |  11 ++
 libavfilter/Makefile          |   1 +
 libavfilter/allfilters.c      |   1 +
 libavfilter/vf_midequalizer.c | 301 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 314 insertions(+)
 create mode 100644 libavfilter/vf_midequalizer.c

Comments

Calvin Walton Feb. 6, 2017, 10:39 p.m. UTC | #1
Hi, just a quick note below...

On Thu, 2017-02-02 at 14:40 +0100, Paul B Mahol wrote:
> 
> +++ b/doc/filters.texi
> @@ -9643,6 +9643,17 @@ Macroblock size. Default @code{16}.
>  Search parameter. Default @code{7}.
>  @end table
>  
> +@section midequalizer
> +
> +Apply Midway Image Equalization effect using two video streams.
> +
> +This filter accepts the following option:
> +
> +@table @option
> +@item planes
> +Set which planes to process. Default is @code{15}, which is all
> available planes.
> +@end table
> 

Speaking as a user, it would be great if you can include some
information in the docs about what this filter does, or why someone
would want to use it. Something as simple as:

"Midway Image Equalization adjusts a pair of images to have the same
histogram, while maintaining their dynamics as much as possible. It's
useful for e.g. matching exposures from a pair of stereo cameras"

would help here.

Some help on how to actually use it (maybe with an example) would be
great too. For example, if the filter works by having a reference input
and an input which gets modified, it would also be good to say which
input is which. Something along the lines of:

"This filter has two inputs and two outputs. The first input is used as
a reference, it is passed through unchanged. The second input has its
contrast adjusted to match that of the first input and the result is
returned on the second output."

but of course adjusted to match what the code actually does!

I don't have any comments on the code itself.

Calvin.
diff mbox

Patch

diff --git a/doc/filters.texi b/doc/filters.texi
index cd8e90e..056ab8d 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -9643,6 +9643,17 @@  Macroblock size. Default @code{16}.
 Search parameter. Default @code{7}.
 @end table
 
+@section midequalizer
+
+Apply Midway Image Equalization effect using two video streams. Midway Image Equalization adjusts a pair of images to have the same histograms, while maintaining their dynamics as much as possible. The output is the first stream with its histogram adjusted midway toward that of the second stream; this is useful, for example, for matching exposures from a pair of stereo cameras.
+
+This filter accepts the following option:
+
+@table @option
+@item planes
+Set which planes to process. Default is @code{15}, which is all available planes.
+@end table
+
 @section minterpolate
 
 Convert the video to specified frame rate using motion interpolation.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 68a94be..c7b40ff 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -219,6 +219,7 @@  OBJS-$(CONFIG_MCDEINT_FILTER)                += vf_mcdeint.o
 OBJS-$(CONFIG_MERGEPLANES_FILTER)            += vf_mergeplanes.o framesync.o
 OBJS-$(CONFIG_MESTIMATE_FILTER)              += vf_mestimate.o motion_estimation.o
 OBJS-$(CONFIG_METADATA_FILTER)               += f_metadata.o
+OBJS-$(CONFIG_MIDEQUALIZER_FILTER)           += vf_midequalizer.o framesync.o
 OBJS-$(CONFIG_MINTERPOLATE_FILTER)           += vf_minterpolate.o motion_estimation.o
 OBJS-$(CONFIG_MPDECIMATE_FILTER)             += vf_mpdecimate.o
 OBJS-$(CONFIG_NEGATE_FILTER)                 += vf_lut.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 86941ba..40cf0f4 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -235,6 +235,7 @@  void avfilter_register_all(void)
     REGISTER_FILTER(MERGEPLANES,    mergeplanes,    vf);
     REGISTER_FILTER(MESTIMATE,      mestimate,      vf);
     REGISTER_FILTER(METADATA,       metadata,       vf);
+    REGISTER_FILTER(MIDEQUALIZER,   midequalizer,   vf);
     REGISTER_FILTER(MINTERPOLATE,   minterpolate,   vf);
     REGISTER_FILTER(MPDECIMATE,     mpdecimate,     vf);
     REGISTER_FILTER(NEGATE,         negate,         vf);
diff --git a/libavfilter/vf_midequalizer.c b/libavfilter/vf_midequalizer.c
new file mode 100644
index 0000000..73e7fd5
--- /dev/null
+++ b/libavfilter/vf_midequalizer.c
@@ -0,0 +1,301 @@ 
+/*
+ * Copyright (c) 2017 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "avfilter.h"
+#include "framesync.h"
+
+typedef struct MidEqualizerContext {
+    const AVClass *class;
+    int width[4], height[4];   /* per-plane dimensions, filled in config_input() */
+    int nb_planes;             /* plane count of the input pixel format */
+    int planes;                /* bitmask of planes to process (the "planes" option) */
+    float histogram[2][256];   /* scratch: cumulative histograms of the two inputs */
+    unsigned cchange[256];     /* scratch: per-level remapping table for input 0 */
+    FFFrameSync fs;            /* synchronizes frames of the two input streams */
+
+    /* per-format kernel: equalizes one plane of in0 against in1 into dst */
+    void (*midequalizer)(const uint8_t *in0, const uint8_t *in1,
+                         uint8_t *dst,
+                         ptrdiff_t linesize1, ptrdiff_t linesize2,
+                         ptrdiff_t dlinesize,
+                         int w, int h,
+                         float *histogram1, float *histogram2,
+                         unsigned *cchange);
+} MidEqualizerContext;
+
+#define OFFSET(x) offsetof(MidEqualizerContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption midequalizer_options[] = {
+    { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS }, /* bitmask; default 0xF = all planes */
+    { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(midequalizer);
+
+static int query_formats(AVFilterContext *ctx)
+{
+    /* 8-bit planar formats only: midequalizer8() assumes uint8_t samples */
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+        AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_NONE
+    };
+
+    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs) /* framesync callback: emit first input equalized against the second */
+{
+    AVFilterContext *ctx = fs->parent;
+    MidEqualizerContext *s = fs->opaque;
+    AVFilterLink *outlink = ctx->outputs[0];
+    AVFrame *out, *in0, *in1;
+    int ret;
+
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0,    0)) < 0 || /* borrowed refs, not owned */
+        (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
+        return ret;
+
+    if (ctx->is_disabled) { /* timeline bypass: pass the first input through unchanged */
+        out = av_frame_clone(in0);
+        if (!out)
+            return AVERROR(ENOMEM);
+    } else {
+        int p;
+
+        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+        if (!out)
+            return AVERROR(ENOMEM);
+        av_frame_copy_props(out, in0);
+
+        for (p = 0; p < s->nb_planes; p++) {
+            if (!((1 << p) & s->planes)) { /* unselected plane: copy from first input */
+                av_image_copy_plane(out->data[p], out->linesize[p], in0->data[p], in0->linesize[p],
+                                    s->width[p], s->height[p]);
+                continue;
+            }
+
+            s->midequalizer(in0->data[p], in1->data[p],
+                            out->data[p],
+                            in0->linesize[p], in1->linesize[p],
+                            out->linesize[p],
+                            s->width[p], s->height[p],
+                            s->histogram[0], s->histogram[1],
+                            s->cchange);
+        }
+    }
+    out->pts = av_rescale_q(in0->pts, s->fs.time_base, outlink->time_base); /* framesync time base -> output time base */
+
+    return ff_filter_frame(outlink, out);
+}
+
+static void compute_histogram(const uint8_t *src, ptrdiff_t linesize,
+                              int w, int h, float *histogram) /* builds a cumulative (prefix-summed) histogram of one plane */
+{
+    int y, x;
+
+    memset(histogram, 0, 256 * sizeof(*histogram));
+
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            histogram[src[x]] += 1;
+        }
+        src += linesize;
+    }
+
+    for (x = 0; x < 256 - 1; x++) {
+        histogram[x + 1] += histogram[x]; /* prefix sum: counts -> cumulative distribution */
+        histogram[x] /= 256;              /* common scale; compares are only ever between two same-sized planes */
+    }
+    histogram[x] /= 256; /* scale the last bin, skipped by the loop above */
+}
+
+static void compute_contrast_change(float *histogram1, float *histogram2,
+                                    unsigned *cchange) /* midway mapping: i -> (i+j)/2, halfway between identity and full histogram match */
+{
+    int i;
+
+    for (i = 0; i < 256; i++) {
+        int j;
+
+        /* j = smallest level whose cumulative count in input 1 reaches that of level i in input 0 */
+        for (j = 0; j < 256 && histogram2[j] < histogram1[i]; j++);
+
+        cchange[i] = (i + j) / 2;
+    }
+}
+
+static void midequalizer8(const uint8_t *in0, const uint8_t *in1,
+                          uint8_t *dst,
+                          ptrdiff_t linesize1, ptrdiff_t linesize2,
+                          ptrdiff_t dlinesize,
+                          int w, int h,
+                          float *histogram1, float *histogram2,
+                          unsigned *cchange1) /* 8-bit kernel: remap in0's samples midway toward in1's histogram */
+{
+    int x, y;
+
+    compute_histogram(in0, linesize1, w, h, histogram1);
+    compute_histogram(in1, linesize2, w, h, histogram2);
+
+    compute_contrast_change(histogram1, histogram2, cchange1);
+
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            dst[x] = av_clip_uint8(cchange1[in0[x]]); /* table lookup; values are already <= 255, clip is defensive */
+        }
+        dst += dlinesize;
+        in0 += linesize1;
+    }
+}
+
+static int config_input(AVFilterLink *inlink) /* runs on the first input only; second input is validated in config_output() */
+{
+    AVFilterContext *ctx = inlink->dst;
+    MidEqualizerContext *s = ctx->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    int vsub, hsub;
+
+    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+    hsub = desc->log2_chroma_w;
+    vsub = desc->log2_chroma_h;
+    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub); /* chroma planes: subsampled, rounded up */
+    s->height[0] = s->height[3] = inlink->h;                       /* luma and alpha: full size */
+    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
+    s->width[0]  = s->width[3]  = inlink->w;
+
+    s->midequalizer = midequalizer8; /* only 8-bit formats are negotiated in query_formats() */
+
+    return 0;
+}
+
+static int config_output(AVFilterLink *outlink) /* validates that both inputs match, then sets up frame synchronization */
+{
+    AVFilterContext *ctx = outlink->src;
+    MidEqualizerContext *s = ctx->priv;
+    AVFilterLink *in0 = ctx->inputs[0];
+    AVFilterLink *in1 = ctx->inputs[1];
+    FFFrameSyncIn *in;
+    int ret;
+
+    if (in0->format != in1->format) { /* kernel reads both planes with the same layout */
+        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (in0->w != in1->w || in0->h != in1->h) { /* histograms are compared without any rescaling */
+        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+               "(size %dx%d) do not match the corresponding "
+               "second input link %s parameters (%dx%d)\n",
+               ctx->input_pads[0].name, in0->w, in0->h,
+               ctx->input_pads[1].name, in1->w, in1->h);
+        return AVERROR(EINVAL);
+    }
+
+    /* output inherits all stream properties from the first input */
+    outlink->w = in0->w;
+    outlink->h = in0->h;
+    outlink->time_base = in0->time_base;
+    outlink->sample_aspect_ratio = in0->sample_aspect_ratio;
+    outlink->frame_rate = in0->frame_rate;
+
+    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+        return ret;
+
+    in = s->fs.in;
+    in[0].time_base = in0->time_base;
+    in[1].time_base = in1->time_base;
+    in[0].sync   = 1;               /* both streams drive the sync */
+    in[0].before = EXT_STOP;        /* no output before both streams have started */
+    in[0].after  = EXT_INFINITY;    /* keep reusing the last frame once a stream ends */
+    in[1].sync   = 1;
+    in[1].before = EXT_STOP;
+    in[1].after  = EXT_INFINITY;
+    s->fs.opaque   = s;
+    s->fs.on_event = process_frame; /* called with a synchronized frame pair */
+
+    return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) /* both input pads just feed frames into the synchronizer */
+{
+    MidEqualizerContext *s = inlink->dst->priv;
+    return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink) /* delegate output requests to framesync, which pulls from both inputs */
+{
+    MidEqualizerContext *s = outlink->src->priv;
+    return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx) /* release framesync state; the rest of the context needs no cleanup */
+{
+    MidEqualizerContext *s = ctx->priv;
+
+    ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad midequalizer_inputs[] = {
+    {
+        .name         = "in0",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+        .config_props = config_input, /* only the first input configures; the second must match it (checked in config_output) */
+    },
+    {
+        .name         = "in1",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad midequalizer_outputs[] = {
+    {
+        .name          = "default", /* single output: the equalized first stream */
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+        .request_frame = request_frame,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_midequalizer = {
+    .name          = "midequalizer",
+    .description   = NULL_IF_CONFIG_SMALL("Apply Midway Equalization."),
+    .priv_size     = sizeof(MidEqualizerContext),
+    .uninit        = uninit,
+    .query_formats = query_formats,
+    .inputs        = midequalizer_inputs,
+    .outputs       = midequalizer_outputs,
+    .priv_class    = &midequalizer_class,
+    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, /* filter handles the enable expression itself via ctx->is_disabled */
+};