
[FFmpeg-devel,11/14] lavfi/vf_paletteuse: convert to framesync2.

Message ID 20170731120227.31047-11-george@nsup.org
State Superseded

Commit Message

Nicolas George July 31, 2017, 12:02 p.m. UTC
Signed-off-by: Nicolas George <george@nsup.org>
---
 libavfilter/Makefile        |  2 +-
 libavfilter/vf_paletteuse.c | 59 ++++++++++++++++++++++++++++-----------------
 2 files changed, 38 insertions(+), 23 deletions(-)

Comments

Clément Bœsch Aug. 9, 2017, 10:42 p.m. UTC | #1
On Mon, Jul 31, 2017 at 02:02:24PM +0200, Nicolas George wrote:
[...]
> @@ -1052,13 +1070,10 @@ static const AVFilterPad paletteuse_inputs[] = {
>      {
>          .name           = "default",
>          .type           = AVMEDIA_TYPE_VIDEO,
> -        .filter_frame   = filter_frame,

> -        .needs_writable = 1, // for error diffusal dithering

why?

[...]
Nicolas George Aug. 10, 2017, 7:13 a.m. UTC | #2
On tridi 23 Thermidor, year CCXXV, Clement Boesch wrote:
> > -        .needs_writable = 1, // for error diffusal dithering
> why?

It was not needed in the first place: IIRC, dualinput always returns a
writable main frame. And the new implementation does, I am sure of it.

Regards,
Clément Bœsch Aug. 10, 2017, 7:32 a.m. UTC | #3
On Thu, Aug 10, 2017 at 09:13:13AM +0200, Nicolas George wrote:
> On tridi 23 Thermidor, year CCXXV, Clement Boesch wrote:
> > > -        .needs_writable = 1, // for error diffusal dithering
> > why?
> 
> It was not needed in the first place: IIRC, dualinput always returns a
> writable main frame. And the new implementation does, I am sure of it.
> 

But unless it's documented in the API, that's implementation-specific. I'd prefer
if you keep that as a safeguard. It also has a documentation purpose. If
the frame is already writable, it will be a no-op.
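
To make the "no-op" part concrete, a minimal sketch (ensure_writable() is
only illustrative, not an existing helper):

	#include "libavutil/frame.h"

	/* Illustration only: ensure_writable() is not an existing API.
	 * av_frame_make_writable() already returns 0 without copying
	 * anything when the frame has a single reference, so calling
	 * it on an already-writable frame costs nothing. */
	static int ensure_writable(AVFrame *frame)
	{
	    if (av_frame_is_writable(frame))
	        return 0;                         /* no copy, no allocation */
	    return av_frame_make_writable(frame); /* deep copy otherwise    */
	}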
Nicolas George Aug. 10, 2017, 9:33 a.m. UTC | #4
On tridi 23 Thermidor, year CCXXV, Clement Boesch wrote:
> But unless it's documented in the API, that's implementation-specific. I'd prefer
> if you keep that as a safeguard. It also has a documentation purpose.

I will do it if you insist, but before that, let me correct a little
detail:

> If the frame is already writable, it will be a no-op.

Before a frame is used for its contents, it is used for its timestamp.
The frame could be read-only at the time it is dequeued by
framesync but have become writable by the time it is ready for
processing by the filter. And I think it is not that unlikely: a graph
with split sending to overlay and scale would have that effect.
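
A standalone illustration of that effect with plain libavutil calls (just a
sketch, error checks omitted, the sizes are arbitrary):

	#include "libavutil/frame.h"

	/* A frame shared by two references (as after split) is not
	 * writable; it becomes writable again as soon as the other
	 * branch releases its reference, with no copy involved. */
	static void writability_demo(void)
	{
	    AVFrame *a = av_frame_alloc();
	    AVFrame *b = av_frame_alloc();

	    a->format = AV_PIX_FMT_RGB32;
	    a->width  = 320;
	    a->height = 240;
	    av_frame_get_buffer(a, 0); /* single reference: writable          */
	    av_frame_ref(b, a);        /* second reference: a is not writable */
	    /* av_frame_is_writable(a) == 0 here */
	    av_frame_unref(b);         /* the "scale" branch is done with it  */
	    /* av_frame_is_writable(a) > 0 again */
	    av_frame_free(&a);
	    av_frame_free(&b);
	}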

Instead of .needs_writable (I am not sure this flag should be relevant
with the activate callback), I can propose the following solution that
makes the need for a writable frame explicit:

	/**
	 * Like ff_framesync2_dualinput_get, but make sure f0 is
	 * writable.
	 */
	ff_framesync2_dualinput_get_writable(fs, &f0, &f1);

or:

	ff_framesync2_dualinput_get(fs, &f0, &f1,
	                            FRAMESYNC_MAIN_WRITABLE);

(whichever people prefer), and probably the same options for the
individual ff_inlink_consume_frame().
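
A rough, untested sketch of the first variant, just to show the intent (it
only wraps the existing ff_framesync2_dualinput_get() and adds
av_frame_make_writable() on the main frame):

	int ff_framesync2_dualinput_get_writable(FFFrameSync *fs,
	                                         AVFrame **f0, AVFrame **f1)
	{
	    int ret = ff_framesync2_dualinput_get(fs, f0, f1);

	    if (ret < 0)
	        return ret;
	    if (*f0) {
	        /* copy the data only if another reference still keeps
	         * the main frame read-only at this point */
	        ret = av_frame_make_writable(*f0);
	        if (ret < 0) {
	            av_frame_free(f0);
	            av_frame_free(f1);
	            return ret;
	        }
	    }
	    return 0;
	}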

Regards,
Clément Bœsch Aug. 10, 2017, 9:41 a.m. UTC | #5
On Thu, Aug 10, 2017 at 11:33:31AM +0200, Nicolas George wrote:
> On tridi 23 Thermidor, year CCXXV, Clement Boesch wrote:
> > But unless it's documented in the API, that's implementation-specific. I'd prefer
> > if you keep that as a safeguard. It also has a documentation purpose.
> 
> I will do it if you insist, but before that, let me correct a little
> detail:
> 
> > If the frame is already writable, it will be a no-op.
> 
> Before a frame is used for its contents, it is used for its timestamp.
> The frame could be read-only at the time it is dequeued by
> framesync but have become writable by the time it is ready for
> processing by the filter. And I think it is not that unlikely: a graph
> with split sending to overlay and scale would have that effect.
> 
> Instead of .needs_writable (I am not sure this flag should be relevant
> with the activate callback), I can propose the following solution that
> makes the need for a writable frame explicit:
> 
> 	/**
> 	 * Like ff_framesync2_dualinput_get, but make sure f0 is
> 	 * writable.
> 	 */
> 	ff_framesync2_dualinput_get_writable(fs, &f0, &f1);
> 
> or:
> 
> 	ff_framesync2_dualinput_get(fs, &f0, &f1,
> 	                            FRAMESYNC_MAIN_WRITABLE);
> 
> (whichever people prefer), and probably the same options for the
> individual ff_inlink_consume_frame().
> 

I like ff_framesync2_dualinput_get_writable() better, but both are fine
with me.

Note: if you follow that path, please keep the comment about dithering
above the writable call to that framesync function.

Thanks

Patch

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index a62339992f..4e4201e2b6 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -248,7 +248,7 @@  OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync2.o
 OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
 OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
 OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
-OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o dualinput.o framesync.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o framesync2.o
 OBJS-$(CONFIG_PERMS_FILTER)                  += f_perms.o
 OBJS-$(CONFIG_PERSPECTIVE_FILTER)            += vf_perspective.o
 OBJS-$(CONFIG_PHASE_FILTER)                  += vf_phase.o
diff --git a/libavfilter/vf_paletteuse.c b/libavfilter/vf_paletteuse.c
index b25c6a9eac..063873536b 100644
--- a/libavfilter/vf_paletteuse.c
+++ b/libavfilter/vf_paletteuse.c
@@ -27,8 +27,10 @@ 
 #include "libavutil/internal.h"
 #include "libavutil/opt.h"
 #include "libavutil/qsort.h"
-#include "dualinput.h"
 #include "avfilter.h"
+#include "filters.h"
+#include "framesync2.h"
+#include "internal.h"
 
 enum dithering_mode {
     DITHERING_NONE,
@@ -80,7 +82,7 @@  typedef int (*set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame
 
 typedef struct PaletteUseContext {
     const AVClass *class;
-    FFDualInputContext dinput;
+    FFFrameSync fs;
     struct cache_node cache[CACHE_SIZE];    /* lookup cache */
     struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */
     uint32_t palette[AVPALETTE_COUNT];
@@ -129,6 +131,8 @@  static const AVOption paletteuse_options[] = {
 
 AVFILTER_DEFINE_CLASS(paletteuse);
 
+static int load_apply_palette(FFFrameSync *fs);
+
 static int query_formats(AVFilterContext *ctx)
 {
     static const enum AVPixelFormat in_fmts[]    = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
@@ -900,11 +904,18 @@  static int config_output(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     PaletteUseContext *s = ctx->priv;
 
+    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    if (ret < 0)
+        return ret;
+    s->fs.opt_repeatlast = 1; // only 1 frame in the palette
+    s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;
+    s->fs.on_event = load_apply_palette;
+
     outlink->w = ctx->inputs[0]->w;
     outlink->h = ctx->inputs[0]->h;
 
     outlink->time_base = ctx->inputs[0]->time_base;
-    if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
         return ret;
     return 0;
 }
@@ -951,21 +962,31 @@  static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
         s->palette_loaded = 1;
 }
 
-static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main,
-                                   const AVFrame *second)
+static int load_apply_palette(FFFrameSync *fs)
 {
+    AVFilterContext *ctx = fs->parent;
     AVFilterLink *inlink = ctx->inputs[0];
     PaletteUseContext *s = ctx->priv;
+    AVFrame *main, *second, *out;
+    int ret;
+
+    ret = ff_framesync2_dualinput_get(fs, &main, &second);
+    if (ret < 0)
+        return ret;
+    if (!main || !second) {
+        ret = AVERROR_BUG;
+        goto error;
+    }
     if (!s->palette_loaded) {
         load_palette(s, second);
     }
-    return apply_palette(inlink, main);
-}
+    out = apply_palette(inlink, main);
+    return ff_filter_frame(ctx->outputs[0], out);
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
-{
-    PaletteUseContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame(&s->dinput, inlink, in);
+error:
+    av_frame_free(&main);
+    av_frame_free(&second);
+    return ret;
 }
 
 #define DEFINE_SET_FRAME(color_search, name, value)                             \
@@ -1013,9 +1034,6 @@  static int dither_value(int p)
 static av_cold int init(AVFilterContext *ctx)
 {
     PaletteUseContext *s = ctx->priv;
-    s->dinput.repeatlast = 1; // only 1 frame in the palette
-    s->dinput.skip_initial_unpaired = 1;
-    s->dinput.process    = load_apply_palette;
 
     s->set_frame = set_frame_lut[s->color_search_method][s->dither];
 
@@ -1030,10 +1048,10 @@  static av_cold int init(AVFilterContext *ctx)
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
 {
-    PaletteUseContext *s = outlink->src->priv;
-    return ff_dualinput_request_frame(&s->dinput, outlink);
+    PaletteUseContext *s = ctx->priv;
+    return ff_framesync2_activate(&s->fs);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
@@ -1041,7 +1059,7 @@  static av_cold void uninit(AVFilterContext *ctx)
     int i;
     PaletteUseContext *s = ctx->priv;
 
-    ff_dualinput_uninit(&s->dinput);
+    ff_framesync2_uninit(&s->fs);
     for (i = 0; i < CACHE_SIZE; i++)
         av_freep(&s->cache[i].entries);
     av_frame_free(&s->last_in);
@@ -1052,13 +1070,10 @@  static const AVFilterPad paletteuse_inputs[] = {
     {
         .name           = "default",
         .type           = AVMEDIA_TYPE_VIDEO,
-        .filter_frame   = filter_frame,
-        .needs_writable = 1, // for error diffusal dithering
     },{
         .name           = "palette",
         .type           = AVMEDIA_TYPE_VIDEO,
         .config_props   = config_input_palette,
-        .filter_frame   = filter_frame,
     },
     { NULL }
 };
@@ -1068,7 +1083,6 @@  static const AVFilterPad paletteuse_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -1080,6 +1094,7 @@  AVFilter ff_vf_paletteuse = {
     .query_formats = query_formats,
     .init          = init,
     .uninit        = uninit,
+    .activate      = activate,
     .inputs        = paletteuse_inputs,
     .outputs       = paletteuse_outputs,
     .priv_class    = &paletteuse_class,