Message ID | 20191023204943.2360-1-onemda@gmail.com |
---|---|
State | New |
Paul, this is useful. When can you merge this?

On 24-10-2019 02:19 am, Paul B Mahol wrote:
> Signed-off-by: Paul B Mahol <onemda@gmail.com>
> ---
>  doc/filters.texi         |  18 +++++
>  libavfilter/Makefile     |   1 +
>  libavfilter/allfilters.c |   1 +
>  libavfilter/vf_blend.c   | 157 ++++++++++++++++++++++++++++++++++++++-
>  4 files changed, 175 insertions(+), 2 deletions(-)
> [...]

On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
> Paul, this is useful. When can you merge this?
You actually tried it?

On 11-01-2020 03:35 pm, Paul B Mahol wrote:
> On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
>> Paul, this is useful. When can you merge this?
> You actually tried it?

No. Do I need to? It looks to be the video counterpart of acrossfade.

Gyan

On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
>
> On 11-01-2020 03:35 pm, Paul B Mahol wrote:
>> On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
>>> Paul, this is useful. When can you merge this?
>> You actually tried it?
>
> No. Do I need to? It looks to be the video counterpart of acrossfade.

Yes, but generic filtering/crossfade is pretty slow, so I wanted to do a Vulkan filter instead.

> Gyan

On 11-01-2020 03:53 pm, Paul B Mahol wrote:
> On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
>>
>> On 11-01-2020 03:35 pm, Paul B Mahol wrote:
>>> On 1/11/20, Gyan <ffmpeg@gyani.pro> wrote:
>>>> Paul, this is useful. When can you merge this?
>>> You actually tried it?
>>
>> No. Do I need to? It looks to be the video counterpart of acrossfade.
>
> Yes, but generic filtering/crossfade is pretty slow, so I wanted to do
> a Vulkan filter instead.

I'll test this filter tonight, but it does not have to be either/or. We have s/w and h/w-assisted versions of scale and overlay as well.

Gyan
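
As a rough illustration, here is a hypothetical invocation of the proposed filter, using only the two options declared in the patch (duration and offset); the file names and values are made up:

    ffmpeg -i first.mp4 -i second.mp4 -filter_complex "crossfade=duration=2:offset=8" out.mp4

Going by the crossfade_activate() logic in the patch below, this should pass the first input through for about 8 seconds, blend overlapping frames from both inputs for about 2 seconds according to whatever blend options (COMMON_OPTIONS) are set, and then pass the remaining frames of the second input through with retimed timestamps.
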
diff --git a/doc/filters.texi b/doc/filters.texi
index 7400e7dd31..eea0be060d 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -6450,6 +6450,7 @@ The threshold below which a pixel value is considered black; it defaults to
 
 @end table
 
+@anchor{blend}
 @section blend, tblend
 
 Blend two video frames into each other.
@@ -8068,6 +8069,23 @@ indicates 'never reset', and returns the largest area encountered during
 playback.
 @end table
 
+@section crossfade
+
+Apply cross fade from one input video stream to another input video stream.
+The cross fade is applied for specified duration.
+
+The filter accepts the following options:
+
+@table @option
+@item duration
+Set cross fade duration in seconds.
+
+@item offset
+Set cross fade start relative to first input stream.
+
+For rest of options explanation see @ref{blend} filter.
+@end table
+
 @anchor{cue}
 @section cue
 
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 63d2fba861..e02c7d3614 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -197,6 +197,7 @@ OBJS-$(CONFIG_COREIMAGE_FILTER)              += vf_coreimage.o
 OBJS-$(CONFIG_COVER_RECT_FILTER)             += vf_cover_rect.o lavfutils.o
 OBJS-$(CONFIG_CROP_FILTER)                   += vf_crop.o
 OBJS-$(CONFIG_CROPDETECT_FILTER)             += vf_cropdetect.o
+OBJS-$(CONFIG_CROSSFADE_FILTER)              += vf_blend.o framesync.o
 OBJS-$(CONFIG_CUE_FILTER)                    += f_cue.o
 OBJS-$(CONFIG_CURVES_FILTER)                 += vf_curves.o
 OBJS-$(CONFIG_DATASCOPE_FILTER)              += vf_datascope.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index e4186f93db..7838002230 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -185,6 +185,7 @@ extern AVFilter ff_vf_coreimage;
 extern AVFilter ff_vf_cover_rect;
 extern AVFilter ff_vf_crop;
 extern AVFilter ff_vf_cropdetect;
+extern AVFilter ff_vf_crossfade;
 extern AVFilter ff_vf_cue;
 extern AVFilter ff_vf_curves;
 extern AVFilter ff_vf_datascope;
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
index 67163be3e7..c4411ca5f9 100644
--- a/libavfilter/vf_blend.c
+++ b/libavfilter/vf_blend.c
@@ -26,6 +26,7 @@
 #include "formats.h"
 #include "framesync.h"
 #include "internal.h"
+#include "filters.h"
 #include "video.h"
 #include "blend.h"
 
@@ -44,6 +45,17 @@ typedef struct BlendContext {
     int depth;
     FilterParams params[4];
     int tblend;
+    int crossfade;
+    int64_t duration;
+    int64_t offset;
+    int64_t duration_pts;
+    int64_t offset_pts;
+    int64_t first_pts;
+    int64_t pts;
+    int crossfade_is_over;
+    int need_second;
+    int eof[2];
+    AVFrame *cf[2];
     AVFrame *prev_frame;        /* only used with tblend */
 } BlendContext;
 
@@ -557,6 +569,7 @@ static av_cold int init(AVFilterContext *ctx)
     BlendContext *s = ctx->priv;
 
     s->tblend = !strcmp(ctx->filter->name, "tblend");
+    s->crossfade = !strcmp(ctx->filter->name, "crossfade");
 
     s->fs.on_event = blend_frame_for_dualinput;
     return 0;
@@ -715,7 +728,7 @@ static int config_output(AVFilterLink *outlink)
     s->depth = pix_desc->comp[0].depth;
     s->nb_planes = av_pix_fmt_count_planes(toplink->format);
 
-    if (!s->tblend)
+    if (!s->tblend && !s->crossfade)
         if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
             return ret;
 
@@ -743,7 +756,14 @@ static int config_output(AVFilterLink *outlink)
         }
     }
 
-    if (s->tblend)
+    s->first_pts = s->pts = AV_NOPTS_VALUE;
+
+    if (s->duration)
+        s->duration_pts = av_rescale_q(s->duration, AV_TIME_BASE_Q, outlink->time_base);
+    if (s->offset)
+        s->offset_pts = av_rescale_q(s->offset, AV_TIME_BASE_Q, outlink->time_base);
+
+    if (s->tblend || s->crossfade)
         return 0;
 
     ret = ff_framesync_configure(&s->fs);
@@ -859,3 +879,136 @@ AVFilter ff_vf_tblend = {
 };
 
 #endif
+
+static const AVOption crossfade_options[] = {
+    { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=1000000}, 0, 60000000, FLAGS },
+    { "offset",   "set cross fade start relative to first input stream", OFFSET(offset), AV_OPT_TYPE_DURATION, {.i64=0}, 0, 60000000, FLAGS },
+    COMMON_OPTIONS,
+    { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(crossfade);
+
+static int crossfade_activate(AVFilterContext *ctx)
+{
+    BlendContext *s = ctx->priv;
+    AVFilterLink *outlink = ctx->outputs[0];
+    AVFrame *in = NULL, *out = NULL;
+    int ret = 0, status;
+    int64_t pts;
+
+    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
+
+    if (s->crossfade_is_over) {
+        ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
+        if (ret < 0) {
+            return ret;
+        } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
+            ff_outlink_set_status(outlink, status, s->pts);
+            return 0;
+        } else if (!ret) {
+            if (ff_outlink_frame_wanted(outlink)) {
+                ff_inlink_request_frame(ctx->inputs[1]);
+                return 0;
+            }
+        } else {
+            in->pts = s->pts;
+            s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
+            return ff_filter_frame(outlink, in);
+        }
+    }
+
+    if (ff_inlink_queued_frames(ctx->inputs[0]) > 0) {
+        s->cf[0] = ff_inlink_peek_frame(ctx->inputs[0], 0);
+        if (s->cf[0]) {
+            if (s->first_pts == AV_NOPTS_VALUE) {
+                s->first_pts = s->cf[0]->pts;
+            }
+            s->pts = s->cf[0]->pts;
+            if (s->first_pts + s->offset_pts > s->cf[0]->pts) {
+                s->cf[0] = NULL;
+                s->need_second = 0;
+                ff_inlink_consume_frame(ctx->inputs[0], &in);
+                return ff_filter_frame(outlink, in);
+            }
+
+            s->need_second = 1;
+        }
+    }
+
+    if (s->cf[0] && ff_inlink_queued_frames(ctx->inputs[1]) > 0) {
+        ff_inlink_consume_frame(ctx->inputs[0], &s->cf[0]);
+        ff_inlink_consume_frame(ctx->inputs[1], &s->cf[1]);
+
+        s->pts = s->cf[0]->pts;
+        if (s->cf[0]->pts - (s->first_pts + s->offset_pts) > s->duration_pts)
+            s->crossfade_is_over = 1;
+        out = blend_frame(ctx, s->cf[0], s->cf[1]);
+        s->cf[0] = NULL;
+        av_frame_free(&s->cf[1]);
+        out->pts = s->pts;
+        return ff_filter_frame(outlink, out);
+    }
+
+    if (ff_inlink_queued_frames(ctx->inputs[0]) > 0 &&
+        ff_inlink_queued_frames(ctx->inputs[1]) > 0) {
+        ff_filter_set_ready(ctx, 100);
+        return 0;
+    }
+
+    if (ff_outlink_frame_wanted(outlink)) {
+        if (!s->eof[0] && ff_outlink_get_status(ctx->inputs[0])) {
+            s->eof[0] = 1;
+            s->crossfade_is_over = 1;
+        }
+        if (!s->eof[1] && ff_outlink_get_status(ctx->inputs[1])) {
+            s->eof[1] = 1;
+        }
+        if (!s->eof[0] && !s->cf[0])
+            ff_inlink_request_frame(ctx->inputs[0]);
+        if (!s->eof[1] && (s->need_second || s->eof[0]))
+            ff_inlink_request_frame(ctx->inputs[1]);
+        if (s->eof[0] && s->eof[1] && (
+            ff_inlink_queued_frames(ctx->inputs[0]) <= 0 ||
+            ff_inlink_queued_frames(ctx->inputs[1]) <= 0))
+            ff_outlink_set_status(outlink, AVERROR_EOF, AV_NOPTS_VALUE);
+        return 0;
+    }
+
+    return FFERROR_NOT_READY;
+}
+
+static const AVFilterPad crossfade_inputs[] = {
+    {
+        .name          = "crossfade0",
+        .type          = AVMEDIA_TYPE_VIDEO,
+    },
+    {
+        .name          = "crossfade1",
+        .type          = AVMEDIA_TYPE_VIDEO,
+    },
+    { NULL }
+};
+
+static const AVFilterPad crossfade_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_crossfade = {
+    .name          = "crossfade",
+    .description   = NULL_IF_CONFIG_SMALL("Cross fade two input video streams."),
+    .priv_size     = sizeof(BlendContext),
+    .priv_class    = &crossfade_class,
+    .query_formats = query_formats,
+    .init          = init,
+    .activate      = crossfade_activate,
+    .uninit        = uninit,
+    .inputs        = crossfade_inputs,
+    .outputs       = crossfade_outputs,
+    .flags         = AVFILTER_FLAG_SLICE_THREADS,
+};

Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 doc/filters.texi         |  18 +++++
 libavfilter/Makefile     |   1 +
 libavfilter/allfilters.c |   1 +
 libavfilter/vf_blend.c   | 157 ++++++++++++++++++++++++++++++++++++++-
 4 files changed, 175 insertions(+), 2 deletions(-)
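
A minimal standalone sketch (not part of the patch) of the timestamp arithmetic that the new config_output() and crossfade_activate() code performs; it only assumes the libavutil headers, and the time base and frame rate values below are hypothetical:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational time_base  = { 1, 12800 };   /* hypothetical output time base */
    AVRational frame_rate = { 25, 1 };      /* hypothetical output frame rate */

    /* AV_OPT_TYPE_DURATION options are stored in microseconds (AV_TIME_BASE_Q),
     * so duration=2 and offset=8 arrive as 2000000 and 8000000. */
    int64_t duration = 2 * AV_TIME_BASE;
    int64_t offset   = 8 * AV_TIME_BASE;

    /* Same conversion as in config_output(): option value -> output pts units. */
    int64_t duration_pts = av_rescale_q(duration, AV_TIME_BASE_Q, time_base);
    int64_t offset_pts   = av_rescale_q(offset,   AV_TIME_BASE_Q, time_base);

    /* Same step as the pass-through path in crossfade_activate(): advance the
     * output pts by one frame interval per frame once the fade is over. */
    int64_t pts_step = av_rescale_q(1, av_inv_q(frame_rate), time_base);

    printf("duration_pts=%" PRId64 " offset_pts=%" PRId64 " pts_step=%" PRId64 "\n",
           duration_pts, offset_pts, pts_step);
    return 0;
}

With these values it prints duration_pts=25600, offset_pts=102400 and pts_step=512: the fade covers 25600 ticks of the 1/12800 time base starting 102400 ticks after the first frame, and each frame passed through after the fade advances s->pts by 512 ticks.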