[FFmpeg-devel] avfilter/setpts: switch to activate

Message ID 20190930120509.19403-1-onemda@gmail.com
State New

Commit Message

Paul B Mahol Sept. 30, 2019, 12:05 p.m. UTC
Also properly handle EOF timestamps.
Fixes #6833.

Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 libavfilter/setpts.c | 74 ++++++++++++++++++++++++++++++++------------
 1 file changed, 55 insertions(+), 19 deletions(-)
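
In short, with the activate API the EOF status carries a timestamp, and this patch runs that timestamp through the same user expression as regular frame timestamps before forwarding it downstream. A condensed sketch of the new EOF path, abridged from the diff below:

    /* In activate(): when upstream signals EOF, rewrite the reported
     * timestamp with the user expression before forwarding the status.
     * (Abridged from the patch; frame == NULL marks the EOF evaluation.) */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        pts = D2TS(eval_pts(setpts, inlink, NULL, pts));
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }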

Comments

Paul B Mahol Sept. 30, 2019, 7:01 p.m. UTC | #1
On 9/30/19, Paul B Mahol <onemda@gmail.com> wrote:
> Also properly handle EOF timestamps.
> Fixes #6833.
>
> Signed-off-by: Paul B Mahol <onemda@gmail.com>
> ---
>  libavfilter/setpts.c | 74 ++++++++++++++++++++++++++++++++------------
>  1 file changed, 55 insertions(+), 19 deletions(-)
>

Will apply soon, as this fixes a very important issue.
Nicolas George Sept. 30, 2019, 7:03 p.m. UTC | #2
Paul B Mahol (12019-09-30):
> Will apply soon, as this fixes very important issue.

A very important issue that was opened 23 months ago and has been sitting
idle for 12 months. It can therefore wait for a proper review, which I
had already planned to make.

Regards,
Nicolas George Oct. 2, 2019, 6:22 p.m. UTC | #3
Paul B Mahol (12019-09-30):
> Also properly handle EOF timestamps.
> Fixes #6833.
> 
> Signed-off-by: Paul B Mahol <onemda@gmail.com>
> ---
>  libavfilter/setpts.c | 74 ++++++++++++++++++++++++++++++++------------
>  1 file changed, 55 insertions(+), 19 deletions(-)
> 
> diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
> index 800ba6a83f..076534c518 100644
> --- a/libavfilter/setpts.c
> +++ b/libavfilter/setpts.c
> @@ -33,6 +33,7 @@
>  #include "libavutil/time.h"
>  #include "audio.h"
>  #include "avfilter.h"
> +#include "filters.h"
>  #include "internal.h"
>  #include "video.h"
>  
> @@ -154,6 +155,28 @@ static inline char *double2int64str(char *buf, double v)
>      return buf;
>  }
>  
> +static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
> +{
> +    if (isnan(setpts->var_values[VAR_STARTPTS])) {
> +        setpts->var_values[VAR_STARTPTS] = TS2D(pts);
> +        setpts->var_values[VAR_STARTT  ] = TS2T(pts, inlink->time_base);
> +    }
> +    setpts->var_values[VAR_PTS       ] = TS2D(pts);
> +    setpts->var_values[VAR_T         ] = TS2T(pts, inlink->time_base);
> +    setpts->var_values[VAR_POS       ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
> +    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
> +
> +    if (frame) {
> +        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
> +            setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
> +        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
> +            setpts->var_values[VAR_S] = frame->nb_samples;
> +            setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
> +        }
> +    }
> +
> +    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
> +}
>  #define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
>  
>  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
> @@ -162,23 +185,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
>      int64_t in_pts = frame->pts;
>      double d;
>  
> -    if (isnan(setpts->var_values[VAR_STARTPTS])) {
> -        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
> -        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
> -    }
> -    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
> -    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
> -    setpts->var_values[VAR_POS       ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
> -    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
> -
> -    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
> -        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
> -    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
> -        setpts->var_values[VAR_S] = frame->nb_samples;
> -        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
> -    }
> -
> -    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
> +    d = eval_pts(setpts, inlink, frame, frame->pts);
>      frame->pts = D2TS(d);
>  

>      av_log(inlink->dst, AV_LOG_TRACE,

I think there should be a LOG_TRACE for the output timestamp too.

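One possible shape for such a trace, reading the suggestion as applying to the new EOF path in activate() and reusing the d2istr() helper from filter_frame() (a sketch, not part of this patch):

    /* Hypothetical: trace the incoming and rewritten EOF timestamps
     * before forwarding the status (names as in the patch). */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        int64_t out_pts = D2TS(eval_pts(setpts, inlink, NULL, pts));
        av_log(ctx, AV_LOG_TRACE, "EOF PTS:%s -> PTS:%s\n",
               d2istr(TS2D(pts)), d2istr(TS2D(out_pts)));
        ff_outlink_set_status(outlink, status, out_pts);
        return 0;
    }
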
> @@ -216,6 +223,35 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
>      return ff_filter_frame(inlink->dst->outputs[0], frame);
>  }
>  
> +static int activate(AVFilterContext *ctx)
> +{
> +    SetPTSContext *setpts = ctx->priv;
> +    AVFilterLink *inlink = ctx->inputs[0];
> +    AVFilterLink *outlink = ctx->outputs[0];
> +    AVFrame *in;
> +    int status;
> +    int64_t pts;
> +    int ret;
> +
> +    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
> +
> +    ret = ff_inlink_consume_frame(inlink, &in);
> +    if (ret < 0)
> +        return ret;
> +    if (ret > 0)
> +        return filter_frame(inlink, in);
> +
> +    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
> +        pts = D2TS(eval_pts(setpts, inlink, NULL, pts));
> +        ff_outlink_set_status(outlink, status, pts);
> +        return 0;
> +    }
> +
> +    FF_FILTER_FORWARD_WANTED(outlink, inlink);
> +
> +    return FFERROR_NOT_READY;
> +}
> +
>  static av_cold void uninit(AVFilterContext *ctx)
>  {
>      SetPTSContext *setpts = ctx->priv;
> @@ -239,7 +275,6 @@ static const AVFilterPad avfilter_vf_setpts_inputs[] = {
>          .name         = "default",
>          .type         = AVMEDIA_TYPE_VIDEO,
>          .config_props = config_input,
> -        .filter_frame = filter_frame,
>      },
>      { NULL }
>  };
> @@ -256,6 +291,7 @@ AVFilter ff_vf_setpts = {
>      .name      = "setpts",
>      .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
>      .init      = init,
> +    .activate  = activate,
>      .uninit    = uninit,
>  
>      .priv_size = sizeof(SetPTSContext),
> @@ -276,7 +312,6 @@ static const AVFilterPad asetpts_inputs[] = {
>          .name         = "default",
>          .type         = AVMEDIA_TYPE_AUDIO,
>          .config_props = config_input,
> -        .filter_frame = filter_frame,
>      },
>      { NULL }
>  };
> @@ -293,6 +328,7 @@ AVFilter ff_af_asetpts = {
>      .name        = "asetpts",
>      .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
>      .init        = init,
> +    .activate    = activate,
>      .uninit      = uninit,
>      .priv_size   = sizeof(SetPTSContext),
>      .priv_class  = &asetpts_class,

Apart from that, LGTM. Thanks for taking care of it.

Regards,

Patch

diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
index 800ba6a83f..076534c518 100644
--- a/libavfilter/setpts.c
+++ b/libavfilter/setpts.c
@@ -33,6 +33,7 @@ 
 #include "libavutil/time.h"
 #include "audio.h"
 #include "avfilter.h"
+#include "filters.h"
 #include "internal.h"
 #include "video.h"
 
@@ -154,6 +155,28 @@  static inline char *double2int64str(char *buf, double v)
     return buf;
 }
 
+static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
+{
+    if (isnan(setpts->var_values[VAR_STARTPTS])) {
+        setpts->var_values[VAR_STARTPTS] = TS2D(pts);
+        setpts->var_values[VAR_STARTT  ] = TS2T(pts, inlink->time_base);
+    }
+    setpts->var_values[VAR_PTS       ] = TS2D(pts);
+    setpts->var_values[VAR_T         ] = TS2T(pts, inlink->time_base);
+    setpts->var_values[VAR_POS       ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
+    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
+
+    if (frame) {
+        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
+            setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
+        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
+            setpts->var_values[VAR_S] = frame->nb_samples;
+            setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
+        }
+    }
+
+    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
+}
 #define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -162,23 +185,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     int64_t in_pts = frame->pts;
     double d;
 
-    if (isnan(setpts->var_values[VAR_STARTPTS])) {
-        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
-        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
-    }
-    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
-    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
-    setpts->var_values[VAR_POS       ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
-    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
-
-    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
-        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
-    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
-        setpts->var_values[VAR_S] = frame->nb_samples;
-        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
-    }
-
-    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
+    d = eval_pts(setpts, inlink, frame, frame->pts);
     frame->pts = D2TS(d);
 
     av_log(inlink->dst, AV_LOG_TRACE,
@@ -216,6 +223,35 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     return ff_filter_frame(inlink->dst->outputs[0], frame);
 }
 
+static int activate(AVFilterContext *ctx)
+{
+    SetPTSContext *setpts = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
+    AVFrame *in;
+    int status;
+    int64_t pts;
+    int ret;
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    ret = ff_inlink_consume_frame(inlink, &in);
+    if (ret < 0)
+        return ret;
+    if (ret > 0)
+        return filter_frame(inlink, in);
+
+    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+        pts = D2TS(eval_pts(setpts, inlink, NULL, pts));
+        ff_outlink_set_status(outlink, status, pts);
+        return 0;
+    }
+
+    FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return FFERROR_NOT_READY;
+}
+
 static av_cold void uninit(AVFilterContext *ctx)
 {
     SetPTSContext *setpts = ctx->priv;
@@ -239,7 +275,6 @@  static const AVFilterPad avfilter_vf_setpts_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_VIDEO,
         .config_props = config_input,
-        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -256,6 +291,7 @@  AVFilter ff_vf_setpts = {
     .name      = "setpts",
     .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
     .init      = init,
+    .activate  = activate,
     .uninit    = uninit,
 
     .priv_size = sizeof(SetPTSContext),
@@ -276,7 +312,6 @@  static const AVFilterPad asetpts_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .config_props = config_input,
-        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -293,6 +328,7 @@  AVFilter ff_af_asetpts = {
     .name        = "asetpts",
     .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
     .init        = init,
+    .activate    = activate,
     .uninit      = uninit,
     .priv_size   = sizeof(SetPTSContext),
     .priv_class  = &asetpts_class,