
[FFmpeg-devel,20/25] fftools/ffmpeg: move filtering functions to ffmpeg_filter

Message ID 20230419195243.2974-20-anton@khirnov.net
State Accepted
Commit eb9ce9de3ba4422d781c79fb53c9f86cfddcc58b
Series [FFmpeg-devel,01/25] fftools/ffmpeg_filter: drop write-only FilterGraph.reconfiguration

Checks

Context               Check     Description
andriy/make_x86       success   Make finished
andriy/make_fate_x86  success   Make fate finished

Commit Message

Anton Khirnov April 19, 2023, 7:52 p.m. UTC
---
 fftools/ffmpeg.c        | 216 +---------------------------------------
 fftools/ffmpeg.h        |  20 ++++
 fftools/ffmpeg_filter.c | 202 +++++++++++++++++++++++++++++++++++++
 3 files changed, 223 insertions(+), 215 deletions(-)
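The patch is a pure code move: reap_filters(), ifilter_send_frame(), ifilter_send_eof() and transcode_from_filter() (renamed to fg_transcode_step()) keep their behaviour, migrate from ffmpeg.c to ffmpeg_filter.c, and get prototypes exported through fftools/ffmpeg.h. As an orientation aid only (not part of the patch), the sketch below shows how a caller in ffmpeg.c can drive the now-public entry points; the wrapper names are hypothetical, while the four called functions and the types are the ones declared in the header hunk further down.

/* Hedged sketch, not code from the patch: hypothetical wrappers illustrating
 * the call flow into the entry points this patch exports via ffmpeg.h. */
#include "ffmpeg.h"

/* Push one decoded frame into a filtergraph input. keep_reference=1 means
 * the buffersrc keeps its own reference, so the caller's frame survives. */
static int feed_filtergraph(InputFilter *ifilter, AVFrame *frame)
{
    return ifilter_send_frame(ifilter, frame, /*keep_reference=*/1);
}

/* Signal end of stream on a filtergraph input at the given pts. */
static int close_filtergraph_input(InputFilter *ifilter, int64_t pts)
{
    return ifilter_send_eof(ifilter, pts);
}

/* One scheduling step: on success some output may have been reaped already;
 * otherwise *ist names the input stream whose next frame would let the
 * graph continue. */
static int advance_filtergraph(FilterGraph *fg, InputStream **ist)
{
    int ret = fg_transcode_step(fg, ist);
    if (ret < 0)
        return ret;
    /* no input requested from us: drain whatever the sinks already hold */
    return *ist ? 0 : reap_filters(/*flush=*/0);
}

In the actual ffmpeg.c after this patch, transcode_step() simply calls fg_transcode_step() at the old transcode_from_filter() call site, as the last hunk of the ffmpeg.c diff shows.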

Patch

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index e6ee3ce557..248c22a4ff 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -651,66 +651,6 @@  void close_output_stream(OutputStream *ost)
         sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
 }
 
-/**
- * Get and encode new output from any of the filtergraphs, without causing
- * activity.
- *
- * @return  0 for success, <0 for severe errors
- */
-static int reap_filters(int flush)
-{
-    AVFrame *filtered_frame = NULL;
-
-    /* Reap all buffers present in the buffer sinks */
-    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
-        AVFilterContext *filter;
-        int ret = 0;
-
-        if (!ost->filter || !ost->filter->graph->graph)
-            continue;
-        filter = ost->filter->filter;
-
-        filtered_frame = ost->filtered_frame;
-
-        while (1) {
-            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
-                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
-            if (ret < 0) {
-                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
-                    av_log(NULL, AV_LOG_WARNING,
-                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
-                } else if (flush && ret == AVERROR_EOF) {
-                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
-                        enc_frame(ost, NULL);
-                }
-                break;
-            }
-            if (ost->finished) {
-                av_frame_unref(filtered_frame);
-                continue;
-            }
-
-            if (filtered_frame->pts != AV_NOPTS_VALUE) {
-                AVRational tb = av_buffersink_get_time_base(filter);
-                ost->filter->last_pts = av_rescale_q(filtered_frame->pts, tb,
-                                                     AV_TIME_BASE_Q);
-                filtered_frame->time_base = tb;
-
-                if (debug_ts)
-                    av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
-                           av_ts2str(filtered_frame->pts),
-                           av_ts2timestr(filtered_frame->pts, &tb),
-                           tb.num, tb.den);
-            }
-
-            enc_frame(ost, filtered_frame);
-            av_frame_unref(filtered_frame);
-        }
-    }
-
-    return 0;
-}
-
 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
 {
     AVBPrint buf, buf_script;
@@ -944,112 +884,6 @@  int ifilter_has_all_input_formats(FilterGraph *fg)
     return 1;
 }
 
-static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
-{
-    FilterGraph *fg = ifilter->graph;
-    AVFrameSideData *sd;
-    int need_reinit, ret;
-    int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
-
-    if (keep_reference)
-        buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
-
-    /* determine if the parameters for this input changed */
-    need_reinit = ifilter->format != frame->format;
-
-    switch (ifilter->ist->par->codec_type) {
-    case AVMEDIA_TYPE_AUDIO:
-        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
-                       av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
-        break;
-    case AVMEDIA_TYPE_VIDEO:
-        need_reinit |= ifilter->width  != frame->width ||
-                       ifilter->height != frame->height;
-        break;
-    }
-
-    if (!ifilter->ist->reinit_filters && fg->graph)
-        need_reinit = 0;
-
-    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
-        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
-        need_reinit = 1;
-
-    if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) {
-        if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
-            need_reinit = 1;
-    } else if (ifilter->displaymatrix)
-        need_reinit = 1;
-
-    if (need_reinit) {
-        ret = ifilter_parameters_from_frame(ifilter, frame);
-        if (ret < 0)
-            return ret;
-    }
-
-    /* (re)init the graph if possible, otherwise buffer the frame and return */
-    if (need_reinit || !fg->graph) {
-        if (!ifilter_has_all_input_formats(fg)) {
-            AVFrame *tmp = av_frame_clone(frame);
-            if (!tmp)
-                return AVERROR(ENOMEM);
-
-            ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
-            if (ret < 0)
-                av_frame_free(&tmp);
-
-            return ret;
-        }
-
-        ret = reap_filters(1);
-        if (ret < 0 && ret != AVERROR_EOF) {
-            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
-            return ret;
-        }
-
-        ret = configure_filtergraph(fg);
-        if (ret < 0) {
-            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
-            return ret;
-        }
-    }
-
-    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
-    if (ret < 0) {
-        if (ret != AVERROR_EOF)
-            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
-        return ret;
-    }
-
-    return 0;
-}
-
-static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
-{
-    int ret;
-
-    ifilter->eof = 1;
-
-    if (ifilter->filter) {
-        ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
-        if (ret < 0)
-            return ret;
-    } else {
-        // the filtergraph was never configured
-        if (ifilter->format < 0) {
-            ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
-            if (ret < 0)
-                return ret;
-        }
-        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
-            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
-            return AVERROR_INVALIDDATA;
-        }
-    }
-
-    return 0;
-}
-
 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
 // There is the following difference: if you got a frame, you must call
 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
@@ -2292,54 +2126,6 @@  discard_packet:
     return 0;
 }
 
-/**
- * Perform a step of transcoding for the specified filter graph.
- *
- * @param[in]  graph     filter graph to consider
- * @param[out] best_ist  input stream where a frame would allow to continue
- * @return  0 for success, <0 for error
- */
-static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
-{
-    int i, ret;
-    int nb_requests, nb_requests_max = 0;
-    InputFilter *ifilter;
-    InputStream *ist;
-
-    *best_ist = NULL;
-    ret = avfilter_graph_request_oldest(graph->graph);
-    if (ret >= 0)
-        return reap_filters(0);
-
-    if (ret == AVERROR_EOF) {
-        ret = reap_filters(1);
-        for (i = 0; i < graph->nb_outputs; i++)
-            close_output_stream(graph->outputs[i]->ost);
-        return ret;
-    }
-    if (ret != AVERROR(EAGAIN))
-        return ret;
-
-    for (i = 0; i < graph->nb_inputs; i++) {
-        ifilter = graph->inputs[i];
-        ist = ifilter->ist;
-        if (input_files[ist->file_index]->eagain ||
-            input_files[ist->file_index]->eof_reached)
-            continue;
-        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
-        if (nb_requests > nb_requests_max) {
-            nb_requests_max = nb_requests;
-            *best_ist = ist;
-        }
-    }
-
-    if (!*best_ist)
-        for (i = 0; i < graph->nb_outputs; i++)
-            graph->outputs[i]->ost->unavailable = 1;
-
-    return 0;
-}
-
 /**
  * Run a single step of transcoding.
  *
@@ -2373,7 +2159,7 @@  static int transcode_step(void)
     }
 
     if (ost->filter && ost->filter->graph->graph) {
-        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
+        if ((ret = fg_transcode_step(ost->filter->graph, &ist)) < 0)
             return ret;
         if (!ist)
             return 0;
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index b181d433b0..07c1fc7ed6 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -799,6 +799,9 @@  int init_complex_filtergraph(FilterGraph *fg);
 
 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub);
 
+int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference);
+int ifilter_send_eof(InputFilter *ifilter, int64_t pts);
+
 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
 int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par);
 int ifilter_has_all_input_formats(FilterGraph *fg);
@@ -811,6 +814,23 @@  int ifilter_has_all_input_formats(FilterGraph *fg);
  */
 FilterGraph *fg_create(char *graph_desc);
 
+/**
+ * Perform a step of transcoding for the specified filter graph.
+ *
+ * @param[in]  graph     filter graph to consider
+ * @param[out] best_ist  input stream where a frame would allow to continue
+ * @return  0 for success, <0 for error
+ */
+int fg_transcode_step(FilterGraph *graph, InputStream **best_ist);
+
+/**
+ * Get and encode new output from any of the filtergraphs, without causing
+ * activity.
+ *
+ * @return  0 for success, <0 for severe errors
+ */
+int reap_filters(int flush);
+
 int ffmpeg_parse_options(int argc, char **argv);
 
 void enc_stats_write(OutputStream *ost, EncStats *es,
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 7b3d9a490f..c39cf43774 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -36,6 +36,7 @@ 
 #include "libavutil/pixfmt.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/samplefmt.h"
+#include "libavutil/timestamp.h"
 
 // FIXME: YUV420P etc. are actually supported with full color range,
 // yet the latter information isn't available here.
@@ -1300,3 +1301,204 @@  int filtergraph_is_simple(FilterGraph *fg)
 {
     return !fg->graph_desc;
 }
+
+int reap_filters(int flush)
+{
+    AVFrame *filtered_frame = NULL;
+
+    /* Reap all buffers present in the buffer sinks */
+    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
+        AVFilterContext *filter;
+        int ret = 0;
+
+        if (!ost->filter || !ost->filter->graph->graph)
+            continue;
+        filter = ost->filter->filter;
+
+        filtered_frame = ost->filtered_frame;
+
+        while (1) {
+            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
+                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
+            if (ret < 0) {
+                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                    av_log(NULL, AV_LOG_WARNING,
+                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
+                } else if (flush && ret == AVERROR_EOF) {
+                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
+                        enc_frame(ost, NULL);
+                }
+                break;
+            }
+            if (ost->finished) {
+                av_frame_unref(filtered_frame);
+                continue;
+            }
+
+            if (filtered_frame->pts != AV_NOPTS_VALUE) {
+                AVRational tb = av_buffersink_get_time_base(filter);
+                ost->filter->last_pts = av_rescale_q(filtered_frame->pts, tb,
+                                                     AV_TIME_BASE_Q);
+                filtered_frame->time_base = tb;
+
+                if (debug_ts)
+                    av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
+                           av_ts2str(filtered_frame->pts),
+                           av_ts2timestr(filtered_frame->pts, &tb),
+                           tb.num, tb.den);
+            }
+
+            enc_frame(ost, filtered_frame);
+            av_frame_unref(filtered_frame);
+        }
+    }
+
+    return 0;
+}
+
+int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
+{
+    int ret;
+
+    ifilter->eof = 1;
+
+    if (ifilter->filter) {
+        ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
+        if (ret < 0)
+            return ret;
+    } else {
+        // the filtergraph was never configured
+        if (ifilter->format < 0) {
+            ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
+            if (ret < 0)
+                return ret;
+        }
+        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
+            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
+            return AVERROR_INVALIDDATA;
+        }
+    }
+
+    return 0;
+}
+
+int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
+{
+    FilterGraph *fg = ifilter->graph;
+    AVFrameSideData *sd;
+    int need_reinit, ret;
+    int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
+
+    if (keep_reference)
+        buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
+
+    /* determine if the parameters for this input changed */
+    need_reinit = ifilter->format != frame->format;
+
+    switch (ifilter->ist->par->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
+                       av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        need_reinit |= ifilter->width  != frame->width ||
+                       ifilter->height != frame->height;
+        break;
+    }
+
+    if (!ifilter->ist->reinit_filters && fg->graph)
+        need_reinit = 0;
+
+    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
+        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
+        need_reinit = 1;
+
+    if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) {
+        if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
+            need_reinit = 1;
+    } else if (ifilter->displaymatrix)
+        need_reinit = 1;
+
+    if (need_reinit) {
+        ret = ifilter_parameters_from_frame(ifilter, frame);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* (re)init the graph if possible, otherwise buffer the frame and return */
+    if (need_reinit || !fg->graph) {
+        if (!ifilter_has_all_input_formats(fg)) {
+            AVFrame *tmp = av_frame_clone(frame);
+            if (!tmp)
+                return AVERROR(ENOMEM);
+
+            ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
+            if (ret < 0)
+                av_frame_free(&tmp);
+
+            return ret;
+        }
+
+        ret = reap_filters(1);
+        if (ret < 0 && ret != AVERROR_EOF) {
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
+            return ret;
+        }
+
+        ret = configure_filtergraph(fg);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
+            return ret;
+        }
+    }
+
+    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
+    if (ret < 0) {
+        if (ret != AVERROR_EOF)
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
+        return ret;
+    }
+
+    return 0;
+}
+
+int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
+{
+    int i, ret;
+    int nb_requests, nb_requests_max = 0;
+    InputFilter *ifilter;
+    InputStream *ist;
+
+    *best_ist = NULL;
+    ret = avfilter_graph_request_oldest(graph->graph);
+    if (ret >= 0)
+        return reap_filters(0);
+
+    if (ret == AVERROR_EOF) {
+        ret = reap_filters(1);
+        for (i = 0; i < graph->nb_outputs; i++)
+            close_output_stream(graph->outputs[i]->ost);
+        return ret;
+    }
+    if (ret != AVERROR(EAGAIN))
+        return ret;
+
+    for (i = 0; i < graph->nb_inputs; i++) {
+        ifilter = graph->inputs[i];
+        ist = ifilter->ist;
+        if (input_files[ist->file_index]->eagain ||
+            input_files[ist->file_index]->eof_reached)
+            continue;
+        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
+        if (nb_requests > nb_requests_max) {
+            nb_requests_max = nb_requests;
+            *best_ist = ist;
+        }
+    }
+
+    if (!*best_ist)
+        for (i = 0; i < graph->nb_outputs; i++)
+            graph->outputs[i]->ost->unavailable = 1;
+
+    return 0;
+}
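For reference, the heart of the moved reap_filters() is the "drain without requesting" pattern of the public buffersink API. The stand-alone sketch below is not fftools code: encode_cb is a hypothetical callback standing in for enc_frame(), and the graph is assumed to be already configured. It shows that pattern on a single sink.

/* Minimal sketch of the buffersink draining pattern used by reap_filters():
 * pull frames already buffered in one sink, without making the graph do any
 * new work, and hand them to a hypothetical encode callback. */
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int drain_sink(AVFilterContext *sink, AVFrame *frame, int flush,
                      int (*encode_cb)(AVFrame *fr, void *opaque), void *opaque)
{
    while (1) {
        int ret = av_buffersink_get_frame_flags(sink, frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret == AVERROR(EAGAIN))
            return 0;                      /* nothing buffered right now */
        if (ret == AVERROR_EOF) {
            if (flush)
                encode_cb(NULL, opaque);   /* NULL frame flushes the encoder */
            return 0;
        }
        if (ret < 0)
            return ret;                    /* a real error from the sink */

        /* propagate the sink time base, as reap_filters() does */
        frame->time_base = av_buffersink_get_time_base(sink);
        ret = encode_cb(frame, opaque);
        av_frame_unref(frame);
        if (ret < 0)
            return ret;
    }
}

The real function additionally limits the flush-on-EOF encoder call to video sinks and skips frames for output streams that are already finished; those details are unchanged by this patch.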