
[FFmpeg-devel,V2] dnn: move output name from DNNModel.set_input_output to DNNModule.execute_model

Message ID 1597988069-2567-1-git-send-email-yejun.guo@intel.com
State Accepted
Commit 0f7a99e37ae52f9ecdc4c81195c14b03f5be3dfd
Series [FFmpeg-devel,V2] dnn: move output name from DNNModel.set_input_output to DNNModule.execute_model

Checks

Context             Check      Description
andriy/default      pending
andriy/make         success    Make finished
andriy/make_fate    success    Make fate finished

Commit Message

Guo, Yejun Aug. 21, 2020, 5:34 a.m. UTC
Currently, the output is set both in DNNModel.set_input_output and in
DNNModule.execute_model. It makes more sense for the output name to be
provided at model inference time, so that all the output info is set in
a single place.

Accordingly, DNNModel.set_input_output is renamed to DNNModel.set_input.

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
---
v2: rebase with master

 libavfilter/dnn/dnn_backend_native.c   | 44 +++++++----------
 libavfilter/dnn/dnn_backend_native.h   |  4 +-
 libavfilter/dnn/dnn_backend_openvino.c | 50 +++++--------------
 libavfilter/dnn/dnn_backend_openvino.h |  2 +-
 libavfilter/dnn/dnn_backend_tf.c       | 87 ++++++++++++++--------------------
 libavfilter/dnn/dnn_backend_tf.h       |  2 +-
 libavfilter/dnn_interface.h            |  4 +-
 libavfilter/vf_derain.c                |  6 +--
 libavfilter/vf_dnn_processing.c        |  9 ++--
 libavfilter/vf_sr.c                    | 11 +++--
 10 files changed, 82 insertions(+), 137 deletions(-)
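
For reference, a minimal caller-side sketch of the interface change (illustrative only, not part of the patch; modeled on the vf_derain/vf_sr hunks below, with the surrounding filter context and error handling simplified):

    /*
     * Sketch based on the vf_derain/vf_sr hunks in this patch: set_input()
     * now binds only the input, and the output name(s) move to execute_model().
     */
    #include "dnn_interface.h"
    #include "libavutil/error.h"

    static int run_model(DNNModule *dnn_module, DNNModel *model,
                         DNNData *input, DNNData *output)
    {
        const char *model_output_name = "y";  /* output name used by vf_derain/vf_sr */

        /* old interface (removed by this patch):
         *   (model->set_input_output)(model->model, input, "x", &model_output_name, 1);
         *   (dnn_module->execute_model)(model, output, 1);
         */

        /* new interface: bind the input ... */
        if ((model->set_input)(model->model, input, "x") != DNN_SUCCESS)
            return AVERROR(EIO);

        /* ... and pass the output name(s) at inference time */
        if ((dnn_module->execute_model)(model, output, &model_output_name, 1) != DNN_SUCCESS)
            return AVERROR(EIO);

        return 0;
    }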

Comments

Fu, Ting Aug. 21, 2020, 10:01 a.m. UTC | #1
> -----Original Message-----
> From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Guo,
> Yejun
> Sent: Friday, August 21, 2020 01:34 PM
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH V2] dnn: move output name from
> DNNModel.set_input_output to DNNModule.execute_model
> 
> currently, output is set both at DNNModel.set_input_output and
> DNNModule.execute_model, it makes sense that the output name is provided at
> model inference time so all the output info is set at a single place.
> 
> and so DNNModel.set_input_output is renamed to DNNModel.set_input
> 
> Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
> ---
> v2: rebase with master
> 
>  libavfilter/dnn/dnn_backend_native.c   | 44 +++++++----------
>  libavfilter/dnn/dnn_backend_native.h   |  4 +-
>  libavfilter/dnn/dnn_backend_openvino.c | 50 +++++--------------
> libavfilter/dnn/dnn_backend_openvino.h |  2 +-
>  libavfilter/dnn/dnn_backend_tf.c       | 87 ++++++++++++++--------------------
>  libavfilter/dnn/dnn_backend_tf.h       |  2 +-
>  libavfilter/dnn_interface.h            |  4 +-
>  libavfilter/vf_derain.c                |  6 +--
>  libavfilter/vf_dnn_processing.c        |  9 ++--
>  libavfilter/vf_sr.c                    | 11 +++--
>  10 files changed, 82 insertions(+), 137 deletions(-)
> 
[...]
> --
> 2.7.4

LGTM, all three backends (Native/TF/OV) function well.

Guo, Yejun Aug. 24, 2020, 1:20 a.m. UTC | #2
> -----Original Message-----
> From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Fu, Ting
> Sent: August 21, 2020 18:01
> To: FFmpeg development discussions and patches <ffmpeg-devel@ffmpeg.org>
> Subject: Re: [FFmpeg-devel] [PATCH V2] dnn: move output name from
> DNNModel.set_input_output to DNNModule.execute_model
> 
> 
> 
> > -----Original Message-----
> > From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Guo,
> > Yejun
> > Sent: Friday, August 21, 2020 01:34 PM
> > To: ffmpeg-devel@ffmpeg.org
> > Subject: [FFmpeg-devel] [PATCH V2] dnn: move output name from
> > DNNModel.set_input_output to DNNModule.execute_model
> >
> > currently, output is set both at DNNModel.set_input_output and
> > DNNModule.execute_model, it makes sense that the output name is
> > provided at model inference time so all the output info is set at a single place.
> >
> > and so DNNModel.set_input_output is renamed to DNNModel.set_input
> >
> > Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
> > ---
> > v2: rebase with master
> >
> >  libavfilter/dnn/dnn_backend_native.c   | 44 +++++++----------
> >  libavfilter/dnn/dnn_backend_native.h   |  4 +-
> >  libavfilter/dnn/dnn_backend_openvino.c | 50 +++++--------------
> > libavfilter/dnn/dnn_backend_openvino.h |  2 +-
> >  libavfilter/dnn/dnn_backend_tf.c       | 87
> ++++++++++++++--------------------
> >  libavfilter/dnn/dnn_backend_tf.h       |  2 +-
> >  libavfilter/dnn_interface.h            |  4 +-
> >  libavfilter/vf_derain.c                |  6 +--
> >  libavfilter/vf_dnn_processing.c        |  9 ++--
> >  libavfilter/vf_sr.c                    | 11 +++--
> >  10 files changed, 82 insertions(+), 137 deletions(-)
> >
> [...]
> > --
> > 2.7.4
> 
> LGTM, all three backends(Native/TF/OV) function well.
> 
Thanks for the test, will push tomorrow if there are no other comments.

Patch

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 0be9c0b..65a5670 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -50,7 +50,7 @@  static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
     return DNN_ERROR;
 }
 
-static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+static DNNReturnType set_input_native(void *model, DNNData *input, const char *input_name)
 {
     NativeModel *native_model = (NativeModel *)model;
     DnnOperand *oprd = NULL;
@@ -87,27 +87,6 @@  static DNNReturnType set_input_output_native(void *model, DNNData *input, const
 
     input->data = oprd->data;
 
-    /* outputs */
-    native_model->nb_output = 0;
-    av_freep(&native_model->output_indexes);
-    native_model->output_indexes = av_mallocz_array(nb_output, sizeof(*native_model->output_indexes));
-    if (!native_model->output_indexes)
-        return DNN_ERROR;
-
-    for (uint32_t i = 0; i < nb_output; ++i) {
-        const char *output_name = output_names[i];
-        for (int j = 0; j < native_model->operands_num; ++j) {
-            oprd = &native_model->operands[j];
-            if (strcmp(oprd->name, output_name) == 0) {
-                native_model->output_indexes[native_model->nb_output++] = j;
-                break;
-            }
-        }
-    }
-
-    if (native_model->nb_output != nb_output)
-        return DNN_ERROR;
-
     return DNN_SUCCESS;
 }
 
@@ -243,7 +222,7 @@  DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
         return NULL;
     }
 
-    model->set_input_output = &set_input_output_native;
+    model->set_input = &set_input_native;
     model->get_input = &get_input_native;
     model->options = options;
 
@@ -255,11 +234,10 @@  fail:
     return NULL;
 }
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
+DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output)
 {
     NativeModel *native_model = (NativeModel *)model->model;
     int32_t layer;
-    uint32_t nb = FFMIN(nb_output, native_model->nb_output);
 
     if (native_model->layers_num <= 0 || native_model->operands_num <= 0)
         return DNN_ERROR;
@@ -274,8 +252,19 @@  DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
                                         native_model->layers[layer].params);
     }
 
-    for (uint32_t i = 0; i < nb; ++i) {
-        DnnOperand *oprd = &native_model->operands[native_model->output_indexes[i]];
+    for (uint32_t i = 0; i < nb_output; ++i) {
+        DnnOperand *oprd = NULL;
+        const char *output_name = output_names[i];
+        for (int j = 0; j < native_model->operands_num; ++j) {
+            if (strcmp(native_model->operands[j].name, output_name) == 0) {
+                oprd = &native_model->operands[j];
+                break;
+            }
+        }
+
+        if (oprd == NULL)
+            return DNN_ERROR;
+
         outputs[i].data = oprd->data;
         outputs[i].height = oprd->dims[1];
         outputs[i].width = oprd->dims[2];
@@ -335,7 +324,6 @@  void ff_dnn_free_model_native(DNNModel **model)
                 av_freep(&native_model->operands);
             }
 
-            av_freep(&native_model->output_indexes);
             av_freep(&native_model);
         }
         av_freep(model);
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 228d5b7..554098f 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -112,13 +112,11 @@  typedef struct NativeModel{
     int32_t layers_num;
     DnnOperand *operands;
     int32_t operands_num;
-    int32_t *output_indexes;
-    uint32_t nb_output;
 } NativeModel;
 
 DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *options);
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
+DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output);
 
 void ff_dnn_free_model_native(DNNModel **model);
 
diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index d343bf2..034dee1 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -34,8 +34,6 @@  typedef struct OVModel{
     ie_executable_network_t *exe_network;
     ie_infer_request_t *infer_request;
     ie_blob_t *input_blob;
-    ie_blob_t **output_blobs;
-    uint32_t nb_output;
 } OVModel;
 
 static DNNDataType precision_to_datatype(precision_e precision)
@@ -93,7 +91,7 @@  static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
     return DNN_ERROR;
 }
 
-static DNNReturnType set_input_output_ov(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+static DNNReturnType set_input_ov(void *model, DNNData *input, const char *input_name)
 {
     OVModel *ov_model = (OVModel *)model;
     IEStatusCode status;
@@ -124,30 +122,9 @@  static DNNReturnType set_input_output_ov(void *model, DNNData *input, const char
         goto err;
     input->data = blob_buffer.buffer;
 
-    // outputs
-    ov_model->nb_output = 0;
-    av_freep(&ov_model->output_blobs);
-    ov_model->output_blobs = av_mallocz_array(nb_output, sizeof(*ov_model->output_blobs));
-    if (!ov_model->output_blobs)
-        goto err;
-
-    for (int i = 0; i < nb_output; i++) {
-        const char *output_name = output_names[i];
-        status = ie_infer_request_get_blob(ov_model->infer_request, output_name, &(ov_model->output_blobs[i]));
-        if (status != OK)
-            goto err;
-        ov_model->nb_output++;
-    }
-
     return DNN_SUCCESS;
 
 err:
-    if (ov_model->output_blobs) {
-        for (uint32_t i = 0; i < ov_model->nb_output; i++) {
-            ie_blob_free(&(ov_model->output_blobs[i]));
-        }
-        av_freep(&ov_model->output_blobs);
-    }
     if (ov_model->input_blob)
         ie_blob_free(&ov_model->input_blob);
     if (ov_model->infer_request)
@@ -184,7 +161,7 @@  DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options)
         goto err;
 
     model->model = (void *)ov_model;
-    model->set_input_output = &set_input_output_ov;
+    model->set_input = &set_input_ov;
     model->get_input = &get_input_ov;
     model->options = options;
 
@@ -205,24 +182,29 @@  err:
     return NULL;
 }
 
-DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
+DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output)
 {
     dimensions_t dims;
     precision_e precision;
     ie_blob_buffer_t blob_buffer;
     OVModel *ov_model = (OVModel *)model->model;
-    uint32_t nb = FFMIN(nb_output, ov_model->nb_output);
     IEStatusCode status = ie_infer_request_infer(ov_model->infer_request);
     if (status != OK)
         return DNN_ERROR;
 
-    for (uint32_t i = 0; i < nb; ++i) {
-        status = ie_blob_get_buffer(ov_model->output_blobs[i], &blob_buffer);
+    for (uint32_t i = 0; i < nb_output; ++i) {
+        const char *output_name = output_names[i];
+        ie_blob_t *output_blob = NULL;
+        status = ie_infer_request_get_blob(ov_model->infer_request, output_name, &output_blob);
+        if (status != OK)
+            return DNN_ERROR;
+
+        status = ie_blob_get_buffer(output_blob, &blob_buffer);
         if (status != OK)
             return DNN_ERROR;
 
-        status |= ie_blob_get_dims(ov_model->output_blobs[i], &dims);
-        status |= ie_blob_get_precision(ov_model->output_blobs[i], &precision);
+        status |= ie_blob_get_dims(output_blob, &dims);
+        status |= ie_blob_get_precision(output_blob, &precision);
         if (status != OK)
             return DNN_ERROR;
 
@@ -240,12 +222,6 @@  void ff_dnn_free_model_ov(DNNModel **model)
 {
     if (*model){
         OVModel *ov_model = (OVModel *)(*model)->model;
-        if (ov_model->output_blobs) {
-            for (uint32_t i = 0; i < ov_model->nb_output; i++) {
-                ie_blob_free(&(ov_model->output_blobs[i]));
-            }
-            av_freep(&ov_model->output_blobs);
-        }
         if (ov_model->input_blob)
             ie_blob_free(&ov_model->input_blob);
         if (ov_model->infer_request)
diff --git a/libavfilter/dnn/dnn_backend_openvino.h b/libavfilter/dnn/dnn_backend_openvino.h
index 0a8fef9..b2a86e0 100644
--- a/libavfilter/dnn/dnn_backend_openvino.h
+++ b/libavfilter/dnn/dnn_backend_openvino.h
@@ -31,7 +31,7 @@ 
 
 DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options);
 
-DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
+DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output);
 
 void ff_dnn_free_model_ov(DNNModel **model);
 
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 9d079aa..bdc90d5 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -40,7 +40,6 @@  typedef struct TFModel{
     TF_Status *status;
     TF_Output input;
     TF_Tensor *input_tensor;
-    TF_Output *outputs;
     TF_Tensor **output_tensors;
     uint32_t nb_output;
 } TFModel;
@@ -136,7 +135,7 @@  static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     return DNN_SUCCESS;
 }
 
-static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = (TFModel *)model;
     TF_SessionOptions *sess_opts;
@@ -157,40 +156,7 @@  static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char
     }
     input->data = (float *)TF_TensorData(tf_model->input_tensor);
 
-    // Output operation
-    if (nb_output == 0)
-        return DNN_ERROR;
-
-    av_freep(&tf_model->outputs);
-    tf_model->outputs = av_malloc_array(nb_output, sizeof(*tf_model->outputs));
-    if (!tf_model->outputs)
-        return DNN_ERROR;
-    for (int i = 0; i < nb_output; ++i) {
-        tf_model->outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
-        if (!tf_model->outputs[i].oper){
-            av_freep(&tf_model->outputs);
-            return DNN_ERROR;
-        }
-        tf_model->outputs[i].index = 0;
-    }
-
-    if (tf_model->output_tensors) {
-        for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
-            if (tf_model->output_tensors[i]) {
-                TF_DeleteTensor(tf_model->output_tensors[i]);
-                tf_model->output_tensors[i] = NULL;
-            }
-        }
-    }
-    av_freep(&tf_model->output_tensors);
-    tf_model->output_tensors = av_mallocz_array(nb_output, sizeof(*tf_model->output_tensors));
-    if (!tf_model->output_tensors) {
-        av_freep(&tf_model->outputs);
-        return DNN_ERROR;
-    }
-
-    tf_model->nb_output = nb_output;
-
+    // session
     if (tf_model->session){
         TF_CloseSession(tf_model->session, tf_model->status);
         TF_DeleteSession(tf_model->session, tf_model->status);
@@ -598,40 +564,57 @@  DNNModel *ff_dnn_load_model_tf(const char *model_filename, const char *options)
     }
 
     model->model = (void *)tf_model;
-    model->set_input_output = &set_input_output_tf;
+    model->set_input = &set_input_tf;
     model->get_input = &get_input_tf;
     model->options = options;
 
     return model;
 }
 
-
-
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
+DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output)
 {
+    TF_Output *tf_outputs;
     TFModel *tf_model = (TFModel *)model->model;
-    uint32_t nb = FFMIN(nb_output, tf_model->nb_output);
-    if (nb == 0)
+
+    tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
+    if (tf_outputs == NULL)
         return DNN_ERROR;
 
-    av_assert0(tf_model->output_tensors);
-    for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
-        if (tf_model->output_tensors[i]) {
-            TF_DeleteTensor(tf_model->output_tensors[i]);
-            tf_model->output_tensors[i] = NULL;
+    if (tf_model->output_tensors) {
+        for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
+            if (tf_model->output_tensors[i]) {
+                TF_DeleteTensor(tf_model->output_tensors[i]);
+                tf_model->output_tensors[i] = NULL;
+            }
         }
     }
+    av_freep(&tf_model->output_tensors);
+    tf_model->nb_output = nb_output;
+    tf_model->output_tensors = av_mallocz_array(nb_output, sizeof(*tf_model->output_tensors));
+    if (!tf_model->output_tensors) {
+        av_freep(&tf_outputs);
+        return DNN_ERROR;
+    }
+
+    for (int i = 0; i < nb_output; ++i) {
+        tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
+        if (!tf_outputs[i].oper) {
+            av_freep(&tf_outputs);
+            return DNN_ERROR;
+        }
+        tf_outputs[i].index = 0;
+    }
 
     TF_SessionRun(tf_model->session, NULL,
                   &tf_model->input, &tf_model->input_tensor, 1,
-                  tf_model->outputs, tf_model->output_tensors, nb,
+                  tf_outputs, tf_model->output_tensors, nb_output,
                   NULL, 0, NULL, tf_model->status);
-
-    if (TF_GetCode(tf_model->status) != TF_OK){
+    if (TF_GetCode(tf_model->status) != TF_OK) {
+        av_freep(&tf_outputs);
         return DNN_ERROR;
     }
 
-    for (uint32_t i = 0; i < nb; ++i) {
+    for (uint32_t i = 0; i < nb_output; ++i) {
         outputs[i].height = TF_Dim(tf_model->output_tensors[i], 1);
         outputs[i].width = TF_Dim(tf_model->output_tensors[i], 2);
         outputs[i].channels = TF_Dim(tf_model->output_tensors[i], 3);
@@ -639,6 +622,7 @@  DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, u
         outputs[i].dt = TF_TensorType(tf_model->output_tensors[i]);
     }
 
+    av_freep(&tf_outputs);
     return DNN_SUCCESS;
 }
 
@@ -669,7 +653,6 @@  void ff_dnn_free_model_tf(DNNModel **model)
                 }
             }
         }
-        av_freep(&tf_model->outputs);
         av_freep(&tf_model->output_tensors);
         av_freep(&tf_model);
         av_freep(model);
diff --git a/libavfilter/dnn/dnn_backend_tf.h b/libavfilter/dnn/dnn_backend_tf.h
index d7ee1df..6c7b6d2 100644
--- a/libavfilter/dnn/dnn_backend_tf.h
+++ b/libavfilter/dnn/dnn_backend_tf.h
@@ -31,7 +31,7 @@ 
 
 DNNModel *ff_dnn_load_model_tf(const char *model_filename, const char *options);
 
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
+DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output);
 
 void ff_dnn_free_model_tf(DNNModel **model);
 
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index c5a72b4..365c9a4 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -50,7 +50,7 @@  typedef struct DNNModel{
     DNNReturnType (*get_input)(void *model, DNNData *input, const char *input_name);
     // Sets model input and output.
     // Should be called at least once before model execution.
-    DNNReturnType (*set_input_output)(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output);
+    DNNReturnType (*set_input)(void *model, DNNData *input, const char *input_name);
 } DNNModel;
 
 // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
@@ -58,7 +58,7 @@  typedef struct DNNModule{
     // Loads model and parameters from given file. Returns NULL if it is not possible.
     DNNModel *(*load_model)(const char *model_filename, const char *options);
     // Executes model with specified input and output. Returns DNN_ERROR otherwise.
-    DNNReturnType (*execute_model)(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
+    DNNReturnType (*execute_model)(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output);
     // Frees memory allocated for model.
     void (*free_model)(DNNModel **model);
 } DNNModule;
diff --git a/libavfilter/vf_derain.c b/libavfilter/vf_derain.c
index e175bf4..ff7d8d6 100644
--- a/libavfilter/vf_derain.c
+++ b/libavfilter/vf_derain.c
@@ -78,14 +78,13 @@  static int config_inputs(AVFilterLink *inlink)
 {
     AVFilterContext *ctx          = inlink->dst;
     DRContext *dr_context         = ctx->priv;
-    const char *model_output_name = "y";
     DNNReturnType result;
 
     dr_context->input.width    = inlink->w;
     dr_context->input.height   = inlink->h;
     dr_context->input.channels = 3;
 
-    result = (dr_context->model->set_input_output)(dr_context->model->model, &dr_context->input, "x", &model_output_name, 1);
+    result = (dr_context->model->set_input)(dr_context->model->model, &dr_context->input, "x");
     if (result != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "could not set input and output for the model\n");
         return AVERROR(EIO);
@@ -100,6 +99,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = ctx->outputs[0];
     DRContext *dr_context = ctx->priv;
     DNNReturnType dnn_result;
+    const char *model_output_name = "y";
 
     AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
@@ -118,7 +118,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         }
     }
 
-    dnn_result = (dr_context->dnn_module->execute_model)(dr_context->model, &dr_context->output, 1);
+    dnn_result = (dr_context->dnn_module->execute_model)(dr_context->model, &dr_context->output, &model_output_name, 1);
     if (dnn_result != DNN_SUCCESS){
         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
         return AVERROR(EIO);
diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index b6dfc4e..f78800f 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -218,9 +218,8 @@  static int config_input(AVFilterLink *inlink)
     ctx->input.channels = model_input.channels;
     ctx->input.dt = model_input.dt;
 
-    result = (ctx->model->set_input_output)(ctx->model->model,
-                                        &ctx->input, ctx->model_inputname,
-                                        (const char **)&ctx->model_outputname, 1);
+    result = (ctx->model->set_input)(ctx->model->model,
+                                     &ctx->input, ctx->model_inputname);
     if (result != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "could not set input and output for the model\n");
         return AVERROR(EIO);
@@ -309,7 +308,7 @@  static int config_output(AVFilterLink *outlink)
     DNNReturnType result;
 
     // have a try run in case that the dnn model resize the frame
-    result = (ctx->dnn_module->execute_model)(ctx->model, &ctx->output, 1);
+    result = (ctx->dnn_module->execute_model)(ctx->model, &ctx->output, (const char **)&ctx->model_outputname, 1);
     if (result != DNN_SUCCESS){
         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
         return AVERROR(EIO);
@@ -456,7 +455,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
     copy_from_frame_to_dnn(ctx, in);
 
-    dnn_result = (ctx->dnn_module->execute_model)(ctx->model, &ctx->output, 1);
+    dnn_result = (ctx->dnn_module->execute_model)(ctx->model, &ctx->output, (const char **)&ctx->model_outputname, 1);
     if (dnn_result != DNN_SUCCESS){
         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
         av_frame_free(&in);
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 1dee317..37e1107 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -124,13 +124,13 @@  static int config_props(AVFilterLink *inlink)
     sr_context->input.height = inlink->h * sr_context->scale_factor;
     sr_context->input.channels = 1;
 
-    result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", &model_output_name, 1);
+    result = (sr_context->model->set_input)(sr_context->model->model, &sr_context->input, "x");
     if (result != DNN_SUCCESS){
         av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
         return AVERROR(EIO);
     }
 
-    result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, 1);
+    result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
     if (result != DNN_SUCCESS){
         av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
         return AVERROR(EIO);
@@ -139,12 +139,12 @@  static int config_props(AVFilterLink *inlink)
     if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
         sr_context->input.width = inlink->w;
         sr_context->input.height = inlink->h;
-        result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", &model_output_name, 1);
+        result = (sr_context->model->set_input)(sr_context->model->model, &sr_context->input, "x");
         if (result != DNN_SUCCESS){
             av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
             return AVERROR(EIO);
         }
-        result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, 1);
+        result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
         if (result != DNN_SUCCESS){
             av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
             return AVERROR(EIO);
@@ -203,6 +203,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = context->outputs[0];
     AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     DNNReturnType dnn_result;
+    const char *model_output_name = "y";
 
     if (!out){
         av_log(context, AV_LOG_ERROR, "could not allocate memory for output frame\n");
@@ -233,7 +234,7 @@  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     }
     av_frame_free(&in);
 
-    dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, 1);
+    dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
     if (dnn_result != DNN_SUCCESS){
         av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
         return AVERROR(EIO);