
[FFmpeg-devel,3/3] lavfi/dnn_backend_tensorflow.c: fix mem leak in execute_model_tf

Message ID 20210324073928.3570-3-ting.fu@intel.com
State Accepted
Commit 637bdefdebba1b57d7e9055f4a9fc27251d1d5b4
Series [FFmpeg-devel,1/3] lavfi/dnn_backend_tensorflow.c: fix mem leak in load_tf_model

Checks

Context                  Check    Description
andriy/x86_make          success  Make finished
andriy/x86_make_fate     success  Make fate finished
andriy/PPC64_make        success  Make finished
andriy/PPC64_make_fate   success  Make fate finished

Commit Message

Fu, Ting March 24, 2021, 7:39 a.m. UTC
Signed-off-by: Ting Fu <ting.fu@intel.com>
---
 libavfilter/dnn/dnn_backend_tf.c | 5 +++++
 1 file changed, 5 insertions(+)
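
The leak being fixed: execute_model_tf() creates input_tensor before these checks, but the early error returns (unsupported multiple outputs, the two allocation failures, a missing output operation, and a failed session run) left the function without releasing it, so the tensor leaked on every failure. The patch adds TF_DeleteTensor(input_tensor) on each of those paths. Below is a minimal, self-contained sketch of the same pattern; generic heap buffers stand in for the tensor and the output arrays, and none of the names come from the FFmpeg code itself.

    #include <stdlib.h>

    /* Sketch only: heap buffers stand in for the input TF_Tensor and the
     * output arrays.  The point is the ownership rule the patch enforces:
     * a resource acquired early must be released on every early error
     * return, not just on the success path. */
    static int run_model_sketch(int nb_output)
    {
        void *input_tensor = malloc(64);      /* acquired before the checks */
        void *tf_outputs, *output_tensors;

        if (!input_tensor)
            return -1;

        if (nb_output != 1) {
            free(input_tensor);               /* the fix: release before the early return */
            return -1;
        }

        tf_outputs = calloc(nb_output, sizeof(void *));
        if (!tf_outputs) {
            free(input_tensor);               /* same on every failure path */
            return -1;
        }

        output_tensors = calloc(nb_output, sizeof(void *));
        if (!output_tensors) {
            free(input_tensor);
            free(tf_outputs);
            return -1;
        }

        /* ... run the model, consume the outputs ... */
        free(output_tensors);
        free(tf_outputs);
        free(input_tensor);
        return 0;
    }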

Comments

Guo, Yejun March 26, 2021, 1:02 a.m. UTC | #1
> -----Original Message-----
> From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Ting Fu
> Sent: March 24, 2021 15:39
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH 3/3] lavfi/dnn_backend_tensorflow.c: fix mem
> leak in execute_model_tf
> 
> Signed-off-by: Ting Fu <ting.fu@intel.com>
> ---
>  libavfilter/dnn/dnn_backend_tf.c | 5 +++++
>  1 file changed, 5 insertions(+)
> 
> diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
> index c18cb4063f..c0aa510630 100644
> --- a/libavfilter/dnn/dnn_backend_tf.c
> +++ b/libavfilter/dnn/dnn_backend_tf.c
> @@ -766,18 +766,21 @@ static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
>      if (nb_output != 1) {
>          // currently, the filter does not need multiple outputs,
>          // so we just pending the support until we really need it.
> +        TF_DeleteTensor(input_tensor);
>          avpriv_report_missing_feature(ctx, "multiple outputs");
>          return DNN_ERROR;
>      }
> 
>      tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
>      if (tf_outputs == NULL) {
> +        TF_DeleteTensor(input_tensor);
>          av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n"); \
>          return DNN_ERROR;
>      }
> 
>      output_tensors = av_mallocz_array(nb_output, sizeof(*output_tensors));
>      if (!output_tensors) {
> +        TF_DeleteTensor(input_tensor);
>          av_freep(&tf_outputs);
>          av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n"); \
>          return DNN_ERROR;
> @@ -786,6 +789,7 @@ static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
>      for (int i = 0; i < nb_output; ++i) {
>          tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
>          if (!tf_outputs[i].oper) {
> +            TF_DeleteTensor(input_tensor);
>              av_freep(&tf_outputs);
>              av_freep(&output_tensors);
>              av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", output_names[i]); \
> @@ -799,6 +803,7 @@ static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
>                    tf_outputs, output_tensors, nb_output,
>                    NULL, 0, NULL, tf_model->status);
>      if (TF_GetCode(tf_model->status) != TF_OK) {
> +        TF_DeleteTensor(input_tensor);
>          av_freep(&tf_outputs);
>          av_freep(&output_tensors);
>          av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
> --

LGTM, will push soon, thanks.

Patch

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index c18cb4063f..c0aa510630 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -766,18 +766,21 @@  static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
     if (nb_output != 1) {
         // currently, the filter does not need multiple outputs,
         // so we just pending the support until we really need it.
+        TF_DeleteTensor(input_tensor);
         avpriv_report_missing_feature(ctx, "multiple outputs");
         return DNN_ERROR;
     }
 
     tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
     if (tf_outputs == NULL) {
+        TF_DeleteTensor(input_tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n"); \
         return DNN_ERROR;
     }
 
     output_tensors = av_mallocz_array(nb_output, sizeof(*output_tensors));
     if (!output_tensors) {
+        TF_DeleteTensor(input_tensor);
         av_freep(&tf_outputs);
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n"); \
         return DNN_ERROR;
@@ -786,6 +789,7 @@  static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
     for (int i = 0; i < nb_output; ++i) {
         tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
         if (!tf_outputs[i].oper) {
+            TF_DeleteTensor(input_tensor);
             av_freep(&tf_outputs);
             av_freep(&output_tensors);
             av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", output_names[i]); \
@@ -799,6 +803,7 @@  static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
                   tf_outputs, output_tensors, nb_output,
                   NULL, 0, NULL, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK) {
+        TF_DeleteTensor(input_tensor);
         av_freep(&tf_outputs);
         av_freep(&output_tensors);
         av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
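
One structural note, purely illustrative and not part of the accepted patch: since input_tensor now has to be released at five separate return points, a common alternative shape in FFmpeg code is a single cleanup label reached by goto from each failure point, giving every resource exactly one release site. A generic sketch of that shape, again with heap buffers standing in for the tensors and hypothetical names throughout:

    #include <stdlib.h>

    /* Illustrative alternative only, not how the accepted patch is written:
     * one cleanup label frees everything, so each resource has a single
     * release site and free(NULL) keeps partially-initialised exits safe. */
    static int run_model_goto_sketch(int nb_output)
    {
        int ret = -1;
        void *input_tensor = malloc(64);
        void *tf_outputs = NULL, *output_tensors = NULL;

        if (!input_tensor)
            return -1;
        if (nb_output != 1)
            goto end;
        if (!(tf_outputs = calloc(nb_output, sizeof(void *))))
            goto end;
        if (!(output_tensors = calloc(nb_output, sizeof(void *))))
            goto end;

        /* ... run the model ... */
        ret = 0;
    end:
        free(output_tensors);                 /* free(NULL) is a no-op */
        free(tf_outputs);
        free(input_tensor);
        return ret;
    }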