From patchwork Wed Mar 24 07:39:27 2021
X-Patchwork-Submitter: "Fu, Ting" <ting.fu@intel.com>
X-Patchwork-Id: 26593
From: Ting Fu <ting.fu@intel.com>
To: ffmpeg-devel@ffmpeg.org
Date: Wed, 24 Mar 2021 15:39:27 +0800
Message-Id: <20210324073928.3570-2-ting.fu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210324073928.3570-1-ting.fu@intel.com>
References: <20210324073928.3570-1-ting.fu@intel.com>
Subject: [FFmpeg-devel] [PATCH 2/3] lavfi/dnn_backend_tensorflow.c: fix mem leak in load_native_model

Signed-off-by: Ting Fu <ting.fu@intel.com>
---
 libavfilter/dnn/dnn_backend_tf.c | 55 ++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index e016571304..c18cb4063f 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -330,7 +330,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_OperationDescription *op_desc;
     TF_Output input;
     int64_t strides[] = {1, 1, 1, 1};
-    TF_Tensor *tensor;
+    TF_Tensor *kernel_tensor = NULL, *biases_tensor = NULL;
     int64_t dims[4];
     int dims_len;
     char name_buffer[NAME_BUFFER_SIZE];
@@ -347,17 +347,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     dims[2] = params->kernel_size;
     dims[3] = params->input_num;
     dims_len = 4;
-    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
-    memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
-    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
+    kernel_tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
+    memcpy(TF_TensorData(kernel_tensor), params->kernel, size * sizeof(float));
+    TF_SetAttrTensor(op_desc, "value", kernel_tensor, tf_model->status);
"value", kernel_tensor, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to set value for kernel of conv layer %d\n", layer); - return DNN_ERROR; + goto err; } op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add kernel to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } snprintf(name_buffer, NAME_BUFFER_SIZE, "transpose%d", layer); @@ -370,8 +368,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o TF_SetAttrType(op_desc, "Tperm", TF_INT32); op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add transpose to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } snprintf(name_buffer, NAME_BUFFER_SIZE, "conv2d%d", layer); @@ -385,8 +382,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o TF_SetAttrString(op_desc, "padding", "VALID", 5); *cur_op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add conv2d to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_biases%d", layer); @@ -394,17 +390,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o TF_SetAttrType(op_desc, "dtype", TF_FLOAT); dims[0] = params->output_num; dims_len = 1; - tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float)); - memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float)); - TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status); + biases_tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float)); + memcpy(TF_TensorData(biases_tensor), params->biases, params->output_num * sizeof(float)); + TF_SetAttrTensor(op_desc, "value", biases_tensor, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to set value for conv_biases of conv layer %d\n", layer); - return DNN_ERROR; + goto err; } op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add conv_biases to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } snprintf(name_buffer, NAME_BUFFER_SIZE, "bias_add%d", layer); @@ -416,8 +410,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o TF_SetAttrType(op_desc, "T", TF_FLOAT); *cur_op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add bias_add to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } snprintf(name_buffer, NAME_BUFFER_SIZE, "activation%d", layer); @@ -440,11 +433,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o TF_SetAttrType(op_desc, "T", TF_FLOAT); *cur_op = TF_FinishOperation(op_desc, tf_model->status); if (TF_GetCode(tf_model->status) != TF_OK){ - av_log(ctx, AV_LOG_ERROR, "Failed to add activation function to conv layer %d\n", layer); - return DNN_ERROR; + goto err; } return DNN_SUCCESS; +err: + TF_DeleteTensor(kernel_tensor); + TF_DeleteTensor(biases_tensor); + av_log(ctx, AV_LOG_ERROR, "Failed to add conv layer %d\n", layer); + return DNN_ERROR; } static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op, @@ -499,11 +496,13 
     pads[7] = params->paddings[3][1];
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -519,6 +518,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -546,11 +546,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     *y = params->val.y;
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -565,6 +567,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -579,7 +582,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     TF_OperationDescription *op_desc;
     TF_Operation *op;
     TF_Operation *transpose_op;
-    TF_Tensor *tensor;
+    TF_Tensor *tensor = NULL;
     TF_Output input;
     int32_t *transpose_perm;
     int64_t transpose_perm_shape[] = {4};
@@ -600,6 +603,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file

 #define CLEANUP_ON_ERROR(tf_model) \
 { \
+    TF_DeleteTensor(tensor); \
     TF_DeleteGraph(tf_model->graph); \
     TF_DeleteStatus(tf_model->status); \
     av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
@@ -627,6 +631,9 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         CLEANUP_ON_ERROR(tf_model);
     }
     transpose_op = TF_FinishOperation(op_desc, tf_model->status);
+    if (TF_GetCode(tf_model->status) != TF_OK){
+        CLEANUP_ON_ERROR(tf_model);
+    }

     for (layer = 0; layer < native_model->layers_num; ++layer){
         switch (native_model->layers[layer].type){
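
Note for readers unfamiliar with the idiom: the add_conv_layer() part of this
patch replaces six near-identical av_log()/return pairs with a single
goto-based cleanup label, the usual C pattern for not leaking partially built
resources. The sketch below shows the pattern in isolation; build_layer(),
out_kernel/out_biases, and the use of malloc()/free() in place of
TF_AllocateTensor()/TF_DeleteTensor() are all hypothetical stand-ins, not
code from the patch:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper mirroring the patch's pattern: every resource
     * pointer starts as NULL, every failure path jumps to one cleanup
     * label, and the label releases unconditionally. free(NULL) is a
     * defined no-op, just as the err: label in the patch relies on
     * TF_DeleteTensor() being safe when biases_tensor is still NULL. */
    static int build_layer(const float *kernel, size_t kernel_len,
                           const float *biases, size_t biases_len,
                           float **out_kernel, float **out_biases)
    {
        float *kernel_buf = NULL, *biases_buf = NULL;

        kernel_buf = malloc(kernel_len * sizeof(*kernel_buf));
        if (!kernel_buf)
            goto err;
        memcpy(kernel_buf, kernel, kernel_len * sizeof(*kernel_buf));

        biases_buf = malloc(biases_len * sizeof(*biases_buf));
        if (!biases_buf)
            goto err;
        memcpy(biases_buf, biases, biases_len * sizeof(*biases_buf));

        /* Success: ownership of both buffers passes to the caller. */
        *out_kernel = kernel_buf;
        *out_biases = biases_buf;
        return 0;

    err:
        /* Whichever step failed, each pointer is either a live
         * allocation or still NULL, so one cleanup covers every exit. */
        free(kernel_buf);
        free(biases_buf);
        return -1;
    }

Funneling all failures through one label is what lets the patch drop the
per-site error returns in favour of a single err: block at the end of
add_conv_layer().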