From patchwork Wed Aug 19 13:43:13 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Fu, Ting"
X-Patchwork-Id: 21740
From: Ting Fu
To: ffmpeg-devel@ffmpeg.org
Date: Wed, 19 Aug 2020 21:43:13 +0800
Message-Id: <20200819134313.16375-1-ting.fu@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [FFmpeg-devel] [PATCH] dnn/native: rename struct ConvolutionalNetwork to NativeModel

Signed-off-by: Ting Fu
---
 libavfilter/dnn/dnn_backend_native.c | 112 +++++++++++++--------
 libavfilter/dnn/dnn_backend_native.h |   4 +-
 libavfilter/dnn/dnn_backend_tf.c     |  24 +++---
 3 files changed, 70 insertions(+), 70 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index adc652a2c4..0be9c0b53c 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -30,10 +30,10 @@
 
 static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NativeModel *native_model = (NativeModel *)model;
 
-    for (int i = 0; i < network->operands_num; ++i) {
-        DnnOperand *oprd = &network->operands[i];
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        DnnOperand *oprd = &native_model->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT)
                 return DNN_ERROR;
@@ -52,15 +52,15 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
 
 static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NativeModel *native_model = (NativeModel *)model;
     DnnOperand *oprd = NULL;
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0)
         return DNN_ERROR;
 
     /* inputs */
-    for (int i = 0; i < network->operands_num; ++i) {
-        oprd = &network->operands[i];
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        oprd = &native_model->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT)
                 return DNN_ERROR;
@@ -88,24 +88,24 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
     input->data = oprd->data;
 
     /* outputs */
-    network->nb_output = 0;
-    av_freep(&network->output_indexes);
-    network->output_indexes = av_mallocz_array(nb_output, sizeof(*network->output_indexes));
-    if (!network->output_indexes)
+    native_model->nb_output = 0;
+    av_freep(&native_model->output_indexes);
+    native_model->output_indexes = av_mallocz_array(nb_output, sizeof(*native_model->output_indexes));
+    if (!native_model->output_indexes)
         return DNN_ERROR;
 
     for (uint32_t i = 0; i < nb_output; ++i) {
         const char *output_name = output_names[i];
-        for (int j = 0; j < network->operands_num; ++j) {
-            oprd = &network->operands[j];
+        for (int j = 0; j < native_model->operands_num; ++j) {
+            oprd = &native_model->operands[j];
             if (strcmp(oprd->name, output_name) == 0) {
-                network->output_indexes[network->nb_output++] = j;
+                native_model->output_indexes[native_model->nb_output++] = j;
                 break;
             }
         }
     }
 
-    if (network->nb_output != nb_output)
+    if (native_model->nb_output != nb_output)
         return DNN_ERROR;
 
     return DNN_SUCCESS;
@@ -122,7 +122,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     char *buf;
     size_t size;
     int version, header_size, major_version_expected = 1;
-    ConvolutionalNetwork *network = NULL;
+    NativeModel *native_model = NULL;
     AVIOContext *model_file_context;
     int file_size, dnn_size, parsed_size;
     int32_t layer;
@@ -167,29 +167,29 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     dnn_size += 4;
     header_size = dnn_size;
 
-    network = av_mallocz(sizeof(ConvolutionalNetwork));
-    if (!network){
+    native_model = av_mallocz(sizeof(NativeModel));
+    if (!native_model){
         goto fail;
     }
-    model->model = (void *)network;
+    model->model = (void *)native_model;
 
     avio_seek(model_file_context, file_size - 8, SEEK_SET);
-    network->layers_num = (int32_t)avio_rl32(model_file_context);
-    network->operands_num = (int32_t)avio_rl32(model_file_context);
+    native_model->layers_num = (int32_t)avio_rl32(model_file_context);
+    native_model->operands_num = (int32_t)avio_rl32(model_file_context);
     dnn_size += 8;
     avio_seek(model_file_context, header_size, SEEK_SET);
 
-    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
-    if (!network->layers){
+    native_model->layers = av_mallocz(native_model->layers_num * sizeof(Layer));
+    if (!native_model->layers){
         goto fail;
     }
 
-    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
-    if (!network->operands){
+    native_model->operands = av_mallocz(native_model->operands_num * sizeof(DnnOperand));
+    if (!native_model->operands){
         goto fail;
     }
 
-    for (layer = 0; layer < network->layers_num; ++layer){
+    for (layer = 0; layer < native_model->layers_num; ++layer){
         layer_type = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
@@ -197,25 +197,25 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
             goto fail;
         }
 
-        network->layers[layer].type = layer_type;
-        parsed_size = layer_funcs[layer_type].pf_load(&network->layers[layer], model_file_context, file_size, network->operands_num);
+        native_model->layers[layer].type = layer_type;
+        parsed_size = layer_funcs[layer_type].pf_load(&native_model->layers[layer], model_file_context, file_size, native_model->operands_num);
         if (!parsed_size) {
             goto fail;
         }
         dnn_size += parsed_size;
     }
 
-    for (int32_t i = 0; i < network->operands_num; ++i){
+    for (int32_t i = 0; i < native_model->operands_num; ++i){
         DnnOperand *oprd;
         int32_t name_len;
         int32_t operand_index = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
 
-        if (operand_index >= network->operands_num) {
+        if (operand_index >= native_model->operands_num) {
             goto fail;
         }
 
-        oprd = &network->operands[operand_index];
+        oprd = &native_model->operands[operand_index];
         name_len = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
@@ -257,25 +257,25 @@ fail:
 
 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
+    NativeModel *native_model = (NativeModel *)model->model;
     int32_t layer;
-    uint32_t nb = FFMIN(nb_output, network->nb_output);
+    uint32_t nb = FFMIN(nb_output, native_model->nb_output);
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0)
         return DNN_ERROR;
-    if (!network->operands[0].data)
+    if (!native_model->operands[0].data)
         return DNN_ERROR;
 
-    for (layer = 0; layer < network->layers_num; ++layer){
-        DNNLayerType layer_type = network->layers[layer].type;
-        layer_funcs[layer_type].pf_exec(network->operands,
-                                        network->layers[layer].input_operand_indexes,
-                                        network->layers[layer].output_operand_index,
-                                        network->layers[layer].params);
+    for (layer = 0; layer < native_model->layers_num; ++layer){
+        DNNLayerType layer_type = native_model->layers[layer].type;
+        layer_funcs[layer_type].pf_exec(native_model->operands,
+                                        native_model->layers[layer].input_operand_indexes,
+                                        native_model->layers[layer].output_operand_index,
+                                        native_model->layers[layer].params);
     }
 
     for (uint32_t i = 0; i < nb; ++i) {
-        DnnOperand *oprd = &network->operands[network->output_indexes[i]];
+        DnnOperand *oprd = &native_model->operands[native_model->output_indexes[i]];
         outputs[i].data = oprd->data;
         outputs[i].height = oprd->dims[1];
         outputs[i].width = oprd->dims[2];
@@ -309,34 +309,34 @@ int32_t calculate_operand_data_length(const DnnOperand* oprd)
 
 void ff_dnn_free_model_native(DNNModel **model)
 {
-    ConvolutionalNetwork *network;
+    NativeModel *native_model;
     ConvolutionalParams *conv_params;
     int32_t layer;
 
     if (*model) {
         if ((*model)->model) {
-            network = (ConvolutionalNetwork *)(*model)->model;
-            if (network->layers) {
-                for (layer = 0; layer < network->layers_num; ++layer){
-                    if (network->layers[layer].type == DLT_CONV2D){
-                        conv_params = (ConvolutionalParams *)network->layers[layer].params;
+            native_model = (NativeModel *)(*model)->model;
+            if (native_model->layers) {
+                for (layer = 0; layer < native_model->layers_num; ++layer){
+                    if (native_model->layers[layer].type == DLT_CONV2D){
+                        conv_params = (ConvolutionalParams *)native_model->layers[layer].params;
                         av_freep(&conv_params->kernel);
                         av_freep(&conv_params->biases);
                     }
-                    av_freep(&network->layers[layer].params);
+                    av_freep(&native_model->layers[layer].params);
                 }
-                av_freep(&network->layers);
+                av_freep(&native_model->layers);
             }
 
-            if (network->operands) {
-                for (uint32_t operand = 0; operand < network->operands_num; ++operand)
-                    av_freep(&network->operands[operand].data);
-                av_freep(&network->operands);
+            if (native_model->operands) {
+                for (uint32_t operand = 0; operand < native_model->operands_num; ++operand)
+                    av_freep(&native_model->operands[operand].data);
+                av_freep(&native_model->operands);
             }
 
-            av_freep(&network->output_indexes);
-            av_freep(&network);
+            av_freep(&native_model->output_indexes);
+            av_freep(&native_model);
         }
         av_freep(model);
     }
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index b455e44992..228d5b742b 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -107,14 +107,14 @@ typedef struct InputParams{
 } InputParams;
 
 // Represents simple feed-forward convolutional network.
-typedef struct ConvolutionalNetwork{
+typedef struct NativeModel{
     Layer *layers;
     int32_t layers_num;
     DnnOperand *operands;
     int32_t operands_num;
     int32_t *output_indexes;
     uint32_t nb_output;
-} ConvolutionalNetwork;
+} NativeModel;
 
 DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *options);
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 9099d2b812..9d079aa92e 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -487,15 +487,15 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     int64_t transpose_perm_shape[] = {4};
     int64_t input_shape[] = {1, -1, -1, -1};
     DNNReturnType layer_add_res;
-    DNNModel *native_model = NULL;
-    ConvolutionalNetwork *conv_network;
+    DNNModel *model = NULL;
+    NativeModel *native_model;
 
-    native_model = ff_dnn_load_model_native(model_filename, NULL);
-    if (!native_model){
+    model = ff_dnn_load_model_native(model_filename, NULL);
+    if (!model){
         return DNN_ERROR;
     }
 
-    conv_network = (ConvolutionalNetwork *)native_model->model;
+    native_model = (NativeModel *)model->model;
 
     tf_model->graph = TF_NewGraph();
     tf_model->status = TF_NewStatus();
@@ -528,26 +528,26 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     }
     transpose_op = TF_FinishOperation(op_desc, tf_model->status);
 
-    for (layer = 0; layer < conv_network->layers_num; ++layer){
-        switch (conv_network->layers[layer].type){
+    for (layer = 0; layer < native_model->layers_num; ++layer){
+        switch (native_model->layers[layer].type){
         case DLT_INPUT:
             layer_add_res = DNN_SUCCESS;
             break;
         case DLT_CONV2D:
             layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
-                                           (ConvolutionalParams *)conv_network->layers[layer].params, layer);
+                                           (ConvolutionalParams *)native_model->layers[layer].params, layer);
             break;
         case DLT_DEPTH_TO_SPACE:
             layer_add_res = add_depth_to_space_layer(tf_model, &op,
-                                                     (DepthToSpaceParams *)conv_network->layers[layer].params, layer);
+                                                     (DepthToSpaceParams *)native_model->layers[layer].params, layer);
             break;
        case DLT_MIRROR_PAD:
             layer_add_res = add_pad_layer(tf_model, &op,
-                                          (LayerPadParams *)conv_network->layers[layer].params, layer);
+                                          (LayerPadParams *)native_model->layers[layer].params, layer);
             break;
         case DLT_MAXIMUM:
             layer_add_res = add_maximum_layer(tf_model, &op,
-                                              (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
+                                              (DnnLayerMaximumParams *)native_model->layers[layer].params, layer);
             break;
         default:
             CLEANUP_ON_ERROR(tf_model);
@@ -567,7 +567,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         CLEANUP_ON_ERROR(tf_model);
     }
 
-    ff_dnn_free_model_native(&native_model);
+    ff_dnn_free_model_native(&model);
 
     return DNN_SUCCESS;
 }
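For readers skimming the patch: no behaviour changes, only the name of the type stored behind the
opaque DNNModel.model pointer, so every native-backend entry point keeps the same cast-then-walk
pattern shown above. A minimal sketch of that pattern after this patch; count_conv2d_layers is a
hypothetical helper written for illustration only, not part of this patch or of the tree:

    #include "dnn_backend_native.h"

    /* Hypothetical example: walk the layer list of a loaded native model the
     * same way the backend functions in this patch do. */
    static int32_t count_conv2d_layers(const DNNModel *model)
    {
        /* model->model is the void * filled in by ff_dnn_load_model_native() */
        NativeModel *native_model = (NativeModel *)model->model;
        int32_t count = 0;

        for (int32_t layer = 0; layer < native_model->layers_num; ++layer) {
            if (native_model->layers[layer].type == DLT_CONV2D)
                count++;
        }
        return count;
    }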