From patchwork Thu Aug 27 04:17:21 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Fu, Ting"
X-Patchwork-Id: 21939
From: Ting Fu
To: ffmpeg-devel@ffmpeg.org
Date: Thu, 27 Aug 2020 12:17:21 +0800
Message-Id: <20200827041722.29262-1-ting.fu@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [FFmpeg-devel] [PATCH 1/2] dnn/openvino: add log error message

Signed-off-by: Ting Fu
---
 libavfilter/dnn/dnn_backend_openvino.c | 51 ++++++++++++++++++++++----
 1 file changed, 43 insertions(+), 8 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 034dee1839..5d6d3ed542 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -28,7 +28,12 @@
 #include "libavutil/avassert.h"
 #include <c_api/ie_c_api.h>
 
+typedef struct OVContext {
+    const AVClass *class;
+} OVContext;
+
 typedef struct OVModel{
+    OVContext ctx;
     ie_core_t *core;
     ie_network_t *network;
     ie_executable_network_t *exe_network;
@@ -36,6 +41,14 @@ typedef struct OVModel{
     ie_blob_t *input_blob;
 } OVModel;
 
+static const AVClass dnn_openvino_class = {
+    .class_name = "dnn_openvino",
+    .item_name  = av_default_item_name,
+    .option     = NULL,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_FILTER,
+};
+
 static DNNDataType precision_to_datatype(precision_e precision)
 {
     switch (precision)
@@ -51,6 +64,7 @@ static DNNDataType precision_to_datatype(precision_e precision)
 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
 {
     OVModel *ov_model = (OVModel *)model;
+    OVContext *ctx = &ov_model->ctx;
     char *model_input_name = NULL;
     IEStatusCode status;
     size_t model_input_count = 0;
@@ -58,25 +72,33 @@ static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
     precision_e precision;
 
     status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
-    if (status != OK)
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
         return DNN_ERROR;
+    }
 
     for (size_t i = 0; i < model_input_count; i++) {
         status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
-        if (status != OK)
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
             return DNN_ERROR;
+        }
         if (strcmp(model_input_name, input_name) == 0) {
             ie_network_name_free(&model_input_name);
             status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
             status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
-            if (status != OK)
+            if (status != OK) {
+                av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
                 return DNN_ERROR;
+            }
 
             // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
             // while we pass NHWC data from FFmpeg to openvino
             status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
-            if (status != OK)
+            if (status != OK) {
+                av_log(ctx, AV_LOG_ERROR, "Input \"%s\" does not match layout NHWC\n", input_name);
                 return DNN_ERROR;
+            }
 
             input->channels = dims.dims[1];
             input->height = dims.dims[2];
@@ -88,12 +110,14 @@ static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
         ie_network_name_free(&model_input_name);
     }
 
+    av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
     return DNN_ERROR;
 }
 
 static DNNReturnType set_input_ov(void *model, DNNData *input, const char *input_name)
 {
     OVModel *ov_model = (OVModel *)model;
+    OVContext *ctx = &ov_model->ctx;
     IEStatusCode status;
     dimensions_t dims;
     precision_e precision;
@@ -129,6 +153,7 @@ err:
         ie_blob_free(&ov_model->input_blob);
     if (ov_model->infer_request)
         ie_infer_request_free(&ov_model->infer_request);
+    av_log(ctx, AV_LOG_ERROR, "Failed to create inference instance or get input data/dims/precision/memory\n");
     return DNN_ERROR;
 }
 
@@ -147,6 +172,7 @@ DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options)
     ov_model = av_mallocz(sizeof(OVModel));
     if (!ov_model)
         goto err;
+    ov_model->ctx.class = &dnn_openvino_class;
 
     status = ie_core_create("", &ov_model->core);
     if (status != OK)
@@ -188,25 +214,34 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, c
     precision_e precision;
     ie_blob_buffer_t blob_buffer;
     OVModel *ov_model = (OVModel *)model->model;
+    OVContext *ctx = &ov_model->ctx;
     IEStatusCode status = ie_infer_request_infer(ov_model->infer_request);
-    if (status != OK)
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
         return DNN_ERROR;
+    }
 
     for (uint32_t i = 0; i < nb_output; ++i) {
         const char *output_name = output_names[i];
         ie_blob_t *output_blob = NULL;
         status = ie_infer_request_get_blob(ov_model->infer_request, output_name, &output_blob);
-        if (status != OK)
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
             return DNN_ERROR;
+        }
         status = ie_blob_get_buffer(output_blob, &blob_buffer);
-        if (status != OK)
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
             return DNN_ERROR;
+        }
 
         status |= ie_blob_get_dims(output_blob, &dims);
         status |= ie_blob_get_precision(output_blob, &precision);
-        if (status != OK)
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
             return DNN_ERROR;
+        }
 
         outputs[i].channels = dims.dims[1];
         outputs[i].height = dims.dims[2];

From patchwork Thu Aug 27 04:17:22 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Fu, Ting"
X-Patchwork-Id: 21940
From: Ting Fu
To: ffmpeg-devel@ffmpeg.org
Date: Thu, 27 Aug 2020 12:17:22 +0800
Message-Id: <20200827041722.29262-2-ting.fu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200827041722.29262-1-ting.fu@intel.com>
References: <20200827041722.29262-1-ting.fu@intel.com>
Subject: [FFmpeg-devel] [PATCH 2/2] dnn/tensorflow: add log error message

Signed-off-by: Ting Fu
---
 libavfilter/dnn/dnn_backend_tf.c | 59 ++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index bdc90d5063..5e7f37bb12 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -34,7 +34,12 @@
 
 #include <tensorflow/c/c_api.h>
 
+typedef struct TFContext {
+    const AVClass *class;
+} TFContext;
+
 typedef struct TFModel{
+    TFContext ctx;
     TF_Graph *graph;
     TF_Session *session;
     TF_Status *status;
@@ -44,6 +49,14 @@ typedef struct TFModel{
     uint32_t nb_output;
 } TFModel;
 
+static const AVClass dnn_tensorflow_class = {
+    .class_name = "dnn_tensorflow",
+    .item_name  = av_default_item_name,
+    .option     = NULL,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_FILTER,
+};
+
 static void free_buffer(void *data, size_t length)
 {
     av_freep(&data);
@@ -107,13 +120,16 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
 static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = (TFModel *)model;
+    TFContext *ctx = &tf_model->ctx;
     TF_Status *status;
     int64_t dims[4];
     TF_Output tf_output;
 
     tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
-    if (!tf_output.oper)
+    if (!tf_output.oper) {
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
         return DNN_ERROR;
+    }
 
     tf_output.index = 0;
     input->dt = TF_OperationOutputType(tf_output);
@@ -122,6 +138,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
     if (TF_GetCode(status) != TF_OK){
         TF_DeleteStatus(status);
+        av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
         return DNN_ERROR;
     }
     TF_DeleteStatus(status);
@@ -138,12 +155,14 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
 static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = (TFModel *)model;
+    TFContext *ctx = &tf_model->ctx;
     TF_SessionOptions *sess_opts;
     const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");
 
     // Input operation
     tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
     if (!tf_model->input.oper){
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
         return DNN_ERROR;
     }
     tf_model->input.index = 0;
@@ -152,6 +171,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
     }
     tf_model->input_tensor = allocate_input_tensor(input);
     if (!tf_model->input_tensor){
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
         return DNN_ERROR;
     }
     input->data = (float *)TF_TensorData(tf_model->input_tensor);
@@ -167,6 +187,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
     TF_DeleteSessionOptions(sess_opts);
     if (TF_GetCode(tf_model->status) != TF_OK)
     {
+        av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
         return DNN_ERROR;
     }
 
@@ -178,6 +199,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
                       &init_op, 1, NULL, tf_model->status);
         if (TF_GetCode(tf_model->status) != TF_OK)
         {
+            av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
             return DNN_ERROR;
         }
     }
@@ -187,11 +209,13 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
 
 static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Buffer *graph_def;
     TF_ImportGraphDefOptions *graph_opts;
 
     graph_def = read_graph(model_filename);
     if (!graph_def){
+        av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
         return DNN_ERROR;
     }
     tf_model->graph = TF_NewGraph();
@@ -203,6 +227,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteGraph(tf_model->graph);
         TF_DeleteStatus(tf_model->status);
+        av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
         return DNN_ERROR;
     }
 
@@ -214,6 +239,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
 static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                     ConvolutionalParams* params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_OperationDescription *op_desc;
     TF_Output input;
@@ -239,10 +265,12 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for kernel of conv layer %d\n", layer);
         return DNN_ERROR;
    }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add kernel to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -256,6 +284,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "Tperm", TF_INT32);
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add transpose to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -270,6 +299,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrString(op_desc, "padding", "VALID", 5);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add conv2d to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -282,10 +312,12 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for conv_biases of conv layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add conv_biases to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -298,6 +330,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add bias_add to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -313,6 +346,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
         op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
         break;
     default:
+        av_log(ctx, AV_LOG_ERROR, "Unsupported convolutional activation function\n");
         return DNN_ERROR;
     }
     input.oper = *cur_op;
@@ -320,6 +354,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add activation function to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -329,6 +364,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
 static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                               DepthToSpaceParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_OperationDescription *op_desc;
     TF_Output input;
     char name_buffer[NAME_BUFFER_SIZE];
@@ -342,6 +378,7 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
     TF_SetAttrInt(op_desc, "block_size", params->block_size);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add depth_to_space to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -351,6 +388,7 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
 static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                    LayerPadParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_Tensor *tensor;
     TF_OperationDescription *op_desc;
@@ -375,10 +413,12 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     pads[7] = params->paddings[3][1];
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -393,6 +433,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -402,6 +443,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
 static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                        DnnLayerMaximumParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_Tensor *tensor;
     TF_OperationDescription *op_desc;
@@ -418,10 +460,12 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     *y = params->val.y;
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -435,6 +479,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -443,6 +488,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
 static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
 {
+    TFContext *ctx = &tf_model->ctx;
     int32_t layer;
     TF_OperationDescription *op_desc;
     TF_Operation *op;
@@ -458,6 +504,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
 
     model = ff_dnn_load_model_native(model_filename, NULL);
     if (!model){
+        av_log(ctx, AV_LOG_ERROR, "Failed to load native model\n");
         return DNN_ERROR;
     }
 
@@ -469,6 +516,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     { \
         TF_DeleteGraph(tf_model->graph); \
         TF_DeleteStatus(tf_model->status); \
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
         return DNN_ERROR; \
     }
 
@@ -553,6 +601,7 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, const char *options)
         av_freep(&model);
         return NULL;
     }
+    tf_model->ctx.class = &dnn_tensorflow_class;
 
     if (load_tf_model(tf_model, model_filename) != DNN_SUCCESS){
         if (load_native_model(tf_model, model_filename) != DNN_SUCCESS){
@@ -575,10 +624,13 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
 {
     TF_Output *tf_outputs;
     TFModel *tf_model = (TFModel *)model->model;
+    TFContext *ctx = &tf_model->ctx;
 
     tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
-    if (tf_outputs == NULL)
+    if (tf_outputs == NULL) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
         return DNN_ERROR;
+    }
 
     if (tf_model->output_tensors) {
         for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
@@ -593,6 +645,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
     tf_model->output_tensors = av_mallocz_array(nb_output, sizeof(*tf_model->output_tensors));
     if (!tf_model->output_tensors) {
         av_freep(&tf_outputs);
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
         return DNN_ERROR;
     }
 
@@ -600,6 +653,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
         tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
         if (!tf_outputs[i].oper) {
             av_freep(&tf_outputs);
+            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", output_names[i]);
             return DNN_ERROR;
         }
         tf_outputs[i].index = 0;
@@ -611,6 +665,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
                   NULL, 0, NULL, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK) {
         av_freep(&tf_outputs);
+        av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
         return DNN_ERROR;
     }
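
For context on the mechanism both patches rely on: the only purpose of the new
OVContext/TFContext structs is to give av_log() an AVClass-carrying context, so
the backends' errors are printed with a component prefix such as
"[dnn_openvino @ 0x...]" instead of being logged anonymously through a NULL
context. Below is a minimal, self-contained sketch of that pattern; the
DemoContext/demo_class names and the build command are illustrative assumptions
and not part of the patches, while the libavutil API calls themselves are real.

/* demo.c: AVClass-based logging context, the same pattern the DNN backends use.
 * Assumed build command: gcc demo.c $(pkg-config --cflags --libs libavutil)
 */
#include <libavutil/log.h>
#include <libavutil/version.h>

typedef struct DemoContext {
    const AVClass *class;   /* must stay the first member so av_log() can find it */
} DemoContext;

static const AVClass demo_class = {
    .class_name = "dnn_demo",              /* shows up in the log prefix */
    .item_name  = av_default_item_name,    /* default: print class_name */
    .option     = NULL,                    /* no AVOptions attached */
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

int main(void)
{
    DemoContext ctx = { .class = &demo_class };

    /* Printed as "[dnn_demo @ 0x...] failed to do something: 42",
     * whereas av_log(NULL, ...) would print the message without a prefix. */
    av_log(&ctx, AV_LOG_ERROR, "failed to do something: %d\n", 42);
    return 0;
}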