@@ -268,10 +268,12 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
for (layer = 0; layer < native_model->layers_num; ++layer){
DNNLayerType layer_type = native_model->layers[layer].type;
- layer_funcs[layer_type].pf_exec(native_model->operands,
- native_model->layers[layer].input_operand_indexes,
- native_model->layers[layer].output_operand_index,
- native_model->layers[layer].params);
+ if (layer_funcs[layer_type].pf_exec(native_model->operands,
+ native_model->layers[layer].input_operand_indexes,
+ native_model->layers[layer].output_operand_index,
+ native_model->layers[layer].params) == DNN_ERROR) {
+ return DNN_ERROR;
+ }
}
for (uint32_t i = 0; i < nb; ++i) {
@@ -109,7 +109,7 @@ int dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operan
output_operand->length = calculate_operand_data_length(output_operand);
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
- return -1;
+ return DNN_ERROR;
output = output_operand->data;
for (int y = 0; y < height_end; y += kernel_strides) {
@@ -114,10 +114,10 @@ int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_
output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
if (output_operand->length <= 0)
- return -1;
+ return DNN_ERROR;
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
- return -1;
+ return DNN_ERROR;
output = output_operand->data;
av_assert0(channel == conv_params->input_num);
@@ -76,10 +76,10 @@ int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_ope
output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
if (output_operand->length <= 0)
- return -1;
+ return DNN_ERROR;
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
- return -1;
+ return DNN_ERROR;
output = output_operand->data;
for (y = 0; y < height; ++y){
@@ -176,6 +176,6 @@ int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_ope
}
return 0;
default:
- return -1;
+ return DNN_ERROR;
}
}
@@ -143,6 +143,6 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
dst[i] = round(src[i]);
return 0;
default:
- return -1;
+ return DNN_ERROR;
}
}
@@ -112,10 +112,10 @@ int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_ind
output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
if (output_operand->length <= 0)
- return -1;
+ return DNN_ERROR;
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
- return -1;
+ return DNN_ERROR;
output = output_operand->data;
// copy the original data
Unify all error returns as DNN_ERROR, so that model execution stops in ff_dnn_execute_model_native when a layer_func.pf_exec call returns an error. Signed-off-by: Ting Fu <ting.fu@intel.com> --- V4: Rename NetworkContext to NativeContext Move pf_exec return DNN_ERROR from PATCH 2/2 to 1/2 libavfilter/dnn/dnn_backend_native.c | 10 ++++++---- libavfilter/dnn/dnn_backend_native_layer_avgpool.c | 2 +- libavfilter/dnn/dnn_backend_native_layer_conv2d.c | 4 ++-- libavfilter/dnn/dnn_backend_native_layer_depth2space.c | 4 ++-- libavfilter/dnn/dnn_backend_native_layer_mathbinary.c | 2 +- libavfilter/dnn/dnn_backend_native_layer_mathunary.c | 2 +- libavfilter/dnn/dnn_backend_native_layer_pad.c | 4 ++-- 7 files changed, 15 insertions(+), 13 deletions(-)