@@ -2459,6 +2459,7 @@ HAVE_LIST="
texi2html
xmllint
zlib_gzip
+ openvino2
"
# options emitted with CONFIG_ prefix but not available on the command line
@@ -6767,8 +6768,9 @@ enabled libopenh264 && require_pkg_config libopenh264 openh264 wels/codec_
enabled libopenjpeg && { check_pkg_config libopenjpeg "libopenjp2 >= 2.1.0" openjpeg.h opj_version ||
{ require_pkg_config libopenjpeg "libopenjp2 >= 2.1.0" openjpeg.h opj_version -DOPJ_STATIC && add_cppflags -DOPJ_STATIC; } }
enabled libopenmpt && require_pkg_config libopenmpt "libopenmpt >= 0.2.6557" libopenmpt/libopenmpt.h openmpt_module_create -lstdc++ && append libopenmpt_extralibs "-lstdc++"
-enabled libopenvino && { check_pkg_config libopenvino openvino c_api/ie_c_api.h ie_c_api_version ||
- require libopenvino c_api/ie_c_api.h ie_c_api_version -linference_engine_c_api; }
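+# Try the OpenVINO 2.0 C API first (openvino/c/openvino.h) and set openvino2 when found;
+# otherwise fall back to the legacy Inference Engine C API.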
+enabled libopenvino && { { check_pkg_config libopenvino openvino openvino/c/openvino.h ov_core_create && enable openvino2; } ||
+ { check_pkg_config libopenvino openvino c_api/ie_c_api.h ie_c_api_version ||
+ require libopenvino c_api/ie_c_api.h ie_c_api_version -linference_engine_c_api; } }
enabled libopus && {
enabled libopus_decoder && {
require_pkg_config libopus opus opus_multistream.h opus_multistream_decoder_create
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -32,7 +32,11 @@
#include "libavutil/detection_bbox.h"
#include "../internal.h"
#include "safe_queue.h"
+#if HAVE_OPENVINO2
+#include <openvino/c/openvino.h>
+#else
#include <c_api/ie_c_api.h>
+#endif
#include "dnn_backend_common.h"
typedef struct OVOptions{
@@ -51,9 +55,20 @@ typedef struct OVContext {
typedef struct OVModel{
OVContext ctx;
DNNModel *model;
+#if HAVE_OPENVINO2
+ ov_core_t *core;
+ ov_model_t *ov_model;
+ ov_compiled_model_t *compiled_model;
+ ov_output_const_port_t* input_port;
+ ov_preprocess_input_info_t* input_info;
+ ov_output_const_port_t* output_port;
+ ov_preprocess_output_info_t* output_info;
+ ov_preprocess_prepostprocessor_t* preprocess;
+#else
ie_core_t *core;
ie_network_t *network;
ie_executable_network_t *exe_network;
+#endif
SafeQueue *request_queue; // holds OVRequestItem
Queue *task_queue; // holds TaskItem
Queue *lltask_queue; // holds LastLevelTaskItem
@@ -63,10 +78,15 @@ typedef struct OVModel{
// one request for one call to openvino
typedef struct OVRequestItem {
- ie_infer_request_t *infer_request;
LastLevelTaskItem **lltasks;
uint32_t lltask_count;
+#if HAVE_OPENVINO2
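+ // OV2 pairs the request with an ov_callback_t (callback_func + args);
+ // the legacy API uses ie_complete_call_back_t instead.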
+ ov_infer_request_t *infer_request;
+ ov_callback_t callback;
+#else
ie_complete_call_back_t callback;
+ ie_infer_request_t *infer_request;
+#endif
} OVRequestItem;
#define APPEND_STRING(generated_string, iterate_string) \
@@ -85,11 +105,61 @@ static const AVOption dnn_openvino_options[] = {
AVFILTER_DEFINE_CLASS(dnn_openvino);
+#if HAVE_OPENVINO2
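+/* Translation table from OpenVINO 2.0 status codes to AVERROR codes plus human-readable descriptions. */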
+static const struct {
+ ov_status_e status;
+ int av_err;
+ const char *desc;
+} ov2_errors[] = {
+ { OK, 0, "success" },
+ { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
+ { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
+ { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
+ { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
+ { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
+ { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
+ { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
+ { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
+ { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
+ { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
+ { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
+ { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
+ { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
+ { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
+ { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
+ { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "C method not implemented" },
+ { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
+};
+
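+/* Map an ov_status_e to its AVERROR (and, when desc is non-NULL, a short description);
+ * unknown codes fall back to AVERROR_UNKNOWN. */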
+static int ov2_map_error(ov_status_e status, const char **desc)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
+ if (ov2_errors[i].status == status) {
+ if (desc)
+ *desc = ov2_errors[i].desc;
+ return ov2_errors[i].av_err;
+ }
+ }
+ if (desc)
+ *desc = "unknown error";
+ return AVERROR_UNKNOWN;
+}
+#endif
+
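+// OV2 reports tensor element types as ov_element_type_e (F32, U8, ...);
+// the legacy API uses precision_e (FP32, U8, ...).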
+#if HAVE_OPENVINO2
+static DNNDataType precision_to_datatype(ov_element_type_e precision)
+#else
static DNNDataType precision_to_datatype(precision_e precision)
+#endif
{
switch (precision)
{
+#if HAVE_OPENVINO2
+ case F32:
+#else
case FP32:
+#endif
return DNN_FLOAT;
case U8:
return DNN_UINT8;
@@ -115,20 +185,61 @@ static int get_datatype_size(DNNDataType dt)
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
{
+ DNNData input;
+ LastLevelTaskItem *lltask;
+ TaskItem *task;
+ OVContext *ctx = &ov_model->ctx;
+#if HAVE_OPENVINO2
+ int64_t* dims;
+ ov_status_e status;
+ ov_tensor_t* tensor = NULL;
+ ov_shape_t input_shape = {0};
+ ov_element_type_e precision;
+ void *input_data_ptr = NULL;
+#else
dimensions_t dims;
precision_e precision;
ie_blob_buffer_t blob_buffer;
- OVContext *ctx = &ov_model->ctx;
IEStatusCode status;
- DNNData input;
ie_blob_t *input_blob = NULL;
- LastLevelTaskItem *lltask;
- TaskItem *task;
+#endif
lltask = ff_queue_peek_front(ov_model->lltask_queue);
av_assert0(lltask);
task = lltask->task;
+#if HAVE_OPENVINO2
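+ // OV2: query the static input port for its shape and element type,
+ // then allocate a host buffer to stage the input data.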
+ if (!ov_model_is_dynamic(ov_model->ov_model)) {
+ ov_output_const_port_free(ov_model->input_port);
+ status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
+ return ov2_map_error(status, NULL);
+ }
+ status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
+ return ov2_map_error(status, NULL);
+ }
+ dims = input_shape.dims;
+ status = ov_port_get_element_type(ov_model->input_port, &precision);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
+ return ov2_map_error(status, NULL);
+ }
+ } else {
+ avpriv_report_missing_feature(ctx, "dynamic model");
+ return AVERROR(ENOSYS);
+ }
+ input.height = dims[2];
+ input.width = dims[3];
+ input.channels = dims[1];
+ input.dt = precision_to_datatype(precision);
+ input.data = av_malloc(input.height * input.width * input.channels * get_datatype_size(input.dt));
+ if (!input.data)
+ return AVERROR(ENOMEM);
+ input_data_ptr = input.data;
+#else
status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
@@ -149,12 +260,12 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
return DNN_GENERIC_ERROR;
}
-
input.height = dims.dims[2];
input.width = dims.dims[3];
input.channels = dims.dims[1];
input.data = blob_buffer.buffer;
input.dt = precision_to_datatype(precision);
+#endif
// all models in openvino open model zoo use BGR as input,
// change to be an option when necessary.
input.order = DCO_BGR;
@@ -187,29 +298,82 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
av_assert0(!"should not reach here");
break;
}
+#if HAVE_OPENVINO2
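+ // Wrap the filled host buffer in an ov_tensor_t (no copy) and set it as the request's input tensor.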
+ status = ov_tensor_create_from_host_ptr(precision, input_shape, input.data, &tensor);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host prt.\n");
+ return ov2_map_error(status, NULL);
+ }
+ status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to Set an input tensor for the model.\n");
+ return ov2_map_error(status, NULL);
+ }
+#endif
input.data = (uint8_t *)input.data
+ input.width * input.height * input.channels * get_datatype_size(input.dt);
}
+#if HAVE_OPENVINO2
+ av_freep(&input_data_ptr);
+#else
ie_blob_free(&input_blob);
+#endif
return 0;
}
static void infer_completion_callback(void *args)
{
- dimensions_t dims;
- precision_e precision;
- IEStatusCode status;
OVRequestItem *request = args;
LastLevelTaskItem *lltask = request->lltasks[0];
TaskItem *task = lltask->task;
OVModel *ov_model = task->model;
SafeQueue *requestq = ov_model->request_queue;
- ie_blob_t *output_blob = NULL;
- ie_blob_buffer_t blob_buffer;
DNNData output;
OVContext *ctx = &ov_model->ctx;
+#if HAVE_OPENVINO2
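+ // OV2: read the data pointer, shape and element type directly from output tensor 0;
+ // the legacy path below goes through ie_blob_t.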
+ int64_t* dims;
+ ov_status_e status;
+ ov_tensor_t *output_tensor;
+ ov_shape_t output_shape = {0};
+ ov_element_type_e precision;
+
+ status = ov_infer_request_get_output_tensor_by_index(request->infer_request, 0, &output_tensor);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Failed to get output tensor.");
+ return;
+ }
+
+ status = ov_tensor_data(output_tensor, &output.data);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Failed to get output data.");
+ return;
+ }
+ status = ov_tensor_get_shape(output_tensor, &output_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
+ return;
+ }
+ dims = output_shape.dims;
+
+ status = ov_port_get_element_type(ov_model->output_port, &precision);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
+ return;
+ }
+ output.channels = dims[1];
+ output.height = dims[2];
+ output.width = dims[3];
+ av_assert0(request->lltask_count <= dims[0]);
+#else
+ IEStatusCode status;
+ dimensions_t dims;
+ ie_blob_t *output_blob = NULL;
+ ie_blob_buffer_t blob_buffer;
+ precision_e precision;
status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR,
@@ -232,14 +396,14 @@ static void infer_completion_callback(void *args)
av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
return;
}
-
+ output.data = blob_buffer.buffer;
output.channels = dims.dims[1];
output.height = dims.dims[2];
output.width = dims.dims[3];
+ av_assert0(request->lltask_count <= dims.dims[0]);
+#endif
output.dt = precision_to_datatype(precision);
- output.data = blob_buffer.buffer;
- av_assert0(request->lltask_count <= dims.dims[0]);
av_assert0(request->lltask_count >= 1);
for (int i = 0; i < request->lltask_count; ++i) {
task = request->lltasks[i]->task;
@@ -281,11 +445,16 @@ static void infer_completion_callback(void *args)
output.data = (uint8_t *)output.data
+ output.width * output.height * output.channels * get_datatype_size(output.dt);
}
+#if !HAVE_OPENVINO2
ie_blob_free(&output_blob);
-
+#endif
request->lltask_count = 0;
if (ff_safe_queue_push_back(requestq, request) < 0) {
+#if HAVE_OPENVINO2
+ ov_infer_request_free(request->infer_request);
+#else
ie_infer_request_free(&request->infer_request);
+#endif
av_freep(&request);
av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
return;
@@ -299,7 +468,11 @@ static void dnn_free_model_ov(DNNModel **model)
while (ff_safe_queue_size(ov_model->request_queue) != 0) {
OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
if (item && item->infer_request) {
+#if HAVE_OPENVINO2
+ ov_infer_request_free(item->infer_request);
+#else
ie_infer_request_free(&item->infer_request);
+#endif
}
av_freep(&item->lltasks);
av_freep(&item);
@@ -319,13 +492,23 @@ static void dnn_free_model_ov(DNNModel **model)
av_freep(&item);
}
ff_queue_destroy(ov_model->task_queue);
-
+#if HAVE_OPENVINO2
+ if (ov_model->input_port)
+ ov_output_const_port_free(ov_model->input_port);
+ if (ov_model->output_port)
+ ov_output_const_port_free(ov_model->output_port);
+ if (ov_model->preprocess)
+ ov_preprocess_prepostprocessor_free(ov_model->preprocess);
+ if (ov_model->compiled_model)
+ ov_compiled_model_free(ov_model->compiled_model);
+ if (ov_model->ov_model)
+ ov_model_free(ov_model->ov_model);
+ if (ov_model->core)
+ ov_core_free(ov_model->core);
+#else
if (ov_model->exe_network)
ie_exec_network_free(&ov_model->exe_network);
if (ov_model->network)
ie_network_free(&ov_model->network);
if (ov_model->core)
ie_core_free(&ov_model->core);
+#endif
av_freep(&ov_model);
av_freep(model);
}
@@ -336,16 +519,106 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
{
int ret = 0;
OVContext *ctx = &ov_model->ctx;
+#if HAVE_OPENVINO2
+ ov_status_e status;
+ ov_preprocess_input_tensor_info_t* input_tensor_info;
+ ov_preprocess_output_tensor_info_t* output_tensor_info;
+ ov_model_t *tmp_ov_model = NULL;
+ ov_layout_t* NHWC_layout = NULL;
+ const char* NHWC_desc = "NHWC";
+ const char* device = ctx->options.device_type;
+#else
IEStatusCode status;
ie_available_devices_t a_dev;
ie_config_t config = {NULL, NULL, NULL};
char *all_dev_names = NULL;
+#endif
// batch size
if (ctx->options.batch_size <= 0) {
ctx->options.batch_size = 1;
}
+#if HAVE_OPENVINO2
+ if (ctx->options.batch_size > 1) {
+ avpriv_report_missing_feature(ctx, "batch_size > 1");
+ av_log(ctx, AV_LOG_WARNING, "Forcing batch_size to 1.\n");
+ ctx->options.batch_size = 1;
+ }
+
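+ // Set up the OV2 PrePostProcessor: declare the input tensor as NHWC (and U8 for
+ // detect/classify), the output as F32, then rebuild the model with these steps baked in.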
+ status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
+ status |= ov_preprocess_prepostprocessor_get_output_info_by_name(ov_model->preprocess, output_name, &ov_model->output_info);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input/output info from preprocess.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
+ status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ //set input layout
+ status = ov_layout_create(NHWC_desc, &NHWC_layout);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ // set input precision to U8 only for detect and classify
+ if (ov_model->model->func_type != DFT_PROCESS_FRAME)
+ status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
+ status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to set input/output element type\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ //update model
+ if (ov_model->ov_model)
+ tmp_ov_model = ov_model->ov_model;
+ status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+ if (tmp_ov_model)
+ ov_model_free(tmp_ov_model);
+ //update output_port
+ if (ov_model->output_port)
+ ov_output_const_port_free(ov_model->output_port);
+ status = ov_model_const_output_by_name(ov_model->ov_model, output_name, &ov_model->output_port);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get output port.\n");
+ goto err;
+ }
+ //compile network
+ status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to compile the model.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+#else
if (ctx->options.batch_size > 1) {
input_shapes_t input_shapes;
status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
@@ -420,7 +693,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
ret = AVERROR(ENODEV);
goto err;
}
-
+#endif
// create infer_requests for async execution
if (ctx->options.nireq <= 0) {
// the default value is a rough estimation
@@ -440,7 +713,11 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
goto err;
}
+#if HAVE_OPENVINO2
+ item->callback.callback_func = infer_completion_callback;
+#else
item->callback.completeCallBackFunc = infer_completion_callback;
+#endif
item->callback.args = item;
if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
av_freep(&item);
@@ -448,11 +725,19 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
goto err;
}
+#if HAVE_OPENVINO2
+ status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to Creates an inference request object.\n");
+ goto err;
+ }
+#else
status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
if (status != OK) {
ret = DNN_GENERIC_ERROR;
goto err;
}
+#endif
item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
if (!item->lltasks) {
@@ -483,7 +768,11 @@ err:
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
{
+#if HAVE_OPENVINO2
+ ov_status_e status;
+#else
IEStatusCode status;
+#endif
LastLevelTaskItem *lltask;
int ret = 0;
TaskItem *task;
@@ -491,7 +780,11 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
OVModel *ov_model;
if (ff_queue_size(inferenceq) == 0) {
+#if HAVE_OPENVINO2
+ ov_infer_request_free(request->infer_request);
+#else
ie_infer_request_free(&request->infer_request);
+#endif
av_freep(&request);
return 0;
}
@@ -501,11 +794,39 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
ov_model = task->model;
ctx = &ov_model->ctx;
+ ret = fill_model_input_ov(ov_model, request);
+ if (ret != 0) {
+ goto err;
+ }
+
+#if HAVE_OPENVINO2
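+ // OV2 async: attach the completion callback and start the request;
+ // sync: run inference inline and invoke the callback directly.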
if (task->async) {
- ret = fill_model_input_ov(ov_model, request);
- if (ret != 0) {
+ status = ov_infer_request_set_callback(request->infer_request, &request->callback);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+
+ status = ov_infer_request_start_async(request->infer_request);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+ return 0;
+ } else {
+ status = ov_infer_request_infer(request->infer_request);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
+ ret = ov2_map_error(status, NULL);
goto err;
}
+ infer_completion_callback(request);
+ return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
+ }
+#else
+ if (task->async) {
status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
@@ -520,10 +841,6 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
}
return 0;
} else {
- ret = fill_model_input_ov(ov_model, request);
- if (ret != 0) {
- goto err;
- }
status = ie_infer_request_infer(request->infer_request);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
@@ -533,9 +850,14 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
infer_completion_callback(request);
return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
}
+#endif
err:
if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
+#if HAVE_OPENVINO2
+ ov_infer_request_free(request->infer_request);
+#else
ie_infer_request_free(&request->infer_request);
+#endif
av_freep(&request);
}
return ret;
@@ -545,19 +867,54 @@ static int get_input_ov(void *model, DNNData *input, const char *input_name)
{
OVModel *ov_model = model;
OVContext *ctx = &ov_model->ctx;
+ int input_resizable = ctx->options.input_resizable;
+
+#if HAVE_OPENVINO2
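+ // OV2: read shape and element type straight from the model's const input port.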
+ ov_shape_t input_shape = {0};
+ ov_element_type_e precision;
+ int64_t* dims;
+ ov_status_e status;
+ if (!ov_model_is_dynamic(ov_model->ov_model)) {
+ status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
+ return ov2_map_error(status, NULL);
+ }
+
+ status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
+ return ov2_map_error(status, NULL);
+ }
+ dims = input_shape.dims;
+
+ status = ov_port_get_element_type(ov_model->input_port, &precision);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
+ return ov2_map_error(status, NULL);
+ }
+ } else {
+ avpriv_report_missing_feature(ctx, "dynamic model");
+ return AVERROR(ENOSYS);
+ }
+
+ input->channels = dims[1];
+ input->height = input_resizable ? -1 : dims[2];
+ input->width = input_resizable ? -1 : dims[3];
+ input->dt = precision_to_datatype(precision);
+
+ return 0;
+#else
char *model_input_name = NULL;
IEStatusCode status;
size_t model_input_count = 0;
dimensions_t dims;
precision_e precision;
- int input_resizable = ctx->options.input_resizable;
-
status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
return DNN_GENERIC_ERROR;
}
-
for (size_t i = 0; i < model_input_count; i++) {
status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
if (status != OK) {
@@ -585,6 +942,7 @@ static int get_input_ov(void *model, DNNData *input, const char *input_name)
av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
return AVERROR(EINVAL);
+#endif
}
static int contain_valid_detection_bbox(AVFrame *frame)
@@ -693,13 +1051,20 @@ static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Q
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
const char *output_name, int *output_width, int *output_height)
{
+#if HAVE_OPENVINO2
+ ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
+ ov_status_e status;
+ ov_shape_t input_shape = {0};
+ ov_partial_shape_t partial_shape;
+#else
+ IEStatusCode status;
+ input_shapes_t input_shapes;
+#endif
int ret;
OVModel *ov_model = model;
OVContext *ctx = &ov_model->ctx;
TaskItem task;
OVRequestItem *request;
- IEStatusCode status;
- input_shapes_t input_shapes;
DNNExecBaseParams exec_params = {
.input_name = input_name,
.output_names = &output_name,
@@ -713,6 +1078,46 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
return AVERROR(EINVAL);
}
+#if HAVE_OPENVINO2
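+ // With input_resizable, rewrite the static H/W dims through a partial shape and
+ // reshape the model before it is compiled.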
+ if (ctx->options.input_resizable) {
+ if (!ov_model_is_dynamic(ov_model->ov_model)) {
+ status = ov_partial_shape_create(4, dims, &partial_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed create partial shape.\n");
+ goto err;
+ }
+ status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get input shape for model input resize.\n");
+ ret = ov2_map_error(status, NULL);
+ goto err;
+ }
+ input_shape.dims[2] = input_height;
+ input_shape.dims[3] = input_width;
+
+ status = ov_shape_to_partial_shape(input_shape, &partial_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed create partial shape for model input resize.\n");
+ goto err;
+ }
+
+ status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to reszie model input.\n");
+ goto err;
+ }
+ } else {
+ avpriv_report_missing_feature(ctx, "dynamic model");
+ ret = AVERROR(ENOSYS);
+ goto err;
+ }
+ }
+
+ status = ov_model_const_output_by_name(ov_model->ov_model, output_name, &ov_model->output_port);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to get output port.\n");
+ goto err;
+ }
+ if (!ov_model->compiled_model) {
+#else
if (ctx->options.input_resizable) {
status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
input_shapes.shapes->shape.dims[2] = input_height;
@@ -724,8 +1129,8 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
return DNN_GENERIC_ERROR;
}
}
-
if (!ov_model->exe_network) {
+#endif
ret = init_model_ov(ov_model, input_name, output_name);
if (ret != 0) {
av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
@@ -765,9 +1170,15 @@ static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType f
DNNModel *model = NULL;
OVModel *ov_model = NULL;
OVContext *ctx = NULL;
- IEStatusCode status;
+#if HAVE_OPENVINO2
+ ov_core_t* core = NULL;
+ ov_model_t* ovmodel = NULL;
+ ov_status_e status;
+#else
size_t node_count = 0;
char *node_name = NULL;
+ IEStatusCode status;
+#endif
model = av_mallocz(sizeof(DNNModel));
if (!model){
@@ -783,8 +1194,6 @@ static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType f
ov_model->model = model;
ov_model->ctx.class = &dnn_openvino_class;
ctx = &ov_model->ctx;
- ov_model->all_input_names = NULL;
- ov_model->all_output_names = NULL;
//parse options
av_opt_set_defaults(ctx);
@@ -793,6 +1202,31 @@ static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType f
goto err;
}
+#if HAVE_OPENVINO2
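+ // OV2 load path: create a core and read the model; on failure, log the runtime
+ // version to help diagnose model/runtime mismatches.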
+ status = ov_core_create(&core);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create OpenVINO core.\n");
+ goto err;
+ }
+
+ status = ov_core_read_model(core, model_filename, NULL, &ovmodel);
+ if (status != OK) {
+ ov_version_t ver;
+ status = ov_get_openvino_version(&ver);
+ av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s.\n"
+ "Please check that the model version matches the runtime OpenVINO version:\n",
+ model_filename);
+ if (status == OK) {
+ av_log(ctx, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
+ }
+ ov_version_free(&ver);
+ goto err;
+ }
+ ov_model->ov_model = ovmodel;
+ ov_model->core = core;
+#else
+ ov_model->all_input_names = NULL;
+ ov_model->all_output_names = NULL;
+
status = ie_core_create("", &ov_model->core);
if (status != OK)
goto err;
@@ -835,6 +1269,7 @@ static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType f
}
APPEND_STRING(ov_model->all_output_names, node_name)
}
+#endif
model->get_input = &get_input_ov;
model->get_output = &get_output_ov;
@@ -862,7 +1297,11 @@ static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_p
return ret;
}
+#if HAVE_OPENVINO2
+ if (!ov_model->compiled_model) {
+#else
if (!ov_model->exe_network) {
+#endif
ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
if (ret != 0) {
av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
@@ -943,7 +1382,11 @@ static int dnn_flush_ov(const DNNModel *model)
OVModel *ov_model = model->model;
OVContext *ctx = &ov_model->ctx;
OVRequestItem *request;
+#if HAVE_OPENVINO2
+ ov_status_e status;
+#else
IEStatusCode status;
+#endif
int ret;
if (ff_queue_size(ov_model->lltask_queue) == 0) {
@@ -962,6 +1405,13 @@ static int dnn_flush_ov(const DNNModel *model)
av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
return ret;
}
+#if HAVE_OPENVINO2
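+ // OV2 flush: run the pending request synchronously.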
+ status = ov_infer_request_infer(request->infer_request);
+ if (status != OK) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
+ return ov2_map_error(status, NULL);
+ }
+#else
status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
@@ -972,6 +1422,7 @@ static int dnn_flush_ov(const DNNModel *model)
av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
return DNN_GENERIC_ERROR;
}
+#endif
return 0;
}