#include "../internal.h"
#include <openvino/c/openvino.h>
#include <c_api/ie_c_api.h>
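/* This backend carries two code paths: the OpenVINO 2.0 C API (ov_* calls from
 * <openvino/c/openvino.h>) and the legacy Inference Engine C API (ie_* calls from
 * <c_api/ie_c_api.h>). The fragments below show both variants; which one is
 * compiled is presumably decided at build time. */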
/* Build a space-separated list by re-allocating the target string with av_asprintf
 * on every call; used below to concatenate the names of the available devices.
 * Note: the previously allocated buffer is overwritten without being freed. */
#define APPEND_STRING(generated_string, iterate_string)                                            \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);
#define OFFSET(x) offsetof(OVContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
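/* AVOptions exposed by this backend; "scale" and "mean" feed the OpenVINO
 * preprocessing steps configured further down (convert to F32, subtract the mean,
 * divide by the scale). */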
    { "scale", "Add scale preprocess operation. Divide each element of the input by the specified value.",
        OFFSET(options.scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS },
    { "mean",  "Add mean preprocess operation. Subtract the specified value from each element of the input.",
        OFFSET(options.mean),  AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS },
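/* Map ov_status_e codes returned by the OpenVINO C API to AVERROR codes and
 * short human-readable descriptions. */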
static const struct {
    { OK,                     0,                  "success" },
    { NOT_IMPLEMENTED,        AVERROR(ENOSYS),    "not implemented" },
    { PARAMETER_MISMATCH,     AVERROR(EINVAL),    "parameter mismatch" },
    { OUT_OF_BOUNDS,          AVERROR(EOVERFLOW), "out of bounds" },
    { REQUEST_BUSY,           AVERROR(EBUSY),     "request busy" },
    { RESULT_NOT_READY,       AVERROR(EBUSY),     "result not ready" },
    { NOT_ALLOCATED,          AVERROR(ENODATA),   "not allocated" },
    { INFER_CANCELLED,        AVERROR(ECANCELED), "infer cancelled" },
    { INVALID_C_PARAM,        AVERROR(EINVAL),    "invalid C parameter" },
    { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS),    "C method not implemented" },
    *desc = "unknown error";

        return sizeof(float);
        return sizeof(uint8_t);
    ov_tensor_t* tensor = NULL;
    ov_shape_t input_shape = {0};
    ov_element_type_e precision;

    precision_e precision;
    ie_blob_buffer_t blob_buffer;
    ie_blob_t *input_blob = NULL;

    ov_output_const_port_free(ov_model->input_port);

    dims = input_shape.dims;
    ov_shape_free(&input_shape);
    for (int i = 0; i < input_shape.rank; i++)

    status |= ie_blob_get_dims(input_blob, &dims);
    status |= ie_blob_get_precision(input_blob, &precision);
    ie_blob_free(&input_blob);
    status = ie_blob_get_buffer(input_blob, &blob_buffer);
    ie_blob_free(&input_blob);
    for (int i = 0; i < input_shape.rank; i++)

    input.data = blob_buffer.buffer;
    for (int i = 0; i < ctx->options.batch_size; ++i) {

        ov_tensor_free(tensor);
        status = ov_tensor_create(precision, input_shape, &tensor);
        ov_shape_free(&input_shape);

    ov_tensor_free(tensor);
    ie_blob_free(&input_blob);
    ov_tensor_t *output_tensor;
    ov_shape_t output_shape = {0};
    ov_element_type_e precision;

               "Failed to get output tensor.");
               "Failed to get output data.");

    status = ov_tensor_get_shape(output_tensor, &output_shape);
    dims = output_shape.dims;
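    /* Fill dims[1..3] from the last three dimensions of the output shape (typically
     * channels, height and width), padding with 1 when the tensor rank is smaller. */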
    outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
    outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
    outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
    ov_shape_free(&output_shape);
    ov_tensor_free(output_tensor);
    output_tensor = NULL;

    ie_blob_t *output_blob = NULL;
    ie_blob_buffer_t blob_buffer;
    precision_e precision;
429 "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
    status = ie_blob_get_buffer(output_blob, &blob_buffer);
    ie_blob_free(&output_blob);
    status |= ie_blob_get_dims(output_blob, &dims);
    status |= ie_blob_get_precision(output_blob, &precision);
    ie_blob_free(&output_blob);

    output.data = blob_buffer.buffer;
    for (int i = 0; i < 4; i++)

    for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)

    ov_shape_free(&output_shape);
    ov_tensor_free(output_tensor);
    ie_blob_free(&output_blob);

    if (!model || !*model)

    ov_model = (*model)->model;
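    /* Tear-down: the OpenVINO 2.0 path releases its port, pre/post-processor and core
     * objects, while the legacy path frees the Inference Engine network and core. */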
    ov_output_const_port_free(ov_model->input_port);
    ov_preprocess_prepostprocessor_free(ov_model->preprocess);
    ov_core_free(ov_model->core);

    ie_network_free(&ov_model->network);
    ie_core_free(&ov_model->core);
    ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
    ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
    ov_preprocess_input_model_info_t* input_model_info = NULL;
    ov_model_t *tmp_ov_model;
    ov_layout_t* NHWC_layout = NULL;
    ov_layout_t* NCHW_layout = NULL;
    const char* NHWC_desc = "NHWC";
    const char* NCHW_desc = "NCHW";
    const char* device = ctx->options.device_type;

    ie_available_devices_t a_dev;
    char *all_dev_names = NULL;
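    /* batch_size sanity checks: non-positive values are clamped to 1 and, in the
     * branch shown here, values greater than 1 are forced back to 1 with a warning. */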
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;

    if (ctx->options.batch_size > 1) {
                   "change batch_size to 1.\n");
        ctx->options.batch_size = 1;
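    /* Input frames are presented as NHWC/U8 tensors, while the model itself may use
     * NCHW; declaring both layouts to the PrePostProcessor lets OpenVINO insert the
     * required layout and element-type conversions. */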
    status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);

    status = ov_layout_create(NHWC_desc, &NHWC_layout);
    status |= ov_layout_create(NCHW_desc, &NCHW_layout);

    status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);

    status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);

    status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
    status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);

    status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
    nb_outputs = output_size;
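    /* For each model output, fetch its tensor info through the PrePostProcessor and
     * request the element type handed back to the caller: F32 in most branches, U8 in
     * one branch (presumably for outputs consumed as 8-bit image data). */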
    for (int i = 0; i < nb_outputs; i++) {
        status = ov_preprocess_prepostprocessor_get_output_info_by_name(
        status = ov_preprocess_prepostprocessor_get_output_info_by_index(

        status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);

        status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
        status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
        status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
        ov_preprocess_output_tensor_info_free(output_tensor_info);
        output_tensor_info = NULL;
        ov_preprocess_output_info_free(ov_model->output_info);
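    /* Preprocessing steps applied to the input: convert to F32, then subtract the
     * user-supplied mean and divide by the user-supplied scale (the "mean" and
     * "scale" AVOptions above). */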
    ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
    status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);

    status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
    status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->options.mean);
    status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->options.scale);
    ov_preprocess_preprocess_steps_free(input_process_steps);
    input_process_steps = NULL;

    ov_preprocess_preprocess_steps_free(input_process_steps);
    input_process_steps = NULL;

    ov_preprocess_input_tensor_info_free(input_tensor_info);
    input_tensor_info = NULL;
    ov_preprocess_input_info_free(ov_model->input_info);

    ov_model_free(tmp_ov_model);

    ov_model_free(tmp_ov_model);

    for (int i = 0; i < nb_outputs; i++) {

    for (int i = 0; i < nb_outputs; i++) {
        status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],

    ov_preprocess_input_model_info_free(input_model_info);
    input_model_info = NULL;
    ov_layout_free(NCHW_layout);
    ov_layout_free(NHWC_layout);
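    /* Legacy-API batching: set the first (batch) dimension of every input shape to
     * the requested batch_size and reshape the network accordingly. */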
    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);

        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status == NOT_FOUND) {

    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status == NOT_FOUND) {

    status = ie_network_set_input_precision(ov_model->network, input_name, U8);

    status = ie_core_get_available_devices(ov_model->core, &a_dev);

    for (int i = 0; i < a_dev.num_devices; i++) {
        APPEND_STRING(all_dev_names, a_dev.devices[i])

               ctx->options.device_type, all_dev_names);

    if (ctx->options.nireq <= 0) {

    for (int i = 0; i < ctx->options.nireq; i++) {

    if (output_tensor_info)
        ov_preprocess_output_tensor_info_free(output_tensor_info);

    ov_preprocess_output_info_free(ov_model->output_info);

    ov_layout_free(NCHW_layout);

    ov_layout_free(NHWC_layout);
    if (input_model_info)
        ov_preprocess_input_model_info_free(input_model_info);
    task = lltask->task;
    ov_model = task->model;

    int input_resizable = ctx->options.input_resizable;

    ov_shape_t input_shape = {0};
    ov_element_type_e precision;
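    /* Copy the model's input shape into the DNNData dims; the dims[1] <= 3 check
     * presumably distinguishes a channels-first (NCHW) layout, where at most three
     * channels are expected in that position, from NHWC. */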
    for (int i = 0; i < 4; i++)
        input->dims[i] = input_shape.dims[i];
    if (input_resizable) {

    if (input_shape.dims[1] <= 3)

    ov_shape_free(&input_shape);
    char *model_input_name = NULL;
    size_t model_input_count = 0;
    precision_e precision;

    status = ie_network_get_inputs_number(ov_model->network, &model_input_count);

    for (size_t i = 0; i < model_input_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &model_input_name);

        if (strcmp(model_input_name, input_name) == 0) {
            ie_network_name_free(&model_input_name);
            status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
            status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);

            for (int i = 0; i < 4; i++)
                input->dims[i] = input_shape.dims[i];
            if (input_resizable) {

            if (input_shape.dims[1] <= 3)

        ie_network_name_free(&model_input_name);
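    /* Reject bounding boxes that lie outside the frame before processing them further. */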
    for (uint32_t i = 0; i < header->nb_bboxes; i++) {

        if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {

        if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
    switch (func_type) {

    lltask->task = task;

    lltask->task = task;
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                         const char *output_name, int *output_width, int *output_height)
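    /* Each ov_dimension_t is a {min, max} range, so equal bounds pin a dimension to a
     * fixed value: the first two dimensions are pinned to 1 and the last two to the
     * requested input height and width, used below to reshape a resizable input. */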
    ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
    ov_shape_t input_shape = {0};
    ov_partial_shape_t partial_shape;

    input_shapes_t input_shapes;

        .output_names = output_name ? &output_name : NULL,

    if (ctx->options.input_resizable) {
        status = ov_partial_shape_create(4, dims, &partial_shape);

        input_shape.dims[2] = input_height;
        input_shape.dims[3] = input_width;
        status = ov_shape_to_partial_shape(input_shape, &partial_shape);
        ov_shape_free(&input_shape);

    status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
    ov_partial_shape_free(&partial_shape);

    if (ctx->options.input_resizable) {
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
    ov_core_t* core = NULL;
    ov_model_t* ovmodel = NULL;

    size_t node_count = 0;
    char *node_name = NULL;

    model->model = ov_model;
    ov_model->model = model;
    ov_model->ctx.class = &dnn_openvino_class;
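    /* OpenVINO 2.0 path: create the core and read the model file; if reading fails,
     * the runtime's OpenVINO version is logged to help spot a model/runtime version
     * mismatch. The legacy path below does the same with the Inference Engine API. */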
    status = ov_core_create(&core);

    ov_model->core = core;

    status = ov_core_read_model(core, model_filename, NULL, &ovmodel);

    status = ov_get_openvino_version(&ver);
               "Please check if the model version matches the runtime OpenVINO Version:\n",

    ov_version_free(&ver);
    status = ie_core_create("", &ov_model->core);

    ver = ie_c_api_version();
               "Please check if the model version matches the runtime OpenVINO %s\n",
               model_filename, ver.api_version);
    ie_version_free(&ver);

    status = ie_network_get_inputs_number(ov_model->network, &node_count);

    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &node_name);

        ie_network_name_free(&node_name);

    status = ie_network_get_outputs_number(ov_model->network, &node_count);

    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_output_name(ov_model->network, i, &node_name);

        ie_network_name_free(&node_name);

    if (ctx->options.async) {

    if (ctx->options.batch_size > 1) {
static int dnn_flush_ov(const DNNModel *model)
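/* Entry points this backend exposes to the generic DNN layer through the DNNModule
 * function table. */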
    .execute_model = dnn_execute_model_ov,
    .get_result    = dnn_get_result_ov,
    .flush         = dnn_flush_ov,