FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_io_proc.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/avstring.h"
33 #include "../internal.h"
34 #include "safe_queue.h"
35 #if HAVE_OPENVINO2
36 #include <openvino/c/openvino.h>
37 #else
38 #include <c_api/ie_c_api.h>
39 #endif
40 #include "dnn_backend_common.h"
41 
42 typedef struct OVOptions{
43  char *device_type;
44  int nireq;
45  uint8_t async;
49  float scale;
50  float mean;
51 } OVOptions;
52 
53 typedef struct OVContext {
54  const AVClass *class;
56 } OVContext;
57 
58 typedef struct OVModel{
61 #if HAVE_OPENVINO2
62  ov_core_t *core;
63  ov_model_t *ov_model;
64  ov_compiled_model_t *compiled_model;
65  ov_output_const_port_t* input_port;
66  ov_preprocess_input_info_t* input_info;
67  ov_output_const_port_t** output_ports;
68  ov_preprocess_output_info_t* output_info;
69  ov_preprocess_prepostprocessor_t* preprocess;
70 #else
71  ie_core_t *core;
72  ie_network_t *network;
73  ie_executable_network_t *exe_network;
74  const char *all_input_names;
75  const char *all_output_names;
76 #endif
77  SafeQueue *request_queue; // holds OVRequestItem
78  Queue *task_queue; // holds TaskItem
79  Queue *lltask_queue; // holds LastLevelTaskItem
81 } OVModel;
82 
83 // one request for one call to openvino
84 typedef struct OVRequestItem {
86  uint32_t lltask_count;
87 #if HAVE_OPENVINO2
88  ov_infer_request_t *infer_request;
89  ov_callback_t callback;
90 #else
91  ie_complete_call_back_t callback;
92  ie_infer_request_t *infer_request;
93 #endif
95 
/* Append iterate_string to generated_string, space-separated, by allocating a
 * fresh string with av_asprintf().
 * NOTE(review): the previous generated_string buffer is not freed, so every
 * append leaks the old allocation; only used on a one-shot error path here. */
#define APPEND_STRING(generated_string, iterate_string) \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);
99 
100 #define OFFSET(x) offsetof(OVContext, x)
101 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
102 static const AVOption dnn_openvino_options[] = {
103  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
105  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
106  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
107  { "layout", "input layout of model", OFFSET(options.layout), AV_OPT_TYPE_INT, { .i64 = DL_NONE}, DL_NONE, DL_NHWC, FLAGS, .unit = "layout" },
108  { "none", "none", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NONE }, 0, 0, FLAGS, .unit = "layout"},
109  { "nchw", "nchw", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NCHW }, 0, 0, FLAGS, .unit = "layout"},
110  { "nhwc", "nhwc", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NHWC }, 0, 0, FLAGS, .unit = "layout"},
111  { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(options.scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
112  { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(options.mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
113  { NULL }
114 };
115 
116 AVFILTER_DEFINE_CLASS(dnn_openvino);
117 
118 #if HAVE_OPENVINO2
/* Mapping from OpenVINO 2.x C API status codes to FFmpeg AVERROR codes plus a
 * human-readable description; consulted by ov2_map_error() below. */
static const struct {
    ov_status_e status;
    int av_err;
    const char *desc;
} ov2_errors[] = {
    { OK, 0, "success" },
    { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
    { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
    { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
    { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
    { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
    { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
    { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
    { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
    { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
    { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
    { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
    { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
    { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
    { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
    { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
    { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "not implement C method" },
    { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
};
143 
144 static int ov2_map_error(ov_status_e status, const char **desc)
145 {
146  int i;
147  for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
148  if (ov2_errors[i].status == status) {
149  if (desc)
150  *desc = ov2_errors[i].desc;
151  return ov2_errors[i].av_err;
152  }
153  }
154  if (desc)
155  *desc = "unknown error";
156  return AVERROR_UNKNOWN;
157 }
158 #endif
159 
160 #if HAVE_OPENVINO2
161 static DNNDataType precision_to_datatype(ov_element_type_e precision)
162 #else
163 static DNNDataType precision_to_datatype(precision_e precision)
164 #endif
165 {
166  switch (precision)
167  {
168 #if HAVE_OPENVINO2
169  case F32:
170 #else
171  case FP32:
172 #endif
173  return DNN_FLOAT;
174  case U8:
175  return DNN_UINT8;
176  default:
177  av_assert0(!"not supported yet.");
178  return DNN_FLOAT;
179  }
180 }
181 
183 {
184  switch (dt)
185  {
186  case DNN_FLOAT:
187  return sizeof(float);
188  case DNN_UINT8:
189  return sizeof(uint8_t);
190  default:
191  av_assert0(!"not supported yet.");
192  return 1;
193  }
194 }
195 
196 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
197 {
198  DNNData input;
199  LastLevelTaskItem *lltask;
200  TaskItem *task;
201  OVContext *ctx = &ov_model->ctx;
202 #if HAVE_OPENVINO2
203  int64_t* dims;
204  ov_status_e status;
205  ov_tensor_t* tensor = NULL;
206  ov_shape_t input_shape = {0};
207  ov_element_type_e precision;
208  char *port_name;
209 #else
210  dimensions_t dims;
211  precision_e precision;
212  ie_blob_buffer_t blob_buffer;
213  IEStatusCode status;
214  ie_blob_t *input_blob = NULL;
215 #endif
216 
217  memset(&input, 0, sizeof(input));
218  lltask = ff_queue_peek_front(ov_model->lltask_queue);
219  av_assert0(lltask);
220  task = lltask->task;
221 
222 #if HAVE_OPENVINO2
223  if (ov_model->input_port) {
224  ov_output_const_port_free(ov_model->input_port);
225  ov_model->input_port = NULL;
226  }
227  if (task->input_name)
228  status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
229  else
230  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
231  if (status != OK) {
232  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
233  return ov2_map_error(status, NULL);
234  }
235  status = ov_port_get_any_name(ov_model->input_port, &port_name);
236  if (status != OK) {
237  av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
238  return ov2_map_error(status, NULL);
239  }
240  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
241  ov_free(port_name);
242  port_name = NULL;
243 
244  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
245  if (status != OK) {
246  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
247  return ov2_map_error(status, NULL);
248  }
249  dims = input_shape.dims;
250  status = ov_port_get_element_type(ov_model->input_port, &precision);
251  if (status != OK) {
252  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
253  ov_shape_free(&input_shape);
254  return ov2_map_error(status, NULL);
255  }
256  for (int i = 0; i < input_shape.rank; i++)
257  input.dims[i] = dims[i];
258  input.layout = DL_NHWC;
259  input.dt = precision_to_datatype(precision);
260 #else
261  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
262  if (status != OK) {
263  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
264  return DNN_GENERIC_ERROR;
265  }
266 
267  status |= ie_blob_get_dims(input_blob, &dims);
268  status |= ie_blob_get_precision(input_blob, &precision);
269  if (status != OK) {
270  ie_blob_free(&input_blob);
271  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
272  return DNN_GENERIC_ERROR;
273  }
274 
275  status = ie_blob_get_buffer(input_blob, &blob_buffer);
276  if (status != OK) {
277  ie_blob_free(&input_blob);
278  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
279  return DNN_GENERIC_ERROR;
280  }
281  for (int i = 0; i < input_shape.rank; i++)
282  input.dims[i] = dims[i];
283  input.layout = DL_NCHW;
284  input.data = blob_buffer.buffer;
285  input.dt = precision_to_datatype(precision);
286 #endif
287  // all models in openvino open model zoo use BGR as input,
288  // change to be an option when necessary.
289  input.order = DCO_BGR;
290  // We use preprocess_steps to scale input data, so disable scale and mean here.
291  input.scale = 1;
292  input.mean = 0;
293 
294  for (int i = 0; i < ctx->options.batch_size; ++i) {
295  lltask = ff_queue_pop_front(ov_model->lltask_queue);
296  if (!lltask) {
297  break;
298  }
299  request->lltasks[i] = lltask;
300  request->lltask_count = i + 1;
301  task = lltask->task;
302 #if HAVE_OPENVINO2
303  if (tensor)
304  ov_tensor_free(tensor);
305  status = ov_tensor_create(precision, input_shape, &tensor);
306  ov_shape_free(&input_shape);
307  if (status != OK) {
308  av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host prt.\n");
309  return ov2_map_error(status, NULL);
310  }
311  status = ov_tensor_data(tensor, &input.data);
312  if (status != OK) {
313  av_log(ctx, AV_LOG_ERROR, "Failed to get input data.\n");
314  return ov2_map_error(status, NULL);
315  }
316  status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
317  if (status != OK) {
318  av_log(ctx, AV_LOG_ERROR, "Failed to Set an input tensor for the model.\n");
319  return ov2_map_error(status, NULL);
320  }
321 #endif
322  switch (ov_model->model->func_type) {
323  case DFT_PROCESS_FRAME:
324  if (task->do_ioproc) {
325  if (ov_model->model->frame_pre_proc != NULL) {
326  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
327  } else {
329  }
330  }
331  break;
334  break;
337  break;
338  default:
339  av_assert0(!"should not reach here");
340  break;
341  }
342  input.data = (uint8_t *)input.data +
343  input.dims[1] * input.dims[2] * input.dims[3] * get_datatype_size(input.dt);
344  }
345 #if HAVE_OPENVINO2
346  ov_tensor_free(tensor);
347 #else
348  ie_blob_free(&input_blob);
349 #endif
350 
351  return 0;
352 }
353 
354 static void infer_completion_callback(void *args)
355 {
356  OVRequestItem *request = args;
357  LastLevelTaskItem *lltask = request->lltasks[0];
358  TaskItem *task = lltask->task;
359  OVModel *ov_model = task->model;
360  SafeQueue *requestq = ov_model->request_queue;
361  DNNData *outputs;
362  OVContext *ctx = &ov_model->ctx;
363 #if HAVE_OPENVINO2
364  size_t* dims;
365  ov_status_e status;
366  ov_tensor_t *output_tensor;
367  ov_shape_t output_shape = {0};
368  ov_element_type_e precision;
369 
370  outputs = av_calloc(ov_model->nb_outputs, sizeof(*outputs));
371  if (!outputs) {
372  av_log(ctx, AV_LOG_ERROR, "Failed to alloc outputs.");
373  return;
374  }
375 
376  for (int i = 0; i < ov_model->nb_outputs; i++) {
377  status = ov_infer_request_get_tensor_by_const_port(request->infer_request,
378  ov_model->output_ports[i],
379  &output_tensor);
380  if (status != OK) {
382  "Failed to get output tensor.");
383  goto end;
384  }
385 
386  status = ov_tensor_data(output_tensor, &outputs[i].data);
387  if (status != OK) {
389  "Failed to get output data.");
390  goto end;
391  }
392 
393  status = ov_tensor_get_shape(output_tensor, &output_shape);
394  if (status != OK) {
395  av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
396  goto end;
397  }
398  dims = output_shape.dims;
399 
400  status = ov_port_get_element_type(ov_model->output_ports[i], &precision);
401  if (status != OK) {
402  av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
403  goto end;
404  }
405  outputs[i].dt = precision_to_datatype(precision);
406  outputs[i].layout = DL_NCHW;
407  outputs[i].dims[0] = 1;
408  outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
409  outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
410  outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
411  av_assert0(request->lltask_count <= dims[0]);
412  outputs[i].layout = ctx->options.layout;
413  outputs[i].scale = ctx->options.scale;
414  outputs[i].mean = ctx->options.mean;
415  ov_shape_free(&output_shape);
416  ov_tensor_free(output_tensor);
417  output_tensor = NULL;
418  }
419 #else
420  IEStatusCode status;
421  dimensions_t dims;
422  ie_blob_t *output_blob = NULL;
423  ie_blob_buffer_t blob_buffer;
424  precision_e precision;
425  DNNData output;
426  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
427  if (status != OK) {
429  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
430  task->output_names[0], ov_model->all_output_names);
431  return;
432  }
433 
434  status = ie_blob_get_buffer(output_blob, &blob_buffer);
435  if (status != OK) {
436  ie_blob_free(&output_blob);
437  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
438  return;
439  }
440 
441  status |= ie_blob_get_dims(output_blob, &dims);
442  status |= ie_blob_get_precision(output_blob, &precision);
443  if (status != OK) {
444  ie_blob_free(&output_blob);
445  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
446  return;
447  }
448  output.data = blob_buffer.buffer;
449  output.layout = DL_NCHW;
450  for (int i = 0; i < 4; i++)
451  output.dims[i] = dims.dims[i];
452  av_assert0(request->lltask_count <= dims.dims[0]);
453  output.dt = precision_to_datatype(precision);
454  output.layout = ctx->options.layout;
455  output.scale = ctx->options.scale;
456  output.mean = ctx->options.mean;
457  outputs = &output;
458 #endif
459 
460  av_assert0(request->lltask_count >= 1);
461  for (int i = 0; i < request->lltask_count; ++i) {
462  task = request->lltasks[i]->task;
463 
464  switch (ov_model->model->func_type) {
465  case DFT_PROCESS_FRAME:
466  if (task->do_ioproc) {
467  if (ov_model->model->frame_post_proc != NULL) {
468  ov_model->model->frame_post_proc(task->out_frame, outputs, ov_model->model->filter_ctx);
469  } else {
471  }
472  } else {
473  task->out_frame->width =
475  task->out_frame->height =
477  }
478  break;
480  if (!ov_model->model->detect_post_proc) {
481  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
482  goto end;
483  }
484  ov_model->model->detect_post_proc(task->in_frame, outputs,
485  ov_model->nb_outputs,
486  ov_model->model->filter_ctx);
487  break;
489  if (!ov_model->model->classify_post_proc) {
490  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
491  goto end;
492  }
493  for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
494  ov_model->model->classify_post_proc(task->in_frame, outputs,
495  request->lltasks[i]->bbox_index,
496  ov_model->model->filter_ctx);
497  break;
498  default:
499  av_assert0(!"should not reach here");
500  break;
501  }
502 
503  task->inference_done++;
504  av_freep(&request->lltasks[i]);
505  for (int i = 0; i < ov_model->nb_outputs; i++)
506  outputs[i].data = (uint8_t *)outputs[i].data +
507  outputs[i].dims[1] * outputs[i].dims[2] * outputs[i].dims[3] *
509  }
510 end:
511 #if HAVE_OPENVINO2
512  av_freep(&outputs);
513  ov_shape_free(&output_shape);
514  if (output_tensor)
515  ov_tensor_free(output_tensor);
516 #else
517  ie_blob_free(&output_blob);
518 #endif
519  request->lltask_count = 0;
520  if (ff_safe_queue_push_back(requestq, request) < 0) {
521 #if HAVE_OPENVINO2
522  ov_infer_request_free(request->infer_request);
523 #else
524  ie_infer_request_free(&request->infer_request);
525 #endif
526  av_freep(&request);
527  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
528  return;
529  }
530 }
531 
532 static void dnn_free_model_ov(DNNModel **model)
533 {
534  OVModel *ov_model;
535 
536  if (!model || !*model)
537  return;
538 
539  ov_model = (*model)->model;
540  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
542  if (item && item->infer_request) {
543 #if HAVE_OPENVINO2
544  ov_infer_request_free(item->infer_request);
545 #else
546  ie_infer_request_free(&item->infer_request);
547 #endif
548  }
549  av_freep(&item->lltasks);
550  av_freep(&item);
551  }
553 
554  while (ff_queue_size(ov_model->lltask_queue) != 0) {
556  av_freep(&item);
557  }
558  ff_queue_destroy(ov_model->lltask_queue);
559 
560  while (ff_queue_size(ov_model->task_queue) != 0) {
561  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
562  av_frame_free(&item->in_frame);
563  av_frame_free(&item->out_frame);
564  av_freep(&item);
565  }
566  ff_queue_destroy(ov_model->task_queue);
567 #if HAVE_OPENVINO2
568  if (ov_model->input_port)
569  ov_output_const_port_free(ov_model->input_port);
570  for (int i = 0; i < ov_model->nb_outputs; i++)
571  if (ov_model->output_ports[i])
572  ov_output_const_port_free(ov_model->output_ports[i]);
573  av_freep(&ov_model->output_ports);
574  if (ov_model->preprocess)
575  ov_preprocess_prepostprocessor_free(ov_model->preprocess);
576  if (ov_model->compiled_model)
577  ov_compiled_model_free(ov_model->compiled_model);
578  if (ov_model->ov_model)
579  ov_model_free(ov_model->ov_model);
580  if (ov_model->core)
581  ov_core_free(ov_model->core);
582 #else
583  if (ov_model->exe_network)
584  ie_exec_network_free(&ov_model->exe_network);
585  if (ov_model->network)
586  ie_network_free(&ov_model->network);
587  if (ov_model->core)
588  ie_core_free(&ov_model->core);
589  av_free(ov_model->all_output_names);
590  av_free(ov_model->all_input_names);
591 #endif
592  av_opt_free(&ov_model->ctx);
593  av_freep(&ov_model);
594  av_freep(model);
595 }
596 
597 
/**
 * Configure preprocessing, compile/load the model on the selected device and
 * create the pool of inference requests plus the task queues.
 *
 * @param ov_model     model state created by the load function
 * @param input_name   name of the model input, or NULL to use the first input
 * @param output_names array of output names, or NULL to take outputs by index
 * @param nb_outputs   number of outputs; 0 means "use all model outputs"
 * @return 0 on success, a negative AVERROR on failure; on failure the whole
 *         model is torn down via dnn_free_model_ov().
 *
 * NOTE(review): on many error paths below ret is never assigned before
 * "goto err", so the function would return 0 on failure — the assignments
 * (ret = ov2_map_error(status, NULL); / ret = DNN_GENERIC_ERROR;) appear to
 * have been lost in extraction; verify against upstream.
 */
static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
{
    int ret = 0;
    OVContext *ctx = &ov_model->ctx;
#if HAVE_OPENVINO2
    ov_status_e status;
    ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
    ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
    ov_preprocess_input_model_info_t* input_model_info = NULL;
    ov_model_t *tmp_ov_model;
    ov_layout_t* NHWC_layout = NULL;
    ov_layout_t* NCHW_layout = NULL;
    const char* NHWC_desc = "NHWC";
    const char* NCHW_desc = "NCHW";
    const char* device = ctx->options.device_type;
#else
    IEStatusCode status;
    ie_available_devices_t a_dev;
    ie_config_t config = {NULL, NULL, NULL};
    char *all_dev_names = NULL;
#endif
    // We scale pixel by default when do frame processing.
    if (fabsf(ctx->options.scale) < 1e-6f)
        ctx->options.scale = ov_model->model->func_type == DFT_PROCESS_FRAME ? 255 : 1;
    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }
#if HAVE_OPENVINO2
    // OV2 path only supports batch_size == 1 for now
    if (ctx->options.batch_size > 1) {
        avpriv_report_missing_feature(ctx, "Do not support batch_size > 1 for now,"
                                           "change batch_size to 1.\n");
        ctx->options.batch_size = 1;
    }

    status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
        goto err;
    }

    // pick the model input either by name or (if unnamed) the first one
    if (input_name)
        status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
    else
        status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
        goto err;
    }

    status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input.\n");
        goto err;
    }

    //set input layout
    status = ov_layout_create(NHWC_desc, &NHWC_layout);
    status |= ov_layout_create(NCHW_desc, &NCHW_layout);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
        goto err;
    }

    // FFmpeg hands frames to OpenVINO as NHWC
    status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
        goto err;
    }

    status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input model info\n");
        goto err;
    }
    // tell OpenVINO the layout the model itself expects, if the user set one
    if (ctx->options.layout == DL_NCHW)
        status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
    else if (ctx->options.layout == DL_NHWC)
        status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get set input model layout\n");
        goto err;
    }

    // input pixels arrive as 8-bit
    status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set input element type\n");
        goto err;
    }

    // nb_outputs == 0 means "use every model output"
    if (!nb_outputs) {
        size_t output_size;
        status = ov_model_outputs_size(ov_model->ov_model, &output_size);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
            goto err;
        }
        nb_outputs = output_size;
    }
    ov_model->nb_outputs = nb_outputs;
    // configure the element type of each output tensor
    for (int i = 0; i < nb_outputs; i++) {
        if (output_names)
            status = ov_preprocess_prepostprocessor_get_output_info_by_name(
                    ov_model->preprocess, output_names[i], &ov_model->output_info);
        else
            status = ov_preprocess_prepostprocessor_get_output_info_by_index(
                    ov_model->preprocess, i, &ov_model->output_info);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
            goto err;
        }
        status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
            goto err;
        }
        // analytics outputs are float; frame outputs are U8 unless scale/mean
        // post-scaling forces float
        if (ov_model->model->func_type != DFT_PROCESS_FRAME)
            status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
        else if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f)
            status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
        else
            status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set output element type\n");
            goto err;
        }
        ov_preprocess_output_tensor_info_free(output_tensor_info);
        output_tensor_info = NULL;
        ov_preprocess_output_info_free(ov_model->output_info);
        ov_model->output_info = NULL;
    }
    // set preprocess steps.
    if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f) {
        ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
        status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get preprocess steps\n");
            goto err;
        }
        // convert to float, then subtract mean and divide by scale
        status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
        status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->options.mean);
        status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->options.scale);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set preprocess steps\n");
            ov_preprocess_preprocess_steps_free(input_process_steps);
            input_process_steps = NULL;
            goto err;
        }
        ov_preprocess_preprocess_steps_free(input_process_steps);
        input_process_steps = NULL;
    }
    ov_preprocess_input_tensor_info_free(input_tensor_info);
    input_tensor_info = NULL;
    ov_preprocess_input_info_free(ov_model->input_info);
    ov_model->input_info = NULL;

    //update model
    // NOTE(review): tmp_ov_model is read on the error path and in
    // ov_model_free below even when ov_model->ov_model was NULL, in which
    // case it is uninitialized — verify against upstream.
    if(ov_model->ov_model)
        tmp_ov_model = ov_model->ov_model;
    status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
        ov_model_free(tmp_ov_model);
        tmp_ov_model = NULL;
        goto err;
    }
    ov_model_free(tmp_ov_model);

    //update output_port
    if (!ov_model->output_ports) {
        ov_model->output_ports = av_calloc(nb_outputs, sizeof(*ov_model->output_ports));
        if (!ov_model->output_ports) {
            ret = AVERROR(ENOMEM);
            goto err;
        }
    } else
        for (int i = 0; i < nb_outputs; i++) {
            ov_output_const_port_free(ov_model->output_ports[i]);
            ov_model->output_ports[i] = NULL;
        }

    for (int i = 0; i < nb_outputs; i++) {
        char *port_name;
        if (output_names)
            status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
                                                   &ov_model->output_ports[i]);
        else
            status = ov_model_const_output_by_index(ov_model->ov_model, i,
                                                    &ov_model->output_ports[i]);
        if (status != OK) {
            // NOTE(review): output_names[i] is dereferenced here even when
            // output_names is NULL (by-index path) — verify against upstream.
            av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
            goto err;
        }
        status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
            goto err;
        }
        av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
        ov_free(port_name);
        port_name = NULL;
    }
    //compile network
    status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
    if (status != OK) {
        goto err;
    }
    ov_preprocess_input_model_info_free(input_model_info);
    input_model_info = NULL;
    ov_layout_free(NCHW_layout);
    ov_layout_free(NHWC_layout);
#else
    // legacy Inference Engine path: reshape for batching, set layouts,
    // then load the network onto the device
    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        if (status != OK) {
            goto err;
        }
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            goto err;
        }
    }

    // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
    // while we pass NHWC data from FFmpeg to openvino
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status != OK) {
        if (status == NOT_FOUND) {
            av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
                                      "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
        } else{
            av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
        }
        goto err;
    }
    // NOTE(review): output_name is not declared in this function's visible
    // signature (which takes output_names) — its declaration appears to have
    // been lost in extraction; verify against upstream.
    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status != OK) {
        if (status == NOT_FOUND) {
            av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
                                      "all output(s) are: \"%s\"\n", output_name, ov_model->all_output_names);
        } else{
            av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
        }
        goto err;
    }
    ov_model->nb_outputs = 1;

    // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
    // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
    // ask openvino to do the conversion internally.
    // the current supported SR model (frame processing) is generated from tensorflow model,
    // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
    // TODO: we need to get a final clear&general solution with all backends/formats considered.
    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        status = ie_network_set_input_precision(ov_model->network, input_name, U8);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
            goto err;
        }
    }

    status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
        // loading failed: list the devices that are actually available
        status = ie_core_get_available_devices(ov_model->core, &a_dev);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
            goto err;
        }
        for (int i = 0; i < a_dev.num_devices; i++) {
            APPEND_STRING(all_dev_names, a_dev.devices[i])
        }
        av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
               ctx->options.device_type, all_dev_names);
        ret = AVERROR(ENODEV);
        goto err;
    }
#endif
    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

    ov_model->request_queue = ff_safe_queue_create();
    if (!ov_model->request_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    // one OVRequestItem (infer request + callback + batch slots) per nireq
    for (int i = 0; i < ctx->options.nireq; i++) {
        OVRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            ret = AVERROR(ENOMEM);
            goto err;
        }

#if HAVE_OPENVINO2
        item->callback.callback_func = infer_completion_callback;
#else
        item->callback.completeCallBackFunc = infer_completion_callback;
#endif
        item->callback.args = item;
        if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
            av_freep(&item);
            ret = AVERROR(ENOMEM);
            goto err;
        }

#if HAVE_OPENVINO2
        status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to Creates an inference request object.\n");
            goto err;
        }
#else
        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
        if (status != OK) {
            goto err;
        }
#endif

        item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
        if (!item->lltasks) {
            ret = AVERROR(ENOMEM);
            goto err;
        }
        item->lltask_count = 0;
    }

    ov_model->task_queue = ff_queue_create();
    if (!ov_model->task_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    ov_model->lltask_queue = ff_queue_create();
    if (!ov_model->lltask_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    return 0;

err:
#if HAVE_OPENVINO2
    if (output_tensor_info)
        ov_preprocess_output_tensor_info_free(output_tensor_info);
    if (ov_model->output_info)
        ov_preprocess_output_info_free(ov_model->output_info);
    if (NCHW_layout)
        ov_layout_free(NCHW_layout);
    if (NHWC_layout)
        ov_layout_free(NHWC_layout);
    if (input_model_info)
        ov_preprocess_input_model_info_free(input_model_info);
#endif
    // tears down everything allocated so far, including ov_model itself
    dnn_free_model_ov(&ov_model->model);
    return ret;
}
985 
986 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
987 {
988 #if HAVE_OPENVINO2
989  ov_status_e status;
990 #else
991  IEStatusCode status;
992 #endif
993  LastLevelTaskItem *lltask;
994  int ret = 0;
995  TaskItem *task;
996  OVContext *ctx;
997  OVModel *ov_model;
998 
999  if (ff_queue_size(inferenceq) == 0) {
1000 #if HAVE_OPENVINO2
1001  ov_infer_request_free(request->infer_request);
1002 #else
1003  ie_infer_request_free(&request->infer_request);
1004 #endif
1005  av_freep(&request);
1006  return 0;
1007  }
1008 
1009  lltask = ff_queue_peek_front(inferenceq);
1010  task = lltask->task;
1011  ov_model = task->model;
1012  ctx = &ov_model->ctx;
1013 
1014  ret = fill_model_input_ov(ov_model, request);
1015  if (ret != 0) {
1016  goto err;
1017  }
1018 
1019 #if HAVE_OPENVINO2
1020  if (task->async) {
1021  status = ov_infer_request_set_callback(request->infer_request, &request->callback);
1022  if (status != OK) {
1023  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1025  goto err;
1026  }
1027 
1028  status = ov_infer_request_start_async(request->infer_request);
1029  if (status != OK) {
1030  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1032  goto err;
1033  }
1034  return 0;
1035  } else {
1036  status = ov_infer_request_infer(request->infer_request);
1037  if (status != OK) {
1038  av_log(NULL, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
1040  goto err;
1041  }
1042  infer_completion_callback(request);
1043  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1044  }
1045 #else
1046  if (task->async) {
1047  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1048  if (status != OK) {
1049  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1051  goto err;
1052  }
1053  status = ie_infer_request_infer_async(request->infer_request);
1054  if (status != OK) {
1055  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1057  goto err;
1058  }
1059  return 0;
1060  } else {
1061  status = ie_infer_request_infer(request->infer_request);
1062  if (status != OK) {
1063  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
1065  goto err;
1066  }
1067  infer_completion_callback(request);
1068  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1069  }
1070 #endif
1071 err:
1072  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
1073 #if HAVE_OPENVINO2
1074  ov_infer_request_free(request->infer_request);
1075 #else
1076  ie_infer_request_free(&request->infer_request);
1077 #endif
1078  av_freep(&request);
1079  }
1080  return ret;
1081 }
1082 
1083 static int get_input_ov(void *model, DNNData *input, const char *input_name)
1084 {
1085  OVModel *ov_model = model;
1086  OVContext *ctx = &ov_model->ctx;
1087  int input_resizable = ctx->options.input_resizable;
1088 
1089 #if HAVE_OPENVINO2
1090  ov_shape_t input_shape = {0};
1091  ov_element_type_e precision;
1092  ov_status_e status;
1093  if (input_name)
1094  status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
1095  else
1096  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
1097  if (status != OK) {
1098  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1099  return ov2_map_error(status, NULL);
1100  }
1101  status = ov_port_get_element_type(ov_model->input_port, &precision);
1102  if (status != OK) {
1103  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
1104  return ov2_map_error(status, NULL);
1105  }
1106  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1107  if (status != OK) {
1108  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1109  return ov2_map_error(status, NULL);
1110  }
1111  for (int i = 0; i < 4; i++)
1112  input->dims[i] = input_shape.dims[i];
1113  if (input_resizable) {
1114  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1115  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1116  }
1117 
1118  if (input_shape.dims[1] <= 3) // NCHW
1119  input->layout = DL_NCHW;
1120  else // NHWC
1121  input->layout = DL_NHWC;
1122 
1123  input->dt = precision_to_datatype(precision);
1124  ov_shape_free(&input_shape);
1125  return 0;
1126 #else
1127  char *model_input_name = NULL;
1128  IEStatusCode status;
1129  size_t model_input_count = 0;
1130  dimensions_t dims;
1131  precision_e precision;
1132  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1133  if (status != OK) {
1134  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1135  return DNN_GENERIC_ERROR;
1136  }
1137  for (size_t i = 0; i < model_input_count; i++) {
1138  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1139  if (status != OK) {
1140  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1141  return DNN_GENERIC_ERROR;
1142  }
1143  if (strcmp(model_input_name, input_name) == 0) {
1144  ie_network_name_free(&model_input_name);
1145  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1146  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1147  if (status != OK) {
1148  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
1149  return DNN_GENERIC_ERROR;
1150  }
1151 
1152  for (int i = 0; i < 4; i++)
1153  input->dims[i] = input_shape.dims[i];
1154  if (input_resizable) {
1155  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1156  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1157  }
1158 
1159  if (input_shape.dims[1] <= 3) // NCHW
1160  input->layout = DL_NCHW;
1161  else // NHWC
1162  input->layout = DL_NHWC;
1163 
1164  input->dt = precision_to_datatype(precision);
1165  return 0;
1166  }
1167 
1168  ie_network_name_free(&model_input_name);
1169  }
1170 
1171  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
1172  return AVERROR(EINVAL);
1173 #endif
1174 }
1175 
1177 {
1178  AVFrameSideData *sd;
1180  const AVDetectionBBox *bbox;
1181 
1183  if (!sd) { // this frame has nothing detected
1184  return 0;
1185  }
1186 
1187  if (!sd->size) {
1188  return 0;
1189  }
1190 
1191  header = (const AVDetectionBBoxHeader *)sd->data;
1192  if (!header->nb_bboxes) {
1193  return 0;
1194  }
1195 
1196  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1197  bbox = av_get_detection_bbox(header, i);
1198  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1199  return 0;
1200  }
1201  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
1202  return 0;
1203  }
1204 
1206  return 0;
1207  }
1208  }
1209 
1210  return 1;
1211 }
1212 
1213 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
1214 {
1215  switch (func_type) {
1216  case DFT_PROCESS_FRAME:
1217  case DFT_ANALYTICS_DETECT:
1218  {
1219  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
1220  if (!lltask) {
1221  return AVERROR(ENOMEM);
1222  }
1223  task->inference_todo = 1;
1224  task->inference_done = 0;
1225  lltask->task = task;
1226  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1227  av_freep(&lltask);
1228  return AVERROR(ENOMEM);
1229  }
1230  return 0;
1231  }
1233  {
1235  AVFrame *frame = task->in_frame;
1236  AVFrameSideData *sd;
1238 
1239  task->inference_todo = 0;
1240  task->inference_done = 0;
1241 
1243  return 0;
1244  }
1245 
1247  header = (const AVDetectionBBoxHeader *)sd->data;
1248 
1249  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1250  LastLevelTaskItem *lltask;
1252 
1253  if (params->target) {
1254  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
1255  continue;
1256  }
1257  }
1258 
1259  lltask = av_malloc(sizeof(*lltask));
1260  if (!lltask) {
1261  return AVERROR(ENOMEM);
1262  }
1263  task->inference_todo++;
1264  lltask->task = task;
1265  lltask->bbox_index = i;
1266  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1267  av_freep(&lltask);
1268  return AVERROR(ENOMEM);
1269  }
1270  }
1271  return 0;
1272  }
1273  default:
1274  av_assert0(!"should not reach here");
1275  return AVERROR(EINVAL);
1276  }
1277 }
1278 
1279 static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
1280  const char *output_name, int *output_width, int *output_height)
1281 {
1282 #if HAVE_OPENVINO2
1283  ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1284  ov_status_e status;
1285  ov_shape_t input_shape = {0};
1286  ov_partial_shape_t partial_shape;
1287 #else
1288  IEStatusCode status;
1289  input_shapes_t input_shapes;
1290 #endif
1291  int ret;
1292  OVModel *ov_model = model;
1293  OVContext *ctx = &ov_model->ctx;
1294  TaskItem task;
1295  OVRequestItem *request;
1296  DNNExecBaseParams exec_params = {
1297  .input_name = input_name,
1298  .output_names = output_name ? &output_name : NULL,
1299  .nb_output = 1,
1300  .in_frame = NULL,
1301  .out_frame = NULL,
1302  };
1303 
1304  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
1305  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
1306  return AVERROR(EINVAL);
1307  }
1308 
1309 #if HAVE_OPENVINO2
1310  if (ctx->options.input_resizable) {
1311  status = ov_partial_shape_create(4, dims, &partial_shape);
1312  if (status != OK) {
1313  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape.\n");
1314  return ov2_map_error(status, NULL);
1315  }
1316  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1317  if (status != OK) {
1318  av_log(ctx, AV_LOG_ERROR, "Failed to create shape for model input resize.\n");
1319  return ov2_map_error(status, NULL);
1320  }
1321  input_shape.dims[2] = input_height;
1322  input_shape.dims[3] = input_width;
1323 
1324  status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1325  ov_shape_free(&input_shape);
1326  if (status != OK) {
1327  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape for model input resize.\n");
1328  return ov2_map_error(status, NULL);
1329  }
1330 
1331  status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1332  ov_partial_shape_free(&partial_shape);
1333  if (status != OK) {
1334  av_log(ctx, AV_LOG_ERROR, "Failed to reszie model input.\n");
1335  return ov2_map_error(status, NULL);
1336  }
1337  }
1338 
1339  if (!ov_model->compiled_model) {
1340 #else
1341  if (ctx->options.input_resizable) {
1342  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1343  input_shapes.shapes->shape.dims[2] = input_height;
1344  input_shapes.shapes->shape.dims[3] = input_width;
1345  status |= ie_network_reshape(ov_model->network, input_shapes);
1346  ie_network_input_shapes_free(&input_shapes);
1347  if (status != OK) {
1348  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
1349  return DNN_GENERIC_ERROR;
1350  }
1351  }
1352  if (!ov_model->exe_network) {
1353 #endif
1354  ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
1355  if (ret != 0) {
1356  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
1357  return ret;
1358  }
1359  }
1360 
1361  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
1362  if (ret != 0) {
1363  goto err;
1364  }
1365 
1366  ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
1367  if (ret != 0) {
1368  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1369  goto err;
1370  }
1371 
1372  request = ff_safe_queue_pop_front(ov_model->request_queue);
1373  if (!request) {
1374  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1375  ret = AVERROR(EINVAL);
1376  goto err;
1377  }
1378 
1379  ret = execute_model_ov(request, ov_model->lltask_queue);
1380  *output_width = task.out_frame->width;
1381  *output_height = task.out_frame->height;
1382 err:
1383  av_frame_free(&task.out_frame);
1384  av_frame_free(&task.in_frame);
1385  return ret;
1386 }
1387 
1388 static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
1389 {
1390  DNNModel *model = NULL;
1391  OVModel *ov_model = NULL;
1392  OVContext *ctx = NULL;
1393 #if HAVE_OPENVINO2
1394  ov_core_t* core = NULL;
1395  ov_model_t* ovmodel = NULL;
1396  ov_status_e status;
1397 #else
1398  size_t node_count = 0;
1399  char *node_name = NULL;
1400  IEStatusCode status;
1401 #endif
1402 
1403  model = av_mallocz(sizeof(DNNModel));
1404  if (!model){
1405  return NULL;
1406  }
1407 
1408  ov_model = av_mallocz(sizeof(OVModel));
1409  if (!ov_model) {
1410  av_freep(&model);
1411  return NULL;
1412  }
1413  model->model = ov_model;
1414  ov_model->model = model;
1415  ov_model->ctx.class = &dnn_openvino_class;
1416  ctx = &ov_model->ctx;
1417 
1418  //parse options
1420  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
1421  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
1422  goto err;
1423  }
1424 
1425 #if HAVE_OPENVINO2
1426  status = ov_core_create(&core);
1427  if (status != OK) {
1428  goto err;
1429  }
1430  ov_model->core = core;
1431 
1432  status = ov_core_read_model(core, model_filename, NULL, &ovmodel);
1433  if (status != OK) {
1434  ov_version_t ver;
1435  status = ov_get_openvino_version(&ver);
1436  av_log(NULL, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1437  "Please check if the model version matches the runtime OpenVINO Version:\n",
1438  model_filename);
1439  if (status == OK) {
1440  av_log(NULL, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
1441  }
1442  ov_version_free(&ver);
1443  goto err;
1444  }
1445  ov_model->ov_model = ovmodel;
1446 #else
1447  ov_model->all_input_names = NULL;
1448  ov_model->all_output_names = NULL;
1449 
1450  status = ie_core_create("", &ov_model->core);
1451  if (status != OK)
1452  goto err;
1453 
1454  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
1455  if (status != OK) {
1456  ie_version_t ver;
1457  ver = ie_c_api_version();
1458  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1459  "Please check if the model version matches the runtime OpenVINO %s\n",
1460  model_filename, ver.api_version);
1461  ie_version_free(&ver);
1462  goto err;
1463  }
1464 
1465  //get all the input and output names
1466  status = ie_network_get_inputs_number(ov_model->network, &node_count);
1467  if (status != OK) {
1468  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1469  goto err;
1470  }
1471  for (size_t i = 0; i < node_count; i++) {
1472  status = ie_network_get_input_name(ov_model->network, i, &node_name);
1473  if (status != OK) {
1474  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1475  goto err;
1476  }
1477  APPEND_STRING(ov_model->all_input_names, node_name)
1478  ie_network_name_free(&node_name);
1479  }
1480  status = ie_network_get_outputs_number(ov_model->network, &node_count);
1481  if (status != OK) {
1482  av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
1483  goto err;
1484  }
1485  for (size_t i = 0; i < node_count; i++) {
1486  status = ie_network_get_output_name(ov_model->network, i, &node_name);
1487  if (status != OK) {
1488  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
1489  goto err;
1490  }
1491  APPEND_STRING(ov_model->all_output_names, node_name)
1492  ie_network_name_free(&node_name);
1493  }
1494 #endif
1495 
1496  model->get_input = &get_input_ov;
1497  model->get_output = &get_output_ov;
1498  model->options = options;
1499  model->filter_ctx = filter_ctx;
1500  model->func_type = func_type;
1501 
1502  return model;
1503 
1504 err:
1505  dnn_free_model_ov(&model);
1506  return NULL;
1507 }
1508 
1509 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
1510 {
1511  OVModel *ov_model = model->model;
1512  OVContext *ctx = &ov_model->ctx;
1513  OVRequestItem *request;
1514  TaskItem *task;
1515  int ret;
1516 
1517  ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
1518  if (ret != 0) {
1519  return ret;
1520  }
1521 
1522 #if HAVE_OPENVINO2
1523  if (!ov_model->compiled_model) {
1524 #else
1525  if (!ov_model->exe_network) {
1526 #endif
1527  ret = init_model_ov(ov_model, exec_params->input_name,
1528  exec_params->output_names, exec_params->nb_output);
1529  if (ret != 0) {
1530  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
1531  return ret;
1532  }
1533  }
1534 
1535  task = av_malloc(sizeof(*task));
1536  if (!task) {
1537  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
1538  return AVERROR(ENOMEM);
1539  }
1540 
1541  ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
1542  if (ret != 0) {
1543  av_freep(&task);
1544  return ret;
1545  }
1546 
1547  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
1548  av_freep(&task);
1549  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
1550  return AVERROR(ENOMEM);
1551  }
1552 
1553  ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
1554  if (ret != 0) {
1555  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1556  return ret;
1557  }
1558 
1559  if (ctx->options.async) {
1560  while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
1561  request = ff_safe_queue_pop_front(ov_model->request_queue);
1562  if (!request) {
1563  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1564  return AVERROR(EINVAL);
1565  }
1566 
1567  ret = execute_model_ov(request, ov_model->lltask_queue);
1568  if (ret != 0) {
1569  return ret;
1570  }
1571  }
1572 
1573  return 0;
1574  }
1575  else {
1576  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
1577  // Classification filter has not been completely
1578  // tested with the sync mode. So, do not support now.
1579  avpriv_report_missing_feature(ctx, "classify for sync execution");
1580  return AVERROR(ENOSYS);
1581  }
1582 
1583  if (ctx->options.batch_size > 1) {
1584  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
1585  return AVERROR(ENOSYS);
1586  }
1587 
1588  request = ff_safe_queue_pop_front(ov_model->request_queue);
1589  if (!request) {
1590  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1591  return AVERROR(EINVAL);
1592  }
1593  return execute_model_ov(request, ov_model->lltask_queue);
1594  }
1595 }
1596 
1597 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
1598 {
1599  OVModel *ov_model = model->model;
1600  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
1601 }
1602 
1603 static int dnn_flush_ov(const DNNModel *model)
1604 {
1605  OVModel *ov_model = model->model;
1606  OVContext *ctx = &ov_model->ctx;
1607  OVRequestItem *request;
1608 #if HAVE_OPENVINO2
1609  ov_status_e status;
1610 #else
1611  IEStatusCode status;
1612 #endif
1613  int ret;
1614 
1615  if (ff_queue_size(ov_model->lltask_queue) == 0) {
1616  // no pending task need to flush
1617  return 0;
1618  }
1619 
1620  request = ff_safe_queue_pop_front(ov_model->request_queue);
1621  if (!request) {
1622  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1623  return AVERROR(EINVAL);
1624  }
1625 
1626  ret = fill_model_input_ov(ov_model, request);
1627  if (ret != 0) {
1628  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
1629  return ret;
1630  }
1631 #if HAVE_OPENVINO2
1632  status = ov_infer_request_infer(request->infer_request);
1633  if (status != OK) {
1634  av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
1635  return ov2_map_error(status, NULL);
1636  }
1637 #else
1638  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1639  if (status != OK) {
1640  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1641  return DNN_GENERIC_ERROR;
1642  }
1643  status = ie_infer_request_infer_async(request->infer_request);
1644  if (status != OK) {
1645  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1646  return DNN_GENERIC_ERROR;
1647  }
1648 #endif
1649 
1650  return 0;
1651 }
1652 
1654  .load_model = dnn_load_model_ov,
1655  .execute_model = dnn_execute_model_ov,
1656  .get_result = dnn_get_result_ov,
1657  .flush = dnn_flush_ov,
1658  .free_model = dnn_free_model_ov,
1659 };
ov2_errors
static const struct @246 ov2_errors[]
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:79
ff_dnn_backend_openvino
const DNNModule ff_dnn_backend_openvino
OVModel::input_info
ov_preprocess_input_info_t * input_info
Definition: dnn_backend_openvino.c:66
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1638
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:91
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:54
opt.h
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:838
OVModel::nb_outputs
int nb_outputs
Definition: dnn_backend_openvino.c:80
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:73
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
get_input_ov
static int get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:1083
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
AVFrame::width
int width
Definition: frame.h:416
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:45
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1856
OVOptions::mean
float mean
Definition: dnn_backend_openvino.c:50
AVOption
AVOption.
Definition: opt.h:346
DNNModule::load_model
DNNModel *(* load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_interface.h:123
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
ov2_map_error
static int ov2_map_error(ov_status_e status, const char **desc)
Definition: dnn_backend_openvino.c:144
data
const char data[16]
Definition: mxf.c:148
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:71
FLAGS
#define FLAGS
Definition: cmdutils.c:584
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:58
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:46
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
Definition: dnn_backend_openvino.c:598
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:137
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
OVModel::output_info
ov_preprocess_output_info_t * output_info
Definition: dnn_backend_openvino.c:68
av_opt_free
void av_opt_free(void *obj)
Free all allocated objects in obj.
Definition: opt.c:1908
OVRequestItem::infer_request
ov_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:88
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:51
OVModel::output_ports
ov_output_const_port_t ** output_ports
Definition: dnn_backend_openvino.c:67
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:43
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:62
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:253
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
float
float
Definition: af_crystalizer.c:121
desc
const char * desc
Definition: dnn_backend_openvino.c:122
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
DNNExecClassificationParams
Definition: dnn_interface.h:84
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
OVOptions::layout
DNNLayout layout
Definition: dnn_backend_openvino.c:48
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:986
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:155
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
DL_NCHW
@ DL_NCHW
Definition: dnn_interface.h:61
dnn_free_model_ov
static void dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:532
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:92
OVModel::preprocess
ov_preprocess_prepostprocessor_t * preprocess
Definition: dnn_backend_openvino.c:69
frame
static AVFrame * frame
Definition: demux_decode.c:54
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
if
if(ret)
Definition: filter_design.txt:179
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:181
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:86
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:74
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:420
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:86
av_err
int av_err
Definition: dnn_backend_openvino.c:121
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:72
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
get_output_ov
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:1279
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:85
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:59
OVRequestItem
Definition: dnn_backend_openvino.c:84
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:209
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:217
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:182
options
const OptionDef options[]
f
f
Definition: af_crystalizer.c:121
OVModel::compiled_model
ov_compiled_model_t * compiled_model
Definition: dnn_backend_openvino.c:64
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
DNNLayout
DNNLayout
Definition: dnn_interface.h:59
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:60
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:75
header
static const uint8_t header[24]
Definition: sdr2.c:68
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:49
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:1176
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:47
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:102
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:354
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
OVModel::ov_model
ov_model_t * ov_model
Definition: dnn_backend_openvino.c:63
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
OVModel::core
ov_core_t * core
Definition: dnn_backend_openvino.c:62
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:77
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:53
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:117
ret
ret
Definition: filter_design.txt:187
OVOptions::scale
float scale
Definition: dnn_backend_openvino.c:49
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:78
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:416
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:1213
status
ov_status_e status
Definition: dnn_backend_openvino.c:120
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:135
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:78
DL_NONE
@ DL_NONE
Definition: dnn_interface.h:60
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
DNNModel
Definition: dnn_interface.h:93
precision_to_datatype
static DNNDataType precision_to_datatype(ov_element_type_e precision) static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:161
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:142
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:97
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:44
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:339
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
OVOptions
Definition: dnn_backend_openvino.c:42
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
OVModel::input_port
ov_output_const_port_t * input_port
Definition: dnn_backend_openvino.c:65
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
OVRequestItem::callback
ov_callback_t callback
Definition: dnn_backend_openvino.c:89
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:196
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194
DNNModule
Definition: dnn_interface.h:121
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:55
DNNExecBaseParams::nb_output
uint32_t nb_output
Definition: dnn_interface.h:79
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:41