FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
26 #include "libavformat/avio.h"
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/opt.h"
31 #include "libavcodec/defs.h"
32 #include "../internal.h"
33 #include "dnn_io_proc.h"
34 #include "dnn_backend_common.h"
35 #include "safe_queue.h"
36 #include <tensorflow/c/c_api.h>
37 
/**
 * User-configurable options for the TensorFlow backend,
 * parsed from the filter's option string.
 */
typedef struct TFOptions{
    char *sess_config;  ///< hex string ("0x...") of a serialized ConfigProto, passed to TF_SetConfig
    uint8_t async;      ///< non-zero: run inference asynchronously (forced to 0 without pthread support)
    uint32_t nireq;     ///< number of parallel inference requests; <= 0 selects cpu_count/2 + 1
} TFOptions;
43 
44 typedef struct TFContext {
45  const AVClass *class;
47 } TFContext;
48 
49 typedef struct TFModel{
52  TF_Graph *graph;
53  TF_Session *session;
54  TF_Status *status;
58 } TFModel;
59 
60 /**
61  * Stores execution parameters for single
62  * call to the TensorFlow C API
63  */
64 typedef struct TFInferRequest {
65  TF_Output *tf_outputs;
66  TF_Tensor **output_tensors;
67  TF_Output *tf_input;
68  TF_Tensor *input_tensor;
70 
71 typedef struct TFRequestItem {
74  TF_Status *status;
77 
78 #define OFFSET(x) offsetof(TFContext, x)
79 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
80 static const AVOption dnn_tensorflow_options[] = {
81  { "sess_config", "config for SessionOptions", OFFSET(options.sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
83  { NULL }
84 };
85 
86 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
87 
88 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
89 static void infer_completion_callback(void *args);
90 static inline void destroy_request_item(TFRequestItem **arg);
91 
/**
 * Deallocator handed to TF_NewBuffer(): releases the graph bytes
 * once TensorFlow no longer needs them.  @p length is required by
 * the TF_Buffer callback signature but unused here.
 */
static void free_buffer(void *data, size_t length)
{
    av_free(data);
}
96 
97 /**
98  * Free the contents of TensorFlow inference request.
99  * It does not free the TFInferRequest instance.
100  *
101  * @param request pointer to TFInferRequest instance.
102  * NULL pointer is allowed.
103  */
static void tf_free_request(TFInferRequest *request)
{
    if (!request)
        return;
    // release the input tensor owned by this request
    if (request->input_tensor) {
        TF_DeleteTensor(request->input_tensor);
        request->input_tensor = NULL;
    }
    av_freep(&request->tf_input);
    av_freep(&request->tf_outputs);
    if (request->output_tensors) {
        /* NOTE(review): both operands below are sizeof(TF_Tensor *),
         * so nb_output always evaluates to 1 and only
         * output_tensors[0] is deleted — any additional output
         * tensors leak.  The actual count (task->nb_output) is not
         * reachable from this struct; confirm and plumb it through
         * if multi-output models are expected. */
        int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
        // signed/unsigned comparison; harmless while nb_output == 1
        for (uint32_t i = 0; i < nb_output; ++i) {
            if (request->output_tensors[i]) {
                TF_DeleteTensor(request->output_tensors[i]);
                request->output_tensors[i] = NULL;
            }
        }
        av_freep(&request->output_tensors);
    }
}
125 
126 /**
127  * Create a TensorFlow inference request. All properties
128  * are initially unallocated and set as NULL.
129  *
130  * @return pointer to the allocated TFInferRequest instance.
131  */
133 {
134  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
135  if (!infer_request) {
136  return NULL;
137  }
138  infer_request->tf_outputs = NULL;
139  infer_request->tf_input = NULL;
140  infer_request->input_tensor = NULL;
141  infer_request->output_tensors = NULL;
142  return infer_request;
143 }
144 
145 /**
146  * Start synchronous inference for the TensorFlow model.
147  *
148  * @param request pointer to the TFRequestItem for inference
149  * @retval 0 if execution is successful
150  * @retval AVERROR(EINVAL) if request is NULL
151  * @retval DNN_GENERIC_ERROR if execution fails
152  */
153 static int tf_start_inference(void *args)
154 {
155  TFRequestItem *request = args;
156  TFInferRequest *infer_request = request->infer_request;
157  LastLevelTaskItem *lltask = request->lltask;
158  TaskItem *task = lltask->task;
159  TFModel *tf_model = task->model;
160 
161  if (!request) {
162  av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
163  return AVERROR(EINVAL);
164  }
165 
166  TF_SessionRun(tf_model->session, NULL,
167  infer_request->tf_input, &infer_request->input_tensor, 1,
168  infer_request->tf_outputs, infer_request->output_tensors,
169  task->nb_output, NULL, 0, NULL,
170  request->status);
171  if (TF_GetCode(request->status) != TF_OK) {
172  av_log(&tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
173  return DNN_GENERIC_ERROR;
174  }
175  return 0;
176 }
177 
/**
 * Free the TFRequestItem completely.
 *
 * @param arg Address of the TFRequestItem instance; *arg is set
 * to NULL on return. NULL @p arg is allowed.
 */
static inline void destroy_request_item(TFRequestItem **arg) {
    TFRequestItem *request;
    if (!arg) {
        return;
    }
    request = *arg;
    /* NOTE(review): *arg is assumed non-NULL here — request is
     * dereferenced without a check; all visible callers pass a
     * freshly popped/allocated item, but confirm. */
    tf_free_request(request->infer_request);
    av_freep(&request->infer_request);
    av_freep(&request->lltask);
    TF_DeleteStatus(request->status);
    av_freep(arg);  // also clears the caller's pointer
}
196 
197 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
198 {
199  TFModel *tf_model = task->model;
200  TFContext *ctx = &tf_model->ctx;
201  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
202  if (!lltask) {
203  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
204  return AVERROR(ENOMEM);
205  }
206  task->inference_todo = 1;
207  task->inference_done = 0;
208  lltask->task = task;
209  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
210  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
211  av_freep(&lltask);
212  return AVERROR(ENOMEM);
213  }
214  return 0;
215 }
216 
217 static TF_Buffer *read_graph(const char *model_filename)
218 {
219  TF_Buffer *graph_buf;
220  unsigned char *graph_data = NULL;
221  AVIOContext *model_file_context;
222  long size, bytes_read;
223 
224  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
225  return NULL;
226  }
227 
228  size = avio_size(model_file_context);
229 
230  graph_data = av_malloc(size);
231  if (!graph_data){
232  avio_closep(&model_file_context);
233  return NULL;
234  }
235  bytes_read = avio_read(model_file_context, graph_data, size);
236  avio_closep(&model_file_context);
237  if (bytes_read != size){
238  av_freep(&graph_data);
239  return NULL;
240  }
241 
242  graph_buf = TF_NewBuffer();
243  graph_buf->data = graph_data;
244  graph_buf->length = size;
245  graph_buf->data_deallocator = free_buffer;
246 
247  return graph_buf;
248 }
249 
250 static TF_Tensor *allocate_input_tensor(const DNNData *input)
251 {
252  TF_DataType dt;
253  size_t size;
254  int64_t input_dims[] = {1, input->height, input->width, input->channels};
255  switch (input->dt) {
256  case DNN_FLOAT:
257  dt = TF_FLOAT;
258  size = sizeof(float);
259  break;
260  case DNN_UINT8:
261  dt = TF_UINT8;
262  size = 1;
263  break;
264  default:
265  av_assert0(!"should not reach here");
266  }
267 
268  return TF_AllocateTensor(dt, input_dims, 4,
269  input_dims[1] * input_dims[2] * input_dims[3] * size);
270 }
271 
272 static int get_input_tf(void *model, DNNData *input, const char *input_name)
273 {
274  TFModel *tf_model = model;
275  TFContext *ctx = &tf_model->ctx;
276  TF_Status *status;
277  TF_DataType dt;
278  int64_t dims[4];
279 
280  TF_Output tf_output;
281  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
282  if (!tf_output.oper) {
283  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
284  return AVERROR(EINVAL);
285  }
286 
287  tf_output.index = 0;
288  dt = TF_OperationOutputType(tf_output);
289  switch (dt) {
290  case TF_FLOAT:
291  input->dt = DNN_FLOAT;
292  break;
293  case TF_UINT8:
294  input->dt = DNN_UINT8;
295  break;
296  default:
297  av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
298  return AVERROR(EINVAL);
299  }
300  input->order = DCO_RGB;
301 
302  status = TF_NewStatus();
303  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
304  if (TF_GetCode(status) != TF_OK){
305  TF_DeleteStatus(status);
306  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
307  return DNN_GENERIC_ERROR;
308  }
309  TF_DeleteStatus(status);
310 
311  // currently only NHWC is supported
312  av_assert0(dims[0] == 1 || dims[0] == -1);
313  input->height = dims[1];
314  input->width = dims[2];
315  input->channels = dims[3];
316 
317  return 0;
318 }
319 
320 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
321  const char *output_name, int *output_width, int *output_height)
322 {
323  int ret;
324  TFModel *tf_model = model;
325  TFContext *ctx = &tf_model->ctx;
326  TaskItem task;
327  TFRequestItem *request;
328  DNNExecBaseParams exec_params = {
329  .input_name = input_name,
330  .output_names = &output_name,
331  .nb_output = 1,
332  .in_frame = NULL,
333  .out_frame = NULL,
334  };
335 
336  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
337  if (ret != 0) {
338  goto err;
339  }
340 
341  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
342  if (ret != 0) {
343  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
344  goto err;
345  }
346 
347  request = ff_safe_queue_pop_front(tf_model->request_queue);
348  if (!request) {
349  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
350  ret = AVERROR(EINVAL);
351  goto err;
352  }
353 
354  ret = execute_model_tf(request, tf_model->lltask_queue);
355  *output_width = task.out_frame->width;
356  *output_height = task.out_frame->height;
357 
358 err:
359  av_frame_free(&task.out_frame);
360  av_frame_free(&task.in_frame);
361  return ret;
362 }
363 
#define SPACE_CHARS " \t\r\n"
/**
 * Decode a hexadecimal string into bytes, skipping whitespace
 * between digits.  Parsing stops at the first non-hex character;
 * a trailing unpaired digit is discarded.
 *
 * @param data destination buffer, or NULL to only count bytes
 * @param p    NUL-terminated hex string (no "0x" prefix)
 * @return number of complete bytes decoded
 */
static int hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int v = 1;  // sentinel bit collects two nibbles before emitting a byte

    for (;;) {
        int c;

        p += strspn(p, SPACE_CHARS);
        if (*p == '\0')
            break;

        c = (unsigned char)*p++;
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            c = c - 'a' + 10;
        else
            break;

        v = (v << 4) | c;
        if (v & 0x100) {  // two nibbles accumulated: emit one byte
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
393 
394 static int load_tf_model(TFModel *tf_model, const char *model_filename)
395 {
396  TFContext *ctx = &tf_model->ctx;
397  TF_Buffer *graph_def;
398  TF_ImportGraphDefOptions *graph_opts;
399  TF_SessionOptions *sess_opts;
400  const TF_Operation *init_op;
401  uint8_t *sess_config = NULL;
402  int sess_config_length = 0;
403 
404  // prepare the sess config data
405  if (tf_model->ctx.options.sess_config != NULL) {
406  const char *config;
407  /*
408  tf_model->ctx.options.sess_config is hex to present the serialized proto
409  required by TF_SetConfig below, so we need to first generate the serialized
410  proto in a python script, tools/python/tf_sess_config.py is a script example
411  to generate the configs of sess_config.
412  */
413  if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
414  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
415  return AVERROR(EINVAL);
416  }
417  config = tf_model->ctx.options.sess_config + 2;
418  sess_config_length = hex_to_data(NULL, config);
419 
420  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
421  if (!sess_config) {
422  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
423  return AVERROR(ENOMEM);
424  }
425  if (hex_to_data(sess_config, config) < 0) {
426  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
427  return AVERROR(EINVAL);
428  }
429  }
430 
431  graph_def = read_graph(model_filename);
432  if (!graph_def){
433  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
434  av_freep(&sess_config);
435  return AVERROR(EINVAL);
436  }
437  tf_model->graph = TF_NewGraph();
438  tf_model->status = TF_NewStatus();
439  graph_opts = TF_NewImportGraphDefOptions();
440  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
441  TF_DeleteImportGraphDefOptions(graph_opts);
442  TF_DeleteBuffer(graph_def);
443  if (TF_GetCode(tf_model->status) != TF_OK){
444  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
445  av_freep(&sess_config);
446  return DNN_GENERIC_ERROR;
447  }
448 
449  init_op = TF_GraphOperationByName(tf_model->graph, "init");
450  sess_opts = TF_NewSessionOptions();
451 
452  if (sess_config) {
453  TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
454  av_freep(&sess_config);
455  if (TF_GetCode(tf_model->status) != TF_OK) {
456  TF_DeleteSessionOptions(sess_opts);
457  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
458  tf_model->ctx.options.sess_config);
459  return DNN_GENERIC_ERROR;
460  }
461  }
462 
463  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
464  TF_DeleteSessionOptions(sess_opts);
465  if (TF_GetCode(tf_model->status) != TF_OK)
466  {
467  av_freep(&sess_config);
468  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
469  return DNN_GENERIC_ERROR;
470  }
471 
472  // Run initialization operation with name "init" if it is present in graph
473  if (init_op){
474  TF_SessionRun(tf_model->session, NULL,
475  NULL, NULL, 0,
476  NULL, NULL, 0,
477  &init_op, 1, NULL, tf_model->status);
478  if (TF_GetCode(tf_model->status) != TF_OK)
479  {
480  av_freep(&sess_config);
481  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
482  return DNN_GENERIC_ERROR;
483  }
484  }
485 
486  return 0;
487 }
488 
489 static void dnn_free_model_tf(DNNModel **model)
490 {
491  TFModel *tf_model;
492 
493  if (*model){
494  tf_model = (*model)->model;
495  while (ff_safe_queue_size(tf_model->request_queue) != 0) {
497  destroy_request_item(&item);
498  }
500 
501  while (ff_queue_size(tf_model->lltask_queue) != 0) {
503  av_freep(&item);
504  }
505  ff_queue_destroy(tf_model->lltask_queue);
506 
507  while (ff_queue_size(tf_model->task_queue) != 0) {
508  TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
509  av_frame_free(&item->in_frame);
510  av_frame_free(&item->out_frame);
511  av_freep(&item);
512  }
513  ff_queue_destroy(tf_model->task_queue);
514 
515  if (tf_model->graph){
516  TF_DeleteGraph(tf_model->graph);
517  }
518  if (tf_model->session){
519  TF_CloseSession(tf_model->session, tf_model->status);
520  TF_DeleteSession(tf_model->session, tf_model->status);
521  }
522  if (tf_model->status){
523  TF_DeleteStatus(tf_model->status);
524  }
525  av_freep(&tf_model);
526  av_freep(model);
527  }
528 }
529 
530 static DNNModel *dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
531 {
532  DNNModel *model = NULL;
533  TFModel *tf_model = NULL;
534  TFContext *ctx = NULL;
535 
536  model = av_mallocz(sizeof(DNNModel));
537  if (!model){
538  return NULL;
539  }
540 
541  tf_model = av_mallocz(sizeof(TFModel));
542  if (!tf_model){
543  av_freep(&model);
544  return NULL;
545  }
546  model->model = tf_model;
547  tf_model->model = model;
548  ctx = &tf_model->ctx;
549  ctx->class = &dnn_tensorflow_class;
550 
551  //parse options
553  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
554  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
555  goto err;
556  }
557 
558  if (load_tf_model(tf_model, model_filename) != 0){
559  av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", model_filename);
560  goto err;
561  }
562 
563  if (ctx->options.nireq <= 0) {
564  ctx->options.nireq = av_cpu_count() / 2 + 1;
565  }
566 
567 #if !HAVE_PTHREAD_CANCEL
568  if (ctx->options.async) {
569  ctx->options.async = 0;
570  av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
571  }
572 #endif
573 
574  tf_model->request_queue = ff_safe_queue_create();
575  if (!tf_model->request_queue) {
576  goto err;
577  }
578 
579  for (int i = 0; i < ctx->options.nireq; i++) {
580  TFRequestItem *item = av_mallocz(sizeof(*item));
581  if (!item) {
582  goto err;
583  }
584  item->lltask = NULL;
586  if (!item->infer_request) {
587  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
588  av_freep(&item);
589  goto err;
590  }
591  item->status = TF_NewStatus();
594  item->exec_module.args = item;
595 
596  if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
597  destroy_request_item(&item);
598  goto err;
599  }
600  }
601 
602  tf_model->lltask_queue = ff_queue_create();
603  if (!tf_model->lltask_queue) {
604  goto err;
605  }
606 
607  tf_model->task_queue = ff_queue_create();
608  if (!tf_model->task_queue) {
609  goto err;
610  }
611 
612  model->get_input = &get_input_tf;
613  model->get_output = &get_output_tf;
614  model->options = options;
615  model->filter_ctx = filter_ctx;
616  model->func_type = func_type;
617 
618  return model;
619 err:
620  dnn_free_model_tf(&model);
621  return NULL;
622 }
623 
624 static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
625  DNNData input = { 0 };
626  LastLevelTaskItem *lltask;
627  TaskItem *task;
628  TFInferRequest *infer_request = NULL;
629  TFContext *ctx = &tf_model->ctx;
630  int ret = 0;
631 
632  lltask = ff_queue_pop_front(tf_model->lltask_queue);
633  av_assert0(lltask);
634  task = lltask->task;
635  request->lltask = lltask;
636 
637  ret = get_input_tf(tf_model, &input, task->input_name);
638  if (ret != 0) {
639  goto err;
640  }
641 
642  infer_request = request->infer_request;
643  input.height = task->in_frame->height;
644  input.width = task->in_frame->width;
645 
646  infer_request->tf_input = av_malloc(sizeof(TF_Output));
647  if (!infer_request->tf_input) {
648  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
649  ret = AVERROR(ENOMEM);
650  goto err;
651  }
652 
653  infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
654  if (!infer_request->tf_input->oper){
655  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
657  goto err;
658  }
659  infer_request->tf_input->index = 0;
660 
661  infer_request->input_tensor = allocate_input_tensor(&input);
662  if (!infer_request->input_tensor){
663  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
664  ret = AVERROR(ENOMEM);
665  goto err;
666  }
667  input.data = (float *)TF_TensorData(infer_request->input_tensor);
668 
669  switch (tf_model->model->func_type) {
670  case DFT_PROCESS_FRAME:
671  if (task->do_ioproc) {
672  if (tf_model->model->frame_pre_proc != NULL) {
673  tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
674  } else {
676  }
677  }
678  break;
681  break;
682  default:
683  avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
684  break;
685  }
686 
687  infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
688  if (infer_request->tf_outputs == NULL) {
689  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
690  ret = AVERROR(ENOMEM);
691  goto err;
692  }
693 
694  infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
695  if (!infer_request->output_tensors) {
696  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
697  ret = AVERROR(ENOMEM);
698  goto err;
699  }
700 
701  for (int i = 0; i < task->nb_output; ++i) {
702  infer_request->output_tensors[i] = NULL;
703  infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
704  if (!infer_request->tf_outputs[i].oper) {
705  av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
707  goto err;
708  }
709  infer_request->tf_outputs[i].index = 0;
710  }
711 
712  return 0;
713 err:
714  tf_free_request(infer_request);
715  return ret;
716 }
717 
718 static void infer_completion_callback(void *args) {
719  TFRequestItem *request = args;
720  LastLevelTaskItem *lltask = request->lltask;
721  TaskItem *task = lltask->task;
722  DNNData *outputs;
723  TFInferRequest *infer_request = request->infer_request;
724  TFModel *tf_model = task->model;
725  TFContext *ctx = &tf_model->ctx;
726 
727  outputs = av_calloc(task->nb_output, sizeof(*outputs));
728  if (!outputs) {
729  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
730  goto err;
731  }
732 
733  for (uint32_t i = 0; i < task->nb_output; ++i) {
734  outputs[i].height = TF_Dim(infer_request->output_tensors[i], 1);
735  outputs[i].width = TF_Dim(infer_request->output_tensors[i], 2);
736  outputs[i].channels = TF_Dim(infer_request->output_tensors[i], 3);
737  outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
738  outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
739  }
740  switch (tf_model->model->func_type) {
741  case DFT_PROCESS_FRAME:
742  //it only support 1 output if it's frame in & frame out
743  if (task->do_ioproc) {
744  if (tf_model->model->frame_post_proc != NULL) {
745  tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
746  } else {
748  }
749  } else {
750  task->out_frame->width = outputs[0].width;
751  task->out_frame->height = outputs[0].height;
752  }
753  break;
755  if (!tf_model->model->detect_post_proc) {
756  av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
757  return;
758  }
759  tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
760  break;
761  default:
762  av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
763  goto err;
764  }
765  task->inference_done++;
766 err:
767  tf_free_request(infer_request);
768  av_freep(&outputs);
769 
770  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
771  destroy_request_item(&request);
772  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
773  }
774 }
775 
776 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
777 {
778  TFModel *tf_model;
779  TFContext *ctx;
780  LastLevelTaskItem *lltask;
781  TaskItem *task;
782  int ret = 0;
783 
784  if (ff_queue_size(lltask_queue) == 0) {
785  destroy_request_item(&request);
786  return 0;
787  }
788 
789  lltask = ff_queue_peek_front(lltask_queue);
790  task = lltask->task;
791  tf_model = task->model;
792  ctx = &tf_model->ctx;
793 
794  ret = fill_model_input_tf(tf_model, request);
795  if (ret != 0) {
796  goto err;
797  }
798 
799  if (task->async) {
800  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
801  goto err;
802  }
803  return 0;
804  }
805  else {
806  ret = tf_start_inference(request);
807  if (ret != 0) {
808  goto err;
809  }
810  infer_completion_callback(request);
811  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
812  }
813 err:
814  tf_free_request(request->infer_request);
815  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
816  destroy_request_item(&request);
817  }
818  dnn_free_model_tf(&tf_model->model);
819  return ret;
820 }
821 
/**
 * Queue one execution task for the model and kick off inference
 * (async or sync according to ctx->options.async).
 */
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
{
    TFModel *tf_model = model->model;
    TFContext *ctx = &tf_model->ctx;
    TaskItem *task;
    TFRequestItem *request;
    int ret = 0;

    ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
    if (ret != 0) {
        return ret;
    }

    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
        av_freep(&task);
        return ret;
    }

    if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return AVERROR(ENOMEM);
    }

    /* NOTE(review): from this point on the task is also owned by
     * task_queue; the av_freep(&task) calls on the failure paths
     * below leave a dangling pointer in that queue, which the
     * model-free path will pop and free again — verify and remove
     * the task from the queue before freeing it. */
    ret = extract_lltask_from_task(task, tf_model->lltask_queue);
    if (ret != 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
        return ret;
    }

    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return AVERROR(EINVAL);
    }
    return execute_model_tf(request, tf_model->lltask_queue);
}
869 
871 {
872  TFModel *tf_model = model->model;
873  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
874 }
875 
876 static int dnn_flush_tf(const DNNModel *model)
877 {
878  TFModel *tf_model = model->model;
879  TFContext *ctx = &tf_model->ctx;
880  TFRequestItem *request;
881  int ret;
882 
883  if (ff_queue_size(tf_model->lltask_queue) == 0) {
884  // no pending task need to flush
885  return 0;
886  }
887 
888  request = ff_safe_queue_pop_front(tf_model->request_queue);
889  if (!request) {
890  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
891  return AVERROR(EINVAL);
892  }
893 
894  ret = fill_model_input_tf(tf_model, request);
895  if (ret != 0) {
896  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
897  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
898  destroy_request_item(&request);
899  }
900  return ret;
901  }
902 
903  return ff_dnn_start_inference_async(ctx, &request->exec_module);
904 }
905 
908  .execute_model = dnn_execute_model_tf,
909  .get_result = dnn_get_result_tf,
910  .flush = dnn_flush_tf,
911  .free_model = dnn_free_model_tf,
912 };
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_tensorflow)
TFOptions::sess_config
char * sess_config
Definition: dnn_backend_tf.c:39
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:64
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:65
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:776
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:79
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:52
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1459
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
TFModel::ctx
TFContext ctx
Definition: dnn_backend_tf.c:50
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:58
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:197
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
LastLevelTaskItem
Definition: dnn_backend_common.h:50
test::height
int height
Definition: vc1dsp.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AVFrame::width
int width
Definition: frame.h:412
dnn_load_model_tf
static DNNModel * dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:530
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:251
DNNModule::load_model
DNNModel *(* load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_interface.h:123
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:68
data
const char data[16]
Definition: mxf.c:148
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:394
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:55
TaskItem
Definition: dnn_backend_common.h:36
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:70
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:370
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:78
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:183
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:51
get_input_tf
static int get_input_tf(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:272
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:364
Queue
Linear double-ended data structure.
Definition: queue.c:33
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:35
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:624
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:75
float
float
Definition: af_crystalizer.c:121
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:217
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:80
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:162
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:181
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:413
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1667
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:132
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:92
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:57
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:718
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:54
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:104
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:209
options
const OptionDef options[]
test::width
int width
Definition: vc1dsp.c:38
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
dnn_flush_tf
static int dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:876
TFOptions::nireq
uint32_t nireq
Definition: dnn_backend_tf.c:41
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:365
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:153
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1304
ff_dnn_backend_tf
const DNNModule ff_dnn_backend_tf
Definition: dnn_backend_tf.c:906
dnn_execute_model_tf
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:822
TFContext::options
TFOptions options
Definition: dnn_backend_tf.c:46
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:74
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:66
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:53
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:72
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:63
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:76
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:227
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
TFOptions::async
uint8_t async
Definition: dnn_backend_tf.c:40
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:67
TFContext
Definition: dnn_backend_tf.c:44
ret
ret
Definition: filter_design.txt:187
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TFModel::model
DNNModel * model
Definition: dnn_backend_tf.c:51
TFModel
Definition: dnn_backend_tf.c:49
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
dnn_get_result_tf
static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:870
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
AVFrame::height
int height
Definition: frame.h:412
status
ov_status_e status
Definition: dnn_backend_openvino.c:119
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:250
outputs
static const AVFilterPad outputs[]
Definition: af_afwtdn.c:1291
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:73
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:659
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:142
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:42
avio_open
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1239
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:111
DNNModel
Definition: dnn_interface.h:93
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:636
dnn_free_model_tf
static void dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:489
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
DNNModel::options
const char * options
Definition: dnn_interface.h:97
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
TFOptions
Definition: dnn_backend_tf.c:38
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:92
get_output_tf
static int get_output_tf(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:320
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
TFRequestItem
Definition: dnn_backend_tf.c:71
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:56
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:44
DNNModule
Definition: dnn_interface.h:121
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:41