#ifndef AVFILTER_DNN_DNN_BACKEND_COMMON_H
#define AVFILTER_DNN_DNN_BACKEND_COMMON_H

#include "../dnn_interface.h"

#define DNN_BACKEND_COMMON_OPTIONS \
    { "nireq",  "number of request",       OFFSET(options.nireq), AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
    { "async",  "use DNN async inference", OFFSET(options.async), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1,       FLAGS },
#if HAVE_PTHREAD_CANCEL
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start an asynchronous inference routine for the TensorFlow model on a detached thread; the completion callback runs after the inference completes.
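A sketch of how a backend might wire a request into this mechanism. MyRequest, my_backend_infer and my_backend_done are hypothetical names, not part of this header:

typedef struct MyRequest {          /* hypothetical per-request state */
    DNNAsyncExecModule exec_module;
    /* ...backend-specific tensors and handles... */
} MyRequest;

static int my_backend_infer(void *request);  /* runs the model synchronously */
static void my_backend_done(void *args);     /* consumes the finished request */

static int launch_one_inference(void *ctx, MyRequest *request)
{
    DNNAsyncExecModule *exec = &request->exec_module;

    exec->start_inference = my_backend_infer; /* runs on the detached thread */
    exec->callback        = my_backend_done;  /* runs after inference completes */
    exec->args            = request;

    return ff_dnn_start_inference_async(ctx, exec);
}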
AVFrame
This structure describes decoded (raw) audio or video data.
void (*callback)(void *args)
Completion callback for the backend.
Queue
Linear double-ended data structure.
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
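The usual caller is a backend's get_output probe, which runs the model once at a known input size to discover the output size. A sketch, with input_name, output_name, my_model and ctx as illustrative stand-ins:

TaskItem task;
DNNExecBaseParams exec_params = {
    .input_name   = input_name,
    .output_names = &output_name,
    .nb_output    = 1,
    .in_frame     = NULL,   /* the helper allocates both frames */
    .out_frame    = NULL,
};

int ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, my_model,
                                         input_height, input_width, ctx);
if (ret != 0)
    return ret;
/* ...execute the task, then read task.out_frame->width/height... */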
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for backend execution. It should be called after the execution parameters have been checked with ff_check_exec_params.
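A sketch of the common execute path built on this helper; ctx, my_model and exec_params are the backend's own, and error handling is condensed:

TaskItem *task = av_malloc(sizeof(*task));
if (!task)
    return AVERROR(ENOMEM);

int ret = ff_dnn_fill_task(task, exec_params, my_model,
                           ctx->options.async, /*do_ioproc=*/1);
if (ret != 0) {
    av_freep(&task);
    return ret;
}
/* ...push the task onto the model's task queue and start inference... */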
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Sanity-check the execution parameters for the given backend and function type before a task is filled.
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract the input and output frames from the task queue after asynchronous inference.
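A sketch of polling for a finished pair; the DAST_* status values come from dnn_interface.h, and my_model->task_queue is an illustrative stand-in for the queue a backend keeps per model:

AVFrame *in_frame = NULL, *out_frame = NULL;
DNNAsyncStatusType status = ff_dnn_get_result_common(my_model->task_queue,
                                                     &in_frame, &out_frame);
if (status == DAST_SUCCESS) {
    /* a completed task was popped; both frames are valid */
} else if (status == DAST_NOT_READY) {
    /* inference still in flight; poll again later */
} /* DAST_EMPTY_QUEUE: nothing was queued */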
int (*start_inference)(void *request)
Synchronous inference function for the backend, with the corresponding request item as the argument.
void *args
Argument for the execution functions, i.e. the request item for the backend.
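Assembled from the member tooltips above, a sketch of the struct they belong to. The members inside the HAVE_PTHREAD_CANCEL guard are an assumption about what the guard (preserved in the listing above) encloses:

typedef struct DNNAsyncExecModule {
    /* Synchronous inference function, called with the request item. */
    int (*start_inference)(void *request);
    /* Completion callback for the backend. */
    void (*callback)(void *args);
    /* Argument passed to both functions, i.e. the request item. */
    void *args;
#if HAVE_PTHREAD_CANCEL
    pthread_t thread_id;        /* assumed: handle of the detached thread */
    pthread_attr_t thread_attr; /* assumed: attributes used to detach it */
#endif
} DNNAsyncExecModule;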
const char **output_names
Names of the model outputs.