61 #define OFFSET(x) offsetof(DnnDetectContext, dnnctx.x)
62 #define OFFSET2(x) offsetof(DnnDetectContext, x)
63 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
66 #if (CONFIG_LIBTENSORFLOW == 1)
69 #if (CONFIG_LIBOPENVINO == 1)
75     { "ssd",    "output shape [1, 1, N, 7]",              0, AV_OPT_TYPE_CONST, { .i64 = DDMT_SSD },      0, 0, FLAGS, .unit = "model_type" },
76     { "yolo",   "output shape [1, N*Cx*Cy*DetectionBox]", 0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV1V2 }, 0, 0, FLAGS, .unit = "model_type" },
77     { "yolov3", "outputs shape [1, N*D, Cx, Cy]",         0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV3 },   0, 0, FLAGS, .unit = "model_type" },
78     { "yolov4", "outputs shape [1, N*D, Cx, Cy]",         0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV4 },   0, 0, FLAGS, .unit = "model_type" },
89     return 1.f / (1.f + exp(-x));
92 static inline float linear(float x) {
100     for (int i = 0; i < nb_classes; i++) {
101         if (label_data[i * cell_size] > max_prob) {
102             max_prob = label_data[i * cell_size];
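The loop above is a strided argmax: the class scores that belong to one cell sit cell_size floats apart in the output buffer. The same idea in isolation (strided_argmax is a made-up name, and the data layout is assumed from the loop above):

/* Pick the index of the largest of nb_classes scores spaced stride apart. */
int strided_argmax(const float *scores, int nb_classes, int stride)
{
    int best = 0;
    for (int i = 1; i < nb_classes; i++)
        if (scores[i * stride] > scores[best * stride])
            best = i;
    return best;
}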
111     char *saveptr = NULL, *token;
113     int nb_anchor = 0, i = 0;
114     while(anchors_str[i] != '\0') {
115         if(anchors_str[i] == '&')
120     anchors_buf = av_mallocz(nb_anchor * sizeof(**anchors));
124     for (int i = 0; i < nb_anchor; i++) {
125         token = av_strtok(anchors_str, "&", &saveptr);
130         anchors_buf[i] = strtof(token, NULL);
133     *anchors = anchors_buf;
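The anchors option is parsed in two passes: count the '&' separators to size the allocation, then tokenize and convert. A portable sketch of the same pattern, with POSIX strtok_r swapped in for FFmpeg's av_strtok and with error handling trimmed down (parse_anchors is illustrative, not the filter's function):

#include <stdlib.h>
#include <string.h>

/* Parse a "12&16&19"-style list of floats; returns the count, or 0 on
 * allocation failure. The caller frees *out. Destroys the input string,
 * as strtok_r does. */
int parse_anchors(char *s, float **out)
{
    int n = 1;                        /* tokens = separators + 1 */
    for (char *p = s; *p; p++)
        if (*p == '&')
            n++;
    float *buf = calloc(n, sizeof(*buf));
    if (!buf)
        return 0;
    char *save = NULL, *tok = strtok_r(s, "&", &save);
    for (int i = 0; i < n && tok; i++, tok = strtok_r(NULL, "&", &save))
        buf[i] = strtof(tok, NULL);
    *out = buf;
    return n;
}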
140     float overlapping_width  = FFMIN(bbox1->x + bbox1->w, bbox2->x + bbox2->w) - FFMAX(bbox1->x, bbox2->x);
141     float overlapping_height = FFMIN(bbox1->y + bbox1->h, bbox2->y + bbox2->h) - FFMAX(bbox1->y, bbox2->y);
142     float intersection_area =
143         (overlapping_width < 0 || overlapping_height < 0) ? 0 : overlapping_height * overlapping_width;
144     float union_area = bbox1->w * bbox1->h + bbox2->w * bbox2->h - intersection_area;
145     return intersection_area / union_area;
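Intersection-over-union as computed above, in a self-contained form with a quick numeric check. The Box struct and helper names are stand-ins for AVDetectionBBox and FFMIN/FFMAX:

#include <assert.h>

typedef struct { float x, y, w, h; } Box;

float min2(float a, float b) { return a < b ? a : b; }
float max2(float a, float b) { return a > b ? a : b; }

/* IoU = intersection / union; a negative overlap in either axis means
 * the boxes are disjoint, exactly as in dnn_detect_IOU() above. */
float iou(Box a, Box b)
{
    float ow = min2(a.x + a.w, b.x + b.w) - max2(a.x, b.x);
    float oh = min2(a.y + a.h, b.y + b.h) - max2(a.y, b.y);
    float inter = (ow < 0 || oh < 0) ? 0 : ow * oh;
    return inter / (a.w * a.h + b.w * b.h - inter);
}

int main(void)
{
    Box a = {0, 0, 2, 2}, b = {1, 1, 2, 2};   /* 1x1 overlap, union 7 */
    assert(iou(a, b) > 0.142f && iou(a, b) < 0.143f);  /* 1/7 */
    return 0;
}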
152     float conf_threshold = ctx->confidence;
153     int detection_boxes, box_size;
154     int cell_w = 0, cell_h = 0, scale_w = 0, scale_h = 0;
155     int nb_classes = ctx->nb_classes;
157     float *anchors = ctx->anchors;
163     cell_w = ctx->cell_w;
164     cell_h = ctx->cell_h;
168     if (output[output_index].dims[2] != output[output_index].dims[3] &&
169         output[output_index].dims[2] == output[output_index].dims[1]) {
171         cell_w = output[output_index].dims[2];
172         cell_h = output[output_index].dims[1];
174         cell_w = output[output_index].dims[3];
175         cell_h = output[output_index].dims[2];
177     scale_w = ctx->scale_width;
178     scale_h = ctx->scale_height;
180     box_size = nb_classes + 5;
182     switch (ctx->model_type) {
185         post_process_raw_data = linear;
188         post_process_raw_data = sigmoid;
192     if (!cell_h || !cell_w) {
207     if (output[output_index].dims[1] * output[output_index].dims[2] *
208         output[output_index].dims[3] % (box_size * cell_w * cell_h)) {
212     detection_boxes = output[output_index].dims[1] *
213                       output[output_index].dims[2] *
214                       output[output_index].dims[3] / box_size / cell_w / cell_h;
216 anchors = anchors + (detection_boxes * output_index * 2);
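A worked example of the box_size arithmetic above, with illustrative numbers (an 80-class COCO model is assumed): each box carries x, y, w, h and an objectness score plus one score per class, so a [1, 255, 13, 13] output on a 13x13 grid holds 3 boxes per cell.

#include <assert.h>

int main(void)
{
    int nb_classes = 80;
    int box_size   = nb_classes + 5;        /* x, y, w, h, objectness + classes */
    int dims[4]    = {1, 255, 13, 13};      /* [batch, N*D, Cy, Cx] */
    int cell_w     = 13, cell_h = 13;
    int detection_boxes = dims[1] * dims[2] * dims[3] / box_size / cell_w / cell_h;
    assert(detection_boxes == 3);           /* 255*13*13 / (85*13*13) */
    return 0;
}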
222     for (int box_id = 0; box_id < detection_boxes; box_id++) {
223         for (int cx = 0; cx < cell_w; cx++)
224             for (int cy = 0; cy < cell_h; cy++) {
225                 float x, y, w, h, conf;
226                 float *detection_boxes_data;
231                     ((cy * cell_w + cx) * detection_boxes + box_id) * box_size;
232                 conf = post_process_raw_data(detection_boxes_data[4]);
234                 detection_boxes_data = output_data + box_id * box_size * cell_w * cell_h;
235                 conf = post_process_raw_data(
236                     detection_boxes_data[cy * cell_w + cx + 4 * cell_w * cell_h]);
240                 x = post_process_raw_data(detection_boxes_data[0]);
241                 y = post_process_raw_data(detection_boxes_data[1]);
242                 w = detection_boxes_data[2];
243                 h = detection_boxes_data[3];
245                 conf = conf * post_process_raw_data(detection_boxes_data[label_id + 5]);
247                 x = post_process_raw_data(detection_boxes_data[cy * cell_w + cx]);
248                 y = post_process_raw_data(detection_boxes_data[cy * cell_w + cx + cell_w * cell_h]);
249                 w = detection_boxes_data[cy * cell_w + cx + 2 * cell_w * cell_h];
250                 h = detection_boxes_data[cy * cell_w + cx + 3 * cell_w * cell_h];
252                     detection_boxes_data + cy * cell_w + cx + 5 * cell_w * cell_h);
253                 conf = conf * post_process_raw_data(
254                     detection_boxes_data[cy * cell_w + cx + (label_id + 5) * cell_w * cell_h]);
256 if (conf < conf_threshold) {
264             bbox->w = exp(w) * anchors[box_id * 2]     * frame->width  / scale_w;
265             bbox->h = exp(h) * anchors[box_id * 2 + 1] * frame->height / scale_h;
266             bbox->x = (cx + x) / cell_w * frame->width  - bbox->w / 2;
267             bbox->y = (cy + y) / cell_h * frame->height - bbox->h / 2;
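Decoding one raw prediction into a pixel-space box, following lines 264-267: grid offset plus the sigmoid-squashed center for position, anchor-scaled exponentials for size. All input values below are made up for illustration:

#include <math.h>
#include <stdio.h>

int main(void)
{
    int   cell_w = 13, cell_h = 13;          /* grid size */
    int   scale_w = 416, scale_h = 416;      /* network input size */
    int   frame_w = 640, frame_h = 480;      /* video frame size */
    int   cx = 6, cy = 4;                    /* cell of this prediction */
    float anchor_w = 116, anchor_h = 90;     /* one anchor pair */
    float tx = 0.2f, ty = -0.1f, tw = 0.05f, th = 0.1f;  /* raw outputs */

    float x = 1.f / (1.f + expf(-tx));       /* sigmoid squashes the center */
    float y = 1.f / (1.f + expf(-ty));
    float w  = expf(tw) * anchor_w * frame_w / scale_w;
    float h  = expf(th) * anchor_h * frame_h / scale_h;
    float x0 = (cx + x) / cell_w * frame_w - w / 2;
    float y0 = (cy + y) / cell_h * frame_h - h / 2;

    printf("box: x=%.1f y=%.1f w=%.1f h=%.1f\n", x0, y0, w, h);
    return 0;
}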
269             if (ctx->labels && label_id < ctx->label_count) {
288     float conf_threshold = ctx->confidence;
326         memcpy(bbox, candidate_bbox, sizeof(*bbox));
350     for (int i = 0; i < nb_outputs; i++) {
365     float conf_threshold = ctx->confidence;
366 int proposal_count = 0;
368     float *detections = NULL, *labels = NULL;
372     int scale_w = ctx->scale_width;
373     int scale_h = ctx->scale_height;
375     if (nb_outputs == 1 && output->dims[3] == 7) {
376         proposal_count = output->dims[2];
377         detect_size    = output->dims[3];
378         detections     = output->data;
379     } else if (nb_outputs == 2 && output[0].dims[3] == 5) {
380         proposal_count = output[0].dims[2];
381         detect_size    = output[0].dims[3];
382         detections     = output[0].data;
384     } else if (nb_outputs == 2 && output[1].dims[3] == 5) {
385         proposal_count = output[1].dims[2];
386         detect_size    = output[1].dims[3];
387         detections     = output[1].data;
394 if (proposal_count == 0)
397     for (int i = 0; i < proposal_count; ++i) {
400             conf = detections[i * detect_size + 2];
402             conf = detections[i * detect_size + 4];
403 if (conf < conf_threshold) {
409 if (nb_bboxes == 0) {
422     for (int i = 0; i < proposal_count; ++i) {
423         int av_unused image_id = (int)detections[i * detect_size + 0];
425         float conf, x0, y0, x1, y1;
427         if (nb_outputs == 1) {
428             label_id = (int)detections[i * detect_size + 1];
429             conf     = detections[i * detect_size + 2];
430             x0       = detections[i * detect_size + 3];
431             y0       = detections[i * detect_size + 4];
432             x1       = detections[i * detect_size + 5];
433             y1       = detections[i * detect_size + 6];
435             label_id = (int)labels[i];
436             x0   = detections[i * detect_size]     / scale_w;
437             y0   = detections[i * detect_size + 1] / scale_h;
438             x1   = detections[i * detect_size + 2] / scale_w;
439             y1   = detections[i * detect_size + 3] / scale_h;
440             conf = detections[i * detect_size + 4];
443 if (conf < conf_threshold) {
448         bbox->x = (int)(x0 * frame->width);
449         bbox->w = (int)(x1 * frame->width)  - bbox->x;
450         bbox->y = (int)(y0 * frame->height);
451         bbox->h = (int)(y1 * frame->height) - bbox->y;
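The corner-to-rect conversion at lines 448-451, isolated: SSD emits normalized [x0, y0, x1, y1] corners, which become an integer x/y/w/h rectangle in frame pixels. The Rect type and function name below are hypothetical stand-ins:

typedef struct { int x, y, w, h; } Rect;

/* Normalized corners -> pixel rect, mirroring the SSD path above.
 * Width and height are derived from the already-truncated corners,
 * so rounding stays consistent with the x/y fields. */
Rect corners_to_rect(float x0, float y0, float x1, float y1,
                     int frame_w, int frame_h)
{
    Rect r;
    r.x = (int)(x0 * frame_w);
    r.w = (int)(x1 * frame_w) - r.x;
    r.y = (int)(y0 * frame_h);
    r.h = (int)(y1 * frame_h) - r.y;
    return r;
}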
456         if (ctx->labels && label_id < ctx->label_count) {
463 if (nb_bboxes == 0) {
483     switch (ctx->model_type) {
508     float conf_threshold = ctx->confidence;
509 float *conf, *position, *label_id, x0, y0, x1, y1;
517     position = output[3].data;
518     label_id = output[2].data;
526     for (int i = 0; i < proposal_count; ++i) {
527         if (conf[i] < conf_threshold)
532 if (nb_bboxes == 0) {
545     for (int i = 0; i < proposal_count; ++i) {
546         y0 = position[i * 4];
547         x0 = position[i * 4 + 1];
548         y1 = position[i * 4 + 2];
549         x1 = position[i * 4 + 3];
553         if (conf[i] < conf_threshold) {
557         bbox->x = (int)(x0 * frame->width);
558         bbox->w = (int)(x1 * frame->width)  - bbox->x;
559         bbox->y = (int)(y0 * frame->height);
560         bbox->h = (int)(y1 * frame->height) - bbox->y;
565         if (ctx->labels && label_id[i] < ctx->label_count) {
572 if (nb_bboxes == 0) {
596     for (int i = 0; i < ctx->label_count; i++) {
599 ctx->label_count = 0;
615 while (!feof(file)) {
618 if (!fgets(buf, 256, file)) {
622 line_len = strlen(buf);
624 int i = line_len - 1;
625         if (buf[i] == '\n' || buf[i] == '\r' || buf[i] == ' ') {
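The read loop above strips trailing newline, carriage-return, and space characters from each label line. A self-contained version of that trimming (trim_label is a hypothetical name, and the repeat-until-clean behavior is assumed from the surrounding loop; the filter additionally duplicates the result with av_strdup into a dynamic array):

#include <string.h>

/* Strip trailing '\n', '\r' and ' ' in place. */
void trim_label(char *buf)
{
    size_t len = strlen(buf);
    while (len) {
        char c = buf[len - 1];
        if (c != '\n' && c != '\r' && c != ' ')
            break;
        buf[--len] = '\0';
    }
}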
663 switch(backend_type) {
665         if (output_nb != 4) {
667             av_log(ctx, AV_LOG_ERROR, "Only support tensorflow detect model with 4 outputs, but get %d instead\n", output_nb);
693     if (!ctx->bboxes_fifo)
697     if (ctx->labels_filename) {
700     if (ctx->anchors_str) {
740     *out_pts = in_frame->pts + pts;
809     if (ctx->bboxes_fifo) {
825 int ret, width_idx, height_idx;
834     ctx->scale_width  = model_input.dims[width_idx]  == -1 ? inlink->w :
835                         model_input.dims[width_idx];
836     ctx->scale_height = model_input.dims[height_idx] == -1 ? inlink->h :
837                         model_input.dims[height_idx];
851     .name          = "dnn_detect",
860 .priv_class = &dnn_detect_class,