61 #define OFFSET(x) offsetof(DnnDetectContext, dnnctx.x)
62 #define OFFSET2(x) offsetof(DnnDetectContext, x)
63 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
66 #if (CONFIG_LIBTENSORFLOW == 1)
69 #if (CONFIG_LIBOPENVINO == 1)
76 { "ssd",    "output shape [1, 1, N, 7]",              0, AV_OPT_TYPE_CONST, { .i64 = DDMT_SSD },      0, 0, FLAGS, .unit = "model_type" },
77 { "yolo",   "output shape [1, N*Cx*Cy*DetectionBox]", 0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV1V2 }, 0, 0, FLAGS, .unit = "model_type" },
78 { "yolov3", "outputs shape [1, N*D, Cx, Cy]",         0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV3 },   0, 0, FLAGS, .unit = "model_type" },
79 { "yolov4", "outputs shape [1, N*D, Cx, Cy]",         0, AV_OPT_TYPE_CONST, { .i64 = DDMT_YOLOV4 },   0, 0, FLAGS, .unit = "model_type" },
90 return 1.f / (1.f + exp(-x));
93 static inline float linear(float x) {
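The two helpers above are the per-model post-processing of raw network values: YOLOv1/v2 outputs are used as-is (linear), while YOLOv3/v4 raw values are squashed through a sigmoid before being read as offsets and confidences (see the switch on model_type further down). A minimal standalone sketch of that selection; the helper name pick_post_proc and the _f suffixes are illustrative, not from the file:

#include <math.h>

typedef float (*post_proc_fn)(float);

static float sigmoid_f(float x) { return 1.f / (1.f + expf(-x)); }
static float linear_f(float x)  { return x; }

/* choose how raw output values are mapped before interpretation */
static post_proc_fn pick_post_proc(int is_yolov3_or_later)
{
    return is_yolov3_or_later ? sigmoid_f : linear_f;
}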
101 for (int i = 0; i < nb_classes; i++) {
102 if (label_data[i * cell_size] > max_prob) {
103 max_prob = label_data[i * cell_size];
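The loop above is an argmax over class scores laid out with a stride of cell_size floats. A self-contained equivalent, with illustrative names:

/* index of the largest of nb values spaced 'stride' floats apart */
static int argmax_strided(const float *data, int nb, int stride)
{
    int best_id = 0;
    float best = data[0];
    for (int i = 1; i < nb; i++) {
        if (data[i * stride] > best) {
            best = data[i * stride];
            best_id = i;
        }
    }
    return best_id;
}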
112 char *saveptr = NULL, *token;
114 int nb_anchor = 0, i = 0;
115 while(anchors_str[i] != '\0') {
116 if(anchors_str[i] == '&')
121 anchors_buf = av_mallocz(nb_anchor * sizeof(**anchors));
125 for (int i = 0; i < nb_anchor; i++) {
126 token = av_strtok(anchors_str, "&", &saveptr);
131 anchors_buf[i] = strtof(token, NULL);
134 *anchors = anchors_buf;
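The anchors option value is a single string of floats separated by '&'; the parser above counts the separators, allocates the array with av_mallocz(), and fills it with strtof(). A hedged usage sketch, assuming the function returns the number of anchors parsed (as the calling code elsewhere in the file suggests); the anchor values are arbitrary example numbers:

float *anchors = NULL;
char anchors_str[] = "12&16&19&36&40&28";   /* arbitrary example values */
int nb_anchor = dnn_detect_parse_anchors(anchors_str, &anchors);
if (nb_anchor > 0) {
    /* anchors[0..nb_anchor-1] now hold the parsed floats; the caller owns the buffer */
    av_freep(&anchors);
}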
141 float overlapping_width = FFMIN(bbox1->x + bbox1->w, bbox2->x + bbox2->w) - FFMAX(bbox1->x, bbox2->x);
142 float overlapping_height = FFMIN(bbox1->y + bbox1->h, bbox2->y + bbox2->h) - FFMAX(bbox1->y, bbox2->y);
143 float intersection_area =
144 (overlapping_width < 0 || overlapping_height < 0) ? 0 : overlapping_height * overlapping_width;
145 float union_area = bbox1->w * bbox1->h + bbox2->w * bbox2->h - intersection_area;
146 return intersection_area / union_area;
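dnn_detect_IOU() returns the intersection-over-union of two axis-aligned boxes, which the filter later uses to suppress heavily overlapping candidates. A minimal sketch of such a suppression test; the 0.45f threshold and the helper name are illustrative, not constants from the file:

/* keep a candidate only if it does not overlap any already-kept box too much */
static int bbox_is_kept(AVDetectionBBox **kept, int nb_kept, AVDetectionBBox *cand)
{
    for (int i = 0; i < nb_kept; i++) {
        if (dnn_detect_IOU(kept[i], cand) > 0.45f)
            return 0;
    }
    return 1;
}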
153 float conf_threshold = ctx->confidence;
154 int detection_boxes, box_size;
155 int cell_w = 0, cell_h = 0, scale_w = 0, scale_h = 0;
156 int nb_classes = ctx->nb_classes;
158 float *anchors = ctx->anchors;
164 cell_w = ctx->cell_w;
165 cell_h = ctx->cell_h;
169 if (output[output_index].dims[2] != output[output_index].dims[3] &&
170 output[output_index].dims[2] == output[output_index].dims[1]) {
172 cell_w = output[output_index].dims[2];
173 cell_h = output[output_index].dims[1];
175 cell_w = output[output_index].dims[3];
176 cell_h = output[output_index].dims[2];
178 scale_w = ctx->scale_width;
179 scale_h = ctx->scale_height;
181 box_size = nb_classes + 5;
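box_size is nb_classes + 5 because every YOLO detection box carries four coordinates and an objectness score ahead of the per-class scores. The layout, as implied by the indices used below:

/*
 * one detection box = box_size floats:
 *   [0] x offset   [1] y offset   [2] w (raw, combined with anchors)
 *   [3] h (raw)    [4] objectness confidence
 *   [5 .. 5 + nb_classes - 1] per-class scores
 */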
183 switch (ctx->model_type) {
186 post_process_raw_data = linear;
189 post_process_raw_data = sigmoid;
193 if (!cell_h || !cell_w) {
208 if (output[output_index].dims[1] * output[output_index].dims[2] *
209 output[output_index].dims[3] % (box_size * cell_w * cell_h)) {
213 detection_boxes = output[output_index].dims[1] *
214 output[output_index].dims[2] *
215 output[output_index].dims[3] / box_size / cell_w / cell_h;
217 anchors = anchors + (detection_boxes * output_index * 2);
223 for (int box_id = 0; box_id < detection_boxes; box_id++) {
224 for (int cx = 0; cx < cell_w; cx++)
225 for (int cy = 0; cy < cell_h; cy++) {
226 float x, y, w, h, conf;
227 float *detection_boxes_data;
232 ((cy * cell_w + cx) * detection_boxes + box_id) * box_size;
233 conf = post_process_raw_data(detection_boxes_data[4]);
235 detection_boxes_data = output_data + box_id * box_size * cell_w * cell_h;
236 conf = post_process_raw_data(
237 detection_boxes_data[cy * cell_w + cx + 4 * cell_w * cell_h]);
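The two indexing schemes above correspond to different memory layouts: YOLOv1/v2 keeps all box_size fields of a box contiguous per cell, whereas YOLOv3/v4 stores each field as a full cell_w x cell_h plane, so field f of cell (cx, cy) lives at f * cell_w * cell_h + cy * cell_w + cx. A small illustrative helper (not from the file):

/* offset of field 'f' for cell (cx, cy) in the planar (YOLOv3/v4) layout */
static inline int planar_offset(int f, int cx, int cy, int cell_w, int cell_h)
{
    return f * cell_w * cell_h + cy * cell_w + cx;
}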
241 x = post_process_raw_data(detection_boxes_data[0]);
242 y = post_process_raw_data(detection_boxes_data[1]);
243 w = detection_boxes_data[2];
244 h = detection_boxes_data[3];
246 conf = conf * post_process_raw_data(detection_boxes_data[label_id + 5]);
248 x = post_process_raw_data(detection_boxes_data[cy * cell_w + cx]);
249 y = post_process_raw_data(detection_boxes_data[cy * cell_w + cx + cell_w * cell_h]);
250 w = detection_boxes_data[cy * cell_w + cx + 2 * cell_w * cell_h];
251 h = detection_boxes_data[cy * cell_w + cx + 3 * cell_w * cell_h];
253 detection_boxes_data + cy * cell_w + cx + 5 * cell_w * cell_h);
254 conf = conf * post_process_raw_data(
255 detection_boxes_data[cy * cell_w + cx + (label_id + 5) * cell_w * cell_h]);
257 if (conf < conf_threshold) {
267 bbox->x = (cx + x) / cell_w * frame->width - bbox->w / 2;
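Line 267 converts the cell-relative centre (cx + x) into frame pixels and shifts by half the box width to get the left edge; the neighbouring lines do the same for y, w and h. A condensed restatement of that mapping (helper name is illustrative):

/* left edge in pixels from a cell-relative centre coordinate */
static inline float centre_to_left(int cx, float x, int cell_w, int frame_w, float box_w)
{
    return (cx + x) / (float)cell_w * frame_w - box_w / 2;
}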
270 if (ctx->labels && label_id < ctx->label_count) {
289 float conf_threshold = ctx->confidence;
327 memcpy(bbox, candidate_bbox, sizeof(*bbox));
351 for (int i = 0; i < nb_outputs; i++) {
366 float conf_threshold = ctx->confidence;
367 int proposal_count = 0;
369 float *detections = NULL, *labels = NULL;
373 int scale_w = ctx->scale_width;
374 int scale_h = ctx->scale_height;
376 if (nb_outputs == 1 && output->dims[3] == 7) {
377 proposal_count = output->dims[2];
378 detect_size = output->dims[3];
379 detections = output->data;
380 } else if (nb_outputs == 2 && output[0].dims[3] == 5) {
381 proposal_count = output[0].dims[2];
382 detect_size = output[0].dims[3];
383 detections = output[0].data;
385 } else if (nb_outputs == 2 && output[1].dims[3] == 5) {
386 proposal_count = output[1].dims[2];
387 detect_size = output[1].dims[3];
388 detections = output[1].data;
395 if (proposal_count == 0)
398 for (int i = 0; i < proposal_count; ++i) {
401 conf = detections[i * detect_size + 2];
403 conf = detections[i * detect_size + 4];
404 if (conf < conf_threshold) {
410 if (nb_bboxes == 0) {
423 for (int i = 0; i < proposal_count; ++i) {
424 int av_unused image_id = (int)detections[i * detect_size + 0];
426 float conf, x0, y0, x1, y1;
428 if (nb_outputs == 1) {
429 label_id = (int)detections[i * detect_size + 1];
430 conf = detections[i * detect_size + 2];
431 x0 = detections[i * detect_size + 3];
432 y0 = detections[i * detect_size + 4];
433 x1 = detections[i * detect_size + 5];
434 y1 = detections[i * detect_size + 6];
436 label_id = (int)labels[i];
437 x0 = detections[i * detect_size] / scale_w;
438 y0 = detections[i * detect_size + 1] / scale_h;
439 x1 = detections[i * detect_size + 2] / scale_w;
440 y1 = detections[i * detect_size + 3] / scale_h;
441 conf = detections[i * detect_size + 4];
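The indices above spell out the layout of one proposal: in the single-output SSD case each row of the [1, 1, N, 7] tensor is (image_id, label, confidence, x_min, y_min, x_max, y_max), while the two-output variant stores five values per box plus a separate labels tensor. Summarised as a comment; normalisation of the 7-value coordinates to [0, 1] is an assumption consistent with typical SSD models:

/*
 * single-output SSD proposal (detect_size == 7):
 *   [0] image_id  [1] label  [2] confidence
 *   [3] x_min     [4] y_min  [5] x_max  [6] y_max
 * two-output variant (detect_size == 5):
 *   [x0, y0, x1, y1, confidence], coordinates divided by scale_w/scale_h,
 *   with label ids in a second output tensor
 */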
444 if (conf < conf_threshold) {
457 if (ctx->labels && label_id < ctx->label_count) {
464 if (nb_bboxes == 0) {
484 switch (ctx->model_type) {
509 float conf_threshold = ctx->confidence;
510 float *conf, *position, *label_id, x0, y0, x1, y1;
518 position = output[3].data;
519 label_id = output[2].data;
527 for (int i = 0; i < proposal_count; ++i) {
528 if (conf[i] < conf_threshold)
533 if (nb_bboxes == 0) {
546 for (int i = 0; i < proposal_count; ++i) {
547 y0 = position[i * 4];
548 x0 = position[i * 4 + 1];
549 y1 = position[i * 4 + 2];
550 x1 = position[i * 4 + 3];
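Note the y-before-x order above: each entry of the position output is stored as [y_min, x_min, y_max, x_max], which is why position[i * 4] is read as y0. A hypothetical view of one such box, for illustration only:

/* illustrative layout of one entry in the 'position' output tensor */
typedef struct TFDetectionBox {
    float y_min, x_min, y_max, x_max;
} TFDetectionBox;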
554 if (conf[i] < conf_threshold) {
566 if (ctx->labels && label_id[i] < ctx->label_count) {
573 if (nb_bboxes == 0) {
597 for (int i = 0; i < ctx->label_count; i++) {
600 ctx->label_count = 0;
616 while (!feof(file)) {
619 if (!fgets(buf, 256, file)) {
623 line_len = strlen(buf);
625 int i = line_len - 1;
626 if (buf[i] == '\n' || buf[i] == '\r' || buf[i] == ' ') {
664 switch(backend_type) {
666 if (output_nb != 4) {
668 but get %d instead\n", output_nb);
694 if (!ctx->bboxes_fifo)
698 if (ctx->labels_filename) {
701 if (ctx->anchors_str) {
741 *out_pts = in_frame->pts + pts;
824 int ret, width_idx, height_idx;
833 ctx->scale_width = model_input.dims[width_idx] == -1 ? inlink->w :
834 model_input.dims[width_idx];
835 ctx->scale_height = model_input.dims[height_idx] == -1 ? inlink->h :
836 model_input.dims[height_idx];
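A model input dimension reported as -1 means the network accepts a dynamic size, so config_input() falls back to the input link's width/height for scaling. The same decision as a one-liner; the helper name is illustrative:

/* use the link's dimension whenever the model reports a dynamic (-1) size */
static inline int resolve_model_dim(int model_dim, int link_dim)
{
    return model_dim == -1 ? link_dim : model_dim;
}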
850 .name = "dnn_detect",
858 .priv_class = &dnn_detect_class,