// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*

    This example program shows how you can use dlib to make an object detector
    for things like faces, pedestrians, and any other semi-rigid object. In
    particular, we go through the steps to train the kind of sliding window
    object detector first published by Dalal and Triggs in 2005 in the paper
    Histograms of Oriented Gradients for Human Detection.

    Note that this program executes fastest when compiled with at least SSE2
    instructions enabled. So if you are using a PC with an Intel or AMD chip
    then you should enable at least SSE2 instructions. If you are using cmake
    to compile this program you can enable them by using one of the following
    commands when you create the build project:
        cmake path_to_dlib_root/examples -DUSE_SSE2_INSTRUCTIONS=ON
        cmake path_to_dlib_root/examples -DUSE_SSE4_INSTRUCTIONS=ON
        cmake path_to_dlib_root/examples -DUSE_AVX_INSTRUCTIONS=ON
    This will set the appropriate compiler options for GCC, clang, Visual
    Studio, or the Intel compiler. If you are using another compiler then you
    need to consult your compiler's manual to determine how to enable these
    instructions. Note that AVX is the fastest but requires a CPU from at least
    2011. SSE4 is the next fastest and is supported by most current machines.
*/

#include <dlib/svm_threaded.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_processing.h>
#include <dlib/data_io.h>

#include <iostream>
#include <fstream>

using namespace std;
using namespace dlib;

// ----------------------------------------------------------------------------------------

int main(int argc, char** argv)
{
    try
    {
        // In this example we are going to train a face detector based on the
        // small faces dataset in the examples/faces directory. So the first
        // thing we do is load that dataset. This means you need to supply the
        // path to this faces folder as a command line argument so we will know
        // where it is.
        if (argc != 2)
        {
            cout << "Give the path to the examples/faces directory as the argument to this" << endl;
            cout << "program. For example, if you are in the examples folder then execute " << endl;
            cout << "this program by running: " << endl;
            cout << "   ./fhog_object_detector_ex faces" << endl;
            cout << endl;
            return 0;
        }
        const std::string faces_directory = argv[1];
        // The faces directory contains a training dataset and a separate
        // testing dataset. The training data consists of 4 images, each
        // annotated with rectangles that bound each human face. The idea is
        // to use this training data to learn to identify human faces in new
        // images.
        //
        // Once you have trained an object detector it is always important to
        // test it on data it wasn't trained on. Therefore, we will also load
        // a separate testing set of 5 images. Once we have a face detector
        // created from the training data we will see how well it works by
        // running it on the testing images.
        //
        // So here we create the variables that will hold our dataset.
        // images_train will hold the 4 training images and face_boxes_train
        // holds the locations of the faces in the training images. So for
        // example, the image images_train[0] has the faces given by the
        // rectangles in face_boxes_train[0].
        dlib::array<array2d<unsigned char> > images_train, images_test;
        std::vector<std::vector<rectangle> > face_boxes_train, face_boxes_test;

        // Now we load the data. These XML files list the images in each
        // dataset and also contain the positions of the face boxes. Obviously
        // you can use any kind of input format you like so long as you store
        // the data into images_train and face_boxes_train. But for convenience
        // dlib comes with tools for creating and loading XML image dataset
        // files. Here you see how to load the data. To create the XML files
        // you can use the imglab tool which can be found in the tools/imglab
        // folder. It is a simple graphical tool for labeling objects in images
        // with boxes. To see how to use it read the tools/imglab/README.txt
        // file.
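        //
        // As a rough sketch of that workflow (consult tools/imglab/README.txt
        // for the authoritative usage; the file and folder names here are made
        // up for illustration):
        //   imglab -c mydataset.xml my_images_folder    <- creates an XML file listing the images
        //   imglab mydataset.xml                        <- opens the GUI so you can draw the boxes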
        load_image_dataset(images_train, face_boxes_train, faces_directory+"/training.xml");
        load_image_dataset(images_test, face_boxes_test, faces_directory+"/testing.xml");

        // Now we do a little bit of pre-processing. This is optional but for
        // this training data it improves the results. The first thing we do is
        // increase the size of the images by a factor of two. We do this
        // because it will allow us to detect smaller faces than otherwise would
        // be practical (since the faces are all now twice as big). Note that,
        // in addition to resizing the images, these functions also make the
        // appropriate adjustments to the face boxes so that they still fall on
        // top of the faces after the images are resized.
        upsample_image_dataset<pyramid_down<2> >(images_train, face_boxes_train);
        upsample_image_dataset<pyramid_down<2> >(images_test, face_boxes_test);

        // Since human faces are generally left-right symmetric we can increase
        // our training dataset by adding mirrored versions of each image back
        // into images_train. So this next step doubles the size of our
        // training dataset. Again, this is obviously optional but is useful in
        // many object detection tasks.
        add_image_left_right_flips(images_train, face_boxes_train);
        cout << "num training images: " << images_train.size() << endl;
        cout << "num testing images:  " << images_test.size() << endl;


        // Finally we get to the training code. dlib contains a number of
        // object detectors. This typedef tells it that you want to use the one
        // based on Felzenszwalb's version of the Histogram of Oriented
        // Gradients (commonly called HOG) detector. The 6 means that you want
        // it to use an image pyramid that downsamples the image at a ratio of
        // 5/6. Recall that HOG detectors work by creating an image pyramid and
        // then running the detector over each pyramid level in a sliding window
        // fashion.
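        // (As a point of reference, pyramid_down<N> downsamples at a ratio of
        // (N-1)/N, so a larger N gives a finer-grained pyramid at the cost of
        // scanning more levels.)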
        typedef scan_fhog_pyramid<pyramid_down<6> > image_scanner_type;
        image_scanner_type scanner;
        // The sliding window detector will be 80 pixels wide and 80 pixels tall.
        scanner.set_detection_window_size(80, 80);
        structural_object_detection_trainer<image_scanner_type> trainer(scanner);
        // Set this to the number of processing cores on your machine.
        trainer.set_num_threads(4);
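        // (As an optional sketch, you could instead query the core count
        // portably with
        //   trainer.set_num_threads(std::thread::hardware_concurrency());
        // which requires adding #include <thread> at the top of this file.)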
        // The trainer is a kind of support vector machine and therefore has the usual SVM
        // C parameter. In general, a bigger C encourages it to fit the training data
        // better but might lead to overfitting. You must find the best C value
        // empirically by checking how well the trained detector works on a test set of
        // images you haven't trained on. Don't just leave the value set at 1. Try a few
        // different C values and see what works best for your data.
        trainer.set_c(1);
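        // As an illustrative sketch of that search (not part of the original
        // example; the candidate values are arbitrary), you could train with a
        // few different C values and compare the results on the testing set:
        //   const double candidate_C[] = {0.1, 1.0, 10.0, 100.0};
        //   for (int k = 0; k < 4; ++k)
        //   {
        //       trainer.set_c(candidate_C[k]);
        //       object_detector<image_scanner_type> d = trainer.train(images_train, face_boxes_train);
        //       cout << "C = " << candidate_C[k] << ": "
        //            << test_object_detection_function(d, images_test, face_boxes_test) << endl;
        //   }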
        // We can tell the trainer to print its progress to the console if we want.
        trainer.be_verbose();
        // The trainer will run until the "risk gap" is less than 0.01. Smaller values
        // make the trainer solve the SVM optimization problem more accurately but will
        // take longer to train. For most problems a value in the range of 0.1 to 0.01 is
        // plenty accurate. Also, when in verbose mode the risk gap is printed on each
        // iteration so you can see how close it is to finishing the training.
        trainer.set_epsilon(0.01);


        // Now we run the trainer. For this example, it should take on the order of 10
        // seconds to train.
        object_detector<image_scanner_type> detector = trainer.train(images_train, face_boxes_train);

        // Now that we have a face detector we can test it. The first statement tests it
        // on the training data. It will print the precision, recall, and then average precision.
        cout << "training results: " << test_object_detection_function(detector, images_train, face_boxes_train) << endl;
        // However, to get an idea if it really worked without overfitting we need to run
        // it on images it wasn't trained on. The next line does this. Happily, we see
        // that the object detector works perfectly on the testing images.
        cout << "testing results:  " << test_object_detection_function(detector, images_test, face_boxes_test) << endl;


        // If you have read any papers that use HOG you have probably seen the nice looking
        // "sticks" visualization of a learned HOG detector. This next line creates a
        // window with such a visualization of our detector. It should look somewhat like
        // a face.
        image_window hogwin(draw_fhog(detector), "Learned fHOG detector");

        // Now for the really fun part. Let's display the testing images on the screen and
        // show the output of the face detector overlaid on each image. You will see that
        // it finds all the faces without false alarming on any non-faces.
        image_window win;
        for (unsigned long i = 0; i < images_test.size(); ++i)
        {
            // Run the detector and get the face detections.
            std::vector<rectangle> dets = detector(images_test[i]);
            win.clear_overlay();
            win.set_image(images_test[i]);
            win.add_overlay(dets, rgb_pixel(255,0,0));
            cout << "Hit enter to process the next image..." << endl;
            cin.get();
        }


        // Like everything in dlib, you can save your detector to disk using the
        // serialize() function.
        serialize("face_detector.svm") << detector;

        // Then you can recall it using the deserialize() function.
        object_detector<image_scanner_type> detector2;
        deserialize("face_detector.svm") >> detector2;


        // Now let's talk about some optional features of this training tool as well as some
        // important points you should understand.
        //
        // The first thing that should be pointed out is that, since this is a sliding
        // window classifier, it can't output an arbitrary rectangle as a detection. In
        // this example our sliding window is 80 by 80 pixels and is run over an image
        // pyramid. This means that it can only output detections that are at least 80 by
        // 80 pixels in size (recall that this is why we upsampled the images after loading
        // them). It also means that the aspect ratio of the outputs is 1. So if,
        // for example, you had a box in your training data that was 200 pixels by 10
        // pixels then it would simply be impossible for the detector to learn to detect
        // it. Similarly, if you had a really small box it would be unable to learn to
        // detect it.
        //
        // So the training code performs an input validation check on the training data and
        // will throw an exception if it detects any boxes that are impossible to detect
        // given your setting of scanning window size and image pyramid resolution. You
        // can use a statement like:
        //   remove_unobtainable_rectangles(trainer, images_train, face_boxes_train)
        // to automatically discard these impossible boxes from your training dataset
        // before running the trainer. This will avoid getting the "impossible box"
        // exception. However, I would recommend you be careful that you are not throwing
        // away truth boxes you really care about. The remove_unobtainable_rectangles()
        // function will return the set of removed rectangles so you can visually inspect
        // them and make sure you are OK that they are being removed.
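        //
        // A minimal sketch of that usage (the variable name removed is made up;
        // note the function also edits face_boxes_train in place):
        //   std::vector<std::vector<rectangle> > removed;
        //   removed = remove_unobtainable_rectangles(trainer, images_train, face_boxes_train);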
        //
        // Next, note that any location in the images not marked with a truth box is
        // implicitly treated as a negative example. This means that when creating
        // training data it is critical that you label all the objects you want to detect.
        // So for example, if you are making a face detector then you must mark all the
        // faces in each image. However, sometimes there are objects in images you are
        // unsure about or simply don't care if the detector identifies or not. For these
        // objects you can pass in a set of "ignore boxes" as a third argument to the
        // trainer.train() function. The trainer will simply disregard any detections that
        // happen to hit these boxes.
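        //
        // A minimal sketch of that call (ignore_boxes is a hypothetical variable
        // holding one vector of rectangles per training image, possibly empty):
        //   std::vector<std::vector<rectangle> > ignore_boxes(images_train.size());
        //   detector = trainer.train(images_train, face_boxes_train, ignore_boxes);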
        //
        // Another useful thing you can do is evaluate multiple HOG detectors together. The
        // benefit of this is increased testing speed since it avoids recomputing the HOG
        // features for each run of the detector. You do this by storing your detectors
        // into a std::vector and then invoking evaluate_detectors() like so:
        std::vector<object_detector<image_scanner_type> > my_detectors;
        my_detectors.push_back(detector);
        std::vector<rectangle> dets = evaluate_detectors(my_detectors, images_train[0]);
        //
        //
        // Finally, you can add a nuclear norm regularizer to the SVM trainer. Doing so
        // has two benefits. First, it can cause the learned HOG detector to be composed
        // of separable filters and therefore makes it execute faster when detecting
        // objects. It can also help with generalization since it tends to make the
        // learned HOG filters smoother. To enable this option you call the following
        // function before you create the trainer object:
        //   scanner.set_nuclear_norm_regularization_strength(1.0);
        // The argument determines how important it is to have a small nuclear norm. A
        // bigger regularization strength means it is more important. The smaller the
        // nuclear norm the smoother and faster the learned HOG filters will be, but if the
        // regularization strength value is too large then the SVM will not fit the data
        // well. This is analogous to giving a C value that is too small.
        //
        // You can see how many separable filters are inside your detector like so:
        cout << "num filters: "<< num_separable_filters(detector) << endl;
        // You can also control how many filters there are by explicitly thresholding the
        // singular values of the filters like this:
        detector = threshold_filter_singular_values(detector,0.1);
        // That removes filter components with singular values less than 0.1. The bigger
        // this number the fewer separable filters you will have and the faster the
        // detector will run. However, a large enough threshold will hurt detection
        // accuracy.

    }
    catch (exception& e)
    {
        cout << "\nexception thrown!" << endl;
        cout << e.what() << endl;
    }
}

// ----------------------------------------------------------------------------------------
