Mirror of https://github.com/davisking/dlib.git (synced 2024-11-01 10:14:53 +08:00)

Sort out PEP8 issues in the examples

This commit is contained in:
parent 32ad0ffaef
commit af82bc402f
@@ -14,9 +7,7 @@ letter to
San Francisco, California, 94105, USA.

Public domain dedications are not recognized by some countries. So
if you live in an area where the above dedication isn't valid then
you can consider the example programs to be licensed under the Boost
Software License.
@@ -7,7 +7,8 @@
# face.
#
# The examples/faces folder contains some jpg images of people. You can run
# this program on them and see the detections by executing the following command:
# this program on them and see the detections by executing the
# following command:
# ./face_detector.py ../examples/faces/*.jpg
#
# This face detector is made using the now classic Histogram of Oriented
@@ -20,14 +21,17 @@
#
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake

import dlib, sys
import sys

import dlib
from skimage import io
@@ -35,18 +39,18 @@ detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

for f in sys.argv[1:]:
    print("processing file: ", f)
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time. This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img,1)
    print("number of faces detected: ", len(dets))
    for d in dets:
        print(" detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    raw_input("Hit enter to continue")
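The second argument to the detector call above is the number of times the image is upsampled before detection; more upsampling lets the detector find smaller faces at the cost of speed. A minimal sketch of that trade-off, assuming the same dlib and scikit-image setup as the example and a single image path on the command line (this snippet is illustrative, not part of the commit):

import sys
import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()
img = io.imread(sys.argv[1])
for upsample in (0, 1, 2):
    # Larger upsample values enlarge the image so smaller faces become detectable.
    print("upsample {}: {} faces".format(upsample, len(detector(img, upsample))))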
@@ -1,50 +1,48 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
# This simple example shows how to call dlib's optimal linear assignment
# problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast,
# operating in O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake

import dlib

# Let's imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.
# Let's imagine you need to assign N people to N jobs. Additionally, each
# person will make your company a certain amount of money at each job, but each
# person has different skills so they are better at some jobs and worse at
# others. You would like to find the best way to assign people to these jobs.
# In particular, you would like to maximize the amount of money the group makes
# as a whole. This is an example of an assignment problem and is what is solved
# by the dlib.max_cost_assignment() routine.

# So in this example, let's imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, below we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
# So in this example, let's imagine we have 3 people and 3 jobs. We represent
# the amount of money each person will produce at each job with a cost matrix.
# Each row corresponds to a person and each column corresponds to a job. So for
# example, below we are saying that person 0 will make $1 at job 0, $2 at job 1,
# and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
                    [5, 3, 6],
                    [4, 5, 0]])

# To find out the best assignment of people to jobs we just need to call this function.
# To find out the best assignment of people to jobs we just need to call this
# function.
assignment = dlib.max_cost_assignment(cost)

# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.
print("optimal assignments: ", assignment)
# which indicates that we should assign the person from the first row of the
# cost matrix to job 2, the middle row person to job 0, and the bottom row
# person to job 1.
print("Optimal assignments: {}".format(assignment))

# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print("optimal cost: ", dlib.assignment_cost(cost, assignment))
print("Optimal cost: {}".format(dlib.assignment_cost(cost, assignment)))
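The optimal cost quoted in the comments can be checked by hand: the reported assignment [2, 0, 1] picks the entries 6, 5 and 5 out of the cost matrix. A small plain-Python sanity check of that arithmetic (cost_rows is just a stand-in list for the dlib.matrix above):

cost_rows = [[1, 2, 6],
             [5, 3, 6],
             [4, 5, 0]]
assignment = [2, 0, 1]  # the optimal assignment reported by dlib.max_cost_assignment()
# Each person contributes cost_rows[person][assigned job]: 6 + 5 + 5 = 16.
print(sum(cost_rows[person][job] for person, job in enumerate(assignment)))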
@@ -1,68 +1,74 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This example shows how to use dlib to learn to do sequence segmentation. In a sequence
# segmentation task we are given a sequence of objects (e.g. words in a sentence) and we
# are supposed to detect certain subsequences (e.g. the names of people). Therefore, in
# the code below we create some very simple training sequences and use them to learn a
# sequence segmentation model. In particular, our sequences will be sentences represented
# as arrays of words and our task will be to learn to identify person names. Once we have
# our segmentation model we can use it to find names in new sentences, as we will show.
# This example shows how to use dlib to learn to do sequence segmentation. In
# a sequence segmentation task we are given a sequence of objects (e.g. words in
# a sentence) and we are supposed to detect certain subsequences (e.g. the names
# of people). Therefore, in the code below we create some very simple training
# sequences and use them to learn a sequence segmentation model. In particular,
# our sequences will be sentences represented as arrays of words and our task
# will be to learn to identify person names. Once we have our segmentation
# model we can use it to find names in new sentences, as we will show.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake

import dlib
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
import sys
import dlib

# The sequence segmentation models we work with in this example are chain structured
# conditional random field style models. Therefore, central to a sequence segmentation
# model is some method for converting the elements of a sequence into feature vectors.
# That is, while you might start out representing your sequence as an array of strings, the
# dlib interface works in terms of arrays of feature vectors. Each feature vector should
# capture important information about its corresponding element in the original raw
# sequence. So in this example, since we work with sequences of words and want to identify
# names, we will create feature vectors that tell us if the word is capitalized or not. In
# our simple data, this will be enough to identify names. Therefore, we define
# sentence_to_vectors() which takes a sentence represented as a string and converts it into
# an array of words and then associates a feature vector with each word.

# The sequence segmentation models we work with in this example are chain
# structured conditional random field style models. Therefore, central to a
# sequence segmentation model is some method for converting the elements of a
# sequence into feature vectors. That is, while you might start out representing
# your sequence as an array of strings, the dlib interface works in terms of
# arrays of feature vectors. Each feature vector should capture important
# information about its corresponding element in the original raw sequence. So
# in this example, since we work with sequences of words and want to identify
# names, we will create feature vectors that tell us if the word is capitalized
# or not. In our simple data, this will be enough to identify names.
# Therefore, we define sentence_to_vectors() which takes a sentence represented
# as a string and converts it into an array of words and then associates a
# feature vector with each word.
def sentence_to_vectors(sentence):
    # Create an empty array of vectors
    vects = dlib.vectors()
    for word in sentence.split():
        # Our vectors are very simple 1-dimensional vectors. The value of the single
        # feature is 1 if the first letter of the word is capitalized and 0 otherwise.
        if (word[0].isupper()):
        # Our vectors are very simple 1-dimensional vectors. The value of the
        # single feature is 1 if the first letter of the word is capitalized and
        # 0 otherwise.
        if word[0].isupper():
            vects.append(dlib.vector([1]))
        else:
            vects.append(dlib.vector([0]))
    return vects

# Dlib also supports the use of a sparse vector representation. This is more efficient
# than the above form when you have very high dimensional vectors that are mostly full of
# zeros. In dlib, each sparse vector is represented as an array of pair objects. Each
# pair contains an index and value. Any index not listed in the vector is implicitly
# associated with a value of zero. Additionally, when using sparse vectors with
# dlib.train_sequence_segmenter() you can use "unsorted" sparse vectors. This means you
# can add the index/value pairs into your sparse vectors in any order you want and don't
# need to worry about them being in sorted order.
def sentence_to_sparse_vectors(sentence):
    vects = dlib.sparse_vectors()
    has_cap = dlib.sparse_vector()
    no_cap = dlib.sparse_vector()
    # make has_cap equivalent to dlib.vector([1])
    has_cap.append(dlib.pair(0,1))
    # Since we didn't add anything to no_cap it is equivalent to dlib.vector([0])

# Dlib also supports the use of a sparse vector representation. This is more
# efficient than the above form when you have very high dimensional vectors that
# are mostly full of zeros. In dlib, each sparse vector is represented as an
# array of pair objects. Each pair contains an index and value. Any index not
# listed in the vector is implicitly associated with a value of zero.
# Additionally, when using sparse vectors with dlib.train_sequence_segmenter()
# you can use "unsorted" sparse vectors. This means you can add the index/value
# pairs into your sparse vectors in any order you want and don't need to worry
# about them being in sorted order.
def sentence_to_sparse_vectors(sentence):
    vects = dlib.sparse_vectors()
    has_cap = dlib.sparse_vector()
    no_cap = dlib.sparse_vector()
    # make has_cap equivalent to dlib.vector([1])
    has_cap.append(dlib.pair(0, 1))

    # Since we didn't add anything to no_cap it is equivalent to
    # dlib.vector([0])
    for word in sentence.split():
        if (word[0].isupper()):
        if word[0].isupper():
            vects.append(has_cap)
        else:
            vects.append(no_cap)
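Both helpers above implement the same one-feature-per-word mapping: 1 if the word starts with a capital letter, 0 otherwise. A rough plain-Python illustration of what that mapping produces for one of the training sentences used below (ordinary lists instead of dlib vector types):

sentence = "Bob Jones is a name and so is George Clinton"
features = [1 if word[0].isupper() else 0 for word in sentence.split()]
# The capitalized words (the names) get a 1:
print(features)  # [1, 1, 0, 0, 0, 0, 0, 0, 1, 1]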
@@ -77,56 +83,49 @@ def print_segment(sentence, names):
    sys.stdout.write("\n")


# Now let's make some training data. Each example is a sentence as well as a
# set of ranges which indicate the locations of any names.
names = dlib.ranges()  # make an array of dlib.range objects.
segments = dlib.rangess()  # make an array of arrays of dlib.range objects.
sentences = ["The other day I saw a man named Jim Smith",
             "Davis King is the main author of the dlib Library",
             "Bob Jones is a name and so is George Clinton",
             "My dog is named Bob Barker",
             "ABC is an acronym but John James Smith is a name",
             "No names in this sentence at all"]

# Now let's make some training data. Each example is a sentence as well as a set of ranges
# which indicate the locations of any names.
names = dlib.ranges()  # make an array of dlib.range objects.
segments = dlib.rangess()  # make an array of arrays of dlib.range objects.
sentences = []

sentences.append("The other day I saw a man named Jim Smith")
# We want to detect person names. So we note that the name is located within the
# range [8, 10). Note that we use half open ranges to identify segments. So in
# this case, the segment identifies the string "Jim Smith".
# We want to detect person names. So we note that the name is located within
# the range [8, 10). Note that we use half open ranges to identify segments.
# So in this case, the segment identifies the string "Jim Smith".
names.append(dlib.range(8, 10))
segments.append(names)
names.clear()  # make names empty for use again below
# make names empty for use again below
names.clear()

sentences.append("Davis King is the main author of the dlib Library")
names.append(dlib.range(0, 2))
segments.append(names)
names.clear()

sentences.append("Bob Jones is a name and so is George Clinton")
names.append(dlib.range(0, 2))
names.append(dlib.range(8, 10))
segments.append(names)
names.clear()

sentences.append("My dog is named Bob Barker")
names.append(dlib.range(4, 6))
segments.append(names)
names.clear()

sentences.append("ABC is an acronym but John James Smith is a name")
names.append(dlib.range(5, 8))
segments.append(names)
names.clear()

sentences.append("No names in this sentence at all")
segments.append(names)
names.clear()

# Now before we can pass these training sentences to the dlib tools we need to convert them
# into arrays of vectors as discussed above. We can use either a sparse or dense
# representation depending on our needs. In this example, we show how to do it both ways.
# Now before we can pass these training sentences to the dlib tools we need to
# convert them into arrays of vectors as discussed above. We can use either a
# sparse or dense representation depending on our needs. In this example, we
# show how to do it both ways.
use_sparse_vects = False
if use_sparse_vects:
    # Make an array of arrays of dlib.sparse_vector objects.
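The half-open ranges used above index into the word array produced by split(), so range(8, 10) on the first training sentence picks out exactly the two words of the name. A quick plain-Python check of that convention:

words = "The other day I saw a man named Jim Smith".split()
# A dlib.range(8, 10) covers indices 8 and 9 but not 10 (half open).
print(" ".join(words[8:10]))  # prints "Jim Smith"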
@@ -139,46 +138,49 @@ else:
    for s in sentences:
        training_sequences.append(sentence_to_vectors(s))


# Now that we have a simple training set we can train a sequence segmenter. However, the
# sequence segmentation trainer has some optional parameters we can set. These parameters
# determine properties of the segmentation model we will learn. See the dlib documentation
# for the sequence_segmenter object for a full discussion of their meanings.
# Now that we have a simple training set we can train a sequence segmenter.
# However, the sequence segmentation trainer has some optional parameters we can
# set. These parameters determine properties of the segmentation model we will
# learn. See the dlib documentation for the sequence_segmenter object for a
# full discussion of their meanings.
params = dlib.segmenter_params()
params.window_size = 3
params.use_high_order_features = True
params.use_BIO_model = True
# This is the common SVM C parameter. Larger values encourage the trainer to attempt to
# fit the data exactly but might overfit. In general, you determine this parameter by
# cross-validation.
# This is the common SVM C parameter. Larger values encourage the trainer to
# attempt to fit the data exactly but might overfit. In general, you determine
# this parameter by cross-validation.
params.C = 10

# Train a model. The model object is responsible for predicting the locations of names in
# new sentences.
# Train a model. The model object is responsible for predicting the locations
# of names in new sentences.
model = dlib.train_sequence_segmenter(training_sequences, segments, params)

# Let's print out the things the model thinks are names. The output is a set
# of ranges which are predicted to contain names. If you run this example
# program you will see that it gets them all correct.
for i, s in enumerate(sentences):
    print_segment(s, model(training_sequences[i]))

# Let's print out the things the model thinks are names. The output is a set of ranges
# which are predicted to contain names. If you run this example program you will see that
# it gets them all correct.
for i in range(len(sentences)):
    print_segment(sentences[i], model(training_sequences[i]))

# Let's also try segmenting a new sentence. This will print out "Bob Bucket". Note that we
# need to remember to use the same vector representation as we used during training.
test_sentence = "There once was a man from Nantucket whose name rhymed with Bob Bucket"
# Let's also try segmenting a new sentence. This will print out "Bob Bucket".
# Note that we need to remember to use the same vector representation as we used
# during training.
test_sentence = "There once was a man from Nantucket " \
                "whose name rhymed with Bob Bucket"
if use_sparse_vects:
    print_segment(test_sentence, model(sentence_to_sparse_vectors(test_sentence)))
    print_segment(test_sentence,
                  model(sentence_to_sparse_vectors(test_sentence)))
else:
    print_segment(test_sentence, model(sentence_to_vectors(test_sentence)))

# We can also measure the accuracy of a model relative to some labeled data. This
# statement prints the precision, recall, and F1-score of the model relative to the data in
# training_sequences/segments.
print("Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments))

# We can also do 5-fold cross-validation and print the resulting precision, recall, and F1-score.
print("cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params))

# We can also measure the accuracy of a model relative to some labeled data.
# This statement prints the precision, recall, and F1-score of the model
# relative to the data in training_sequences/segments.
print("Test on training data: {}".format(
    dlib.test_sequence_segmenter(model, training_sequences, segments)))

# We can also do 5-fold cross-validation and print the resulting precision,
# recall, and F1-score.
print("Cross validation: {}".format(
    dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5,
                                           params)))
@@ -14,23 +14,21 @@
# come to the top of the ranked list.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake

# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
import dlib

# Now let's make some testing data. To make it really simple, let's suppose that
# we are ranking 2D vectors and that vectors with positive values in the first
# dimension should rank higher than other vectors. So what we do is make
# Now let's make some testing data. To make it really simple, let's suppose
# that we are ranking 2D vectors and that vectors with positive values in the
# first dimension should rank higher than other vectors. So what we do is make
# examples of relevant (i.e. high ranking) and non-relevant (i.e. low ranking)
# vectors and store them into a ranking_pair object like so:

data = dlib.ranking_pair()
# Here we add two examples. In real applications, you would want lots of
# examples of relevant and non-relevant vectors.
@@ -53,8 +51,10 @@ rank = trainer.train(data)
# Now if you call rank on a vector it will output a ranking score. In
# particular, the ranking score for relevant vectors should be larger than the
# score for non-relevant vectors.
print("ranking score for a relevant vector: ", rank(data.relevant[0]))
print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
print("Ranking score for a relevant vector: {}".format(
    rank(data.relevant[0])))
print("Ranking score for a non-relevant vector: {}".format(
    rank(data.nonrelevant[0])))
# The output is the following:
# ranking score for a relevant vector: 0.5
# ranking score for a non-relevant vector: -0.5
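A ranking score is simply the dot product of the learned weight vector with the input vector, which is why the printed values come out as 0.5 and -0.5. A plain-Python illustration, assuming (the vectors themselves are not shown in these hunks) that the relevant example is the 2D vector [1, 0] and the non-relevant one is [0, 1], with the learned weights [0.5, -0.5] quoted in the next hunk:

weights = [0.5, -0.5]   # learned weight vector quoted below
relevant = [1, 0]       # assumed relevant example vector
nonrelevant = [0, 1]    # assumed non-relevant example vector

def score(v):
    # dot product of the weights with the vector
    return sum(w * x for w, x in zip(weights, v))

print(score(relevant), score(nonrelevant))  # 0.5 -0.5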
@@ -70,14 +70,11 @@ print(dlib.test_ranking_function(rank, data))
# The ranking scores are computed by taking the dot product between a learned
# weight vector and a data vector. If you want to see the learned weight vector
# you can display it like so:
print("weights: \n", rank.weights)
print("Weights: {}".format(rank.weights))
# In this case the weights are:
# 0.5
# -0.5

# In the above example, our data contains just two sets of objects. The
# relevant set and non-relevant set. The trainer is attempting to find a
# ranking function that gives every relevant vector a higher score than every
@@ -94,7 +91,6 @@ print("weights: \n", rank.weights)
# to the trainer. Therefore, each ranking_pair would represent the
# relevant/non-relevant sets for a particular query. An example is shown below
# (for simplicity, we reuse our data from above to make 4 identical "queries").

queries = dlib.ranking_pairs()
queries.append(data)
queries.append(data)
@@ -104,7 +100,6 @@ queries.append(data)
# We can train just as before.
rank = trainer.train(queries)

# Now that we have multiple ranking_pair instances, we can also use
# cross_validate_ranking_trainer(). This performs cross-validation by splitting
# the queries up into folds. That is, it lets the trainer train on a subset of
@@ -112,9 +107,8 @@ rank = trainer.train(queries)
# splits and returns the overall ranking accuracy based on the held out data.
# Just like test_ranking_function(), it reports both the ordering accuracy and
# mean average precision.
print("cross validation results: ", dlib.cross_validate_ranking_trainer(trainer, queries, 4))

print("Cross validation results: {}".format(
    dlib.cross_validate_ranking_trainer(trainer, queries, 4)))

# Finally, note that the ranking tools also support the use of sparse vectors in
# addition to dense vectors (which we used above). So if we wanted to do
@@ -131,19 +125,20 @@ samp = dlib.sparse_vector()
# increasing order and no index value shows up more than once. If necessary,
# you can use the dlib.make_sparse_vector() routine to make a sparse vector
# object properly sorted and contain unique indices.
samp.append(dlib.pair(0,1))
samp.append(dlib.pair(0, 1))
data.relevant.append(samp)

# Now make samp represent the same vector as dlib.vector([0, 1])
samp.clear()
samp.append(dlib.pair(1,1))
samp.append(dlib.pair(1, 1))
data.nonrelevant.append(samp)

trainer = dlib.svm_rank_trainer_sparse()
rank = trainer.train(data)
print("ranking score for a relevant vector: ", rank(data.relevant[0]))
print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
print("Ranking score for a relevant vector: {}".format(
    rank(data.relevant[0])))
print("Ranking score for a non-relevant vector: {}".format(
    rank(data.nonrelevant[0])))
# Just as before, the output is the following:
# ranking score for a relevant vector: 0.5
# ranking score for a non-relevant vector: -0.5
@@ -1,151 +1,167 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This is an example illustrating the use of the structural SVM solver from the dlib C++
# Library. Therefore, this example teaches you the central ideas needed to setup a
# structural SVM model for your machine learning problems. To illustrate the process, we
# use dlib's structural SVM solver to learn the parameters of a simple multi-class
# classifier. We first discuss the multi-class classifier model and then walk through
# using the structural SVM tools to find the parameters of this classification model.
#
# As an aside, dlib's C++ interface to the structural SVM solver is threaded. So on a
# multi-core computer it is significantly faster than using the python interface. So
# consider using the C++ interface instead if you find that running it in python is slow.
# This is an example illustrating the use of the structural SVM solver from
# the dlib C++ Library. Therefore, this example teaches you the central ideas
# needed to setup a structural SVM model for your machine learning problems. To
# illustrate the process, we use dlib's structural SVM solver to learn the
# parameters of a simple multi-class classifier. We first discuss the
# multi-class classifier model and then walk through using the structural SVM
# tools to find the parameters of this classification model. As an aside,
# dlib's C++ interface to the structural SVM solver is threaded. So on a
# multi-core computer it is significantly faster than using the python
# interface. So consider using the C++ interface instead if you find that
# running it in python is slow.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake

# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
import dlib


def main():
    # In this example, we have three types of samples: class 0, 1, or 2. That is, each of
    # our sample vectors falls into one of three classes. To keep this example very
    # simple, each sample vector is zero everywhere except at one place. The non-zero
    # dimension of each vector determines the class of the vector. So for example, the
    # first element of samples has a class of 1 because samples[0][1] is the only non-zero
    # element of samples[0].
    samples = [[0,2,0], [1,0,0], [0,4,0], [0,0,3]];
    # Since we want to use a machine learning method to learn a 3-class classifier we need
    # to record the labels of our samples. Here samples[i] has a class label of labels[i].
    labels = [1,0,1,2]
    # In this example, we have three types of samples: class 0, 1, or 2. That
    # is, each of our sample vectors falls into one of three classes. To keep
    # this example very simple, each sample vector is zero everywhere except at
    # one place. The non-zero dimension of each vector determines the class of
    # the vector. So for example, the first element of samples has a class of 1
    # because samples[0][1] is the only non-zero element of samples[0].
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # Since we want to use a machine learning method to learn a 3-class
    # classifier we need to record the labels of our samples. Here samples[i]
    # has a class label of labels[i].
    labels = [1, 0, 1, 2]

    # Now that we have some training data we can tell the structural SVM to learn the
    # parameters of our 3-class classifier model. The details of this will be explained
    # later. For now, just note that it finds the weights (i.e. a vector of real valued
    # parameters) such that predict_label(weights, sample) always returns the correct label
    # for a sample vector.
    problem = three_class_classifier_problem(samples, labels)
    # Now that we have some training data we can tell the structural SVM to
    # learn the parameters of our 3-class classifier model. The details of this
    # will be explained later. For now, just note that it finds the weights
    # (i.e. a vector of real valued parameters) such that predict_label(weights,
    # sample) always returns the correct label for a sample vector.
    problem = ThreeClassClassifierProblem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)

    # Print the weights and then evaluate predict_label() on each of our training samples.
    # Note that the correct label is predicted for each sample.
    # Print the weights and then evaluate predict_label() on each of our
    # training samples. Note that the correct label is predicted for each
    # sample.
    print(weights)
    for i in range(len(samples)):
        print("predicted label for sample[{0}]: {1}".format(i, predict_label(weights, samples[i])))
    for k, s in enumerate(samples):
        print("Predicted label for sample[{0}]: {1}".format(
            k, predict_label(weights, s)))


def predict_label(weights, sample):
    """Given the 9-dimensional weight vector which defines a 3 class classifier, predict the
    class of the given 3-dimensional sample vector. Therefore, the output of this
    function is either 0, 1, or 2 (i.e. one of the three possible labels)."""
    """Given the 9-dimensional weight vector which defines a 3 class classifier,
    predict the class of the given 3-dimensional sample vector. Therefore, the
    output of this function is either 0, 1, or 2 (i.e. one of the three possible
    labels)."""

    # Our 3-class classifier model can be thought of as containing 3 separate linear
    # classifiers. So to predict the class of a sample vector we evaluate each of these
    # three classifiers and then whatever classifier has the largest output "wins" and
    # predicts the label of the sample. This is the popular one-vs-all multi-class
    # classifier model.
    #
    # Keeping this in mind, the code below simply pulls the three separate weight vectors
    # out of weights and then evaluates each against sample. The individual classifier
    # scores are stored in scores and the highest scoring index is returned as the label.
    # Our 3-class classifier model can be thought of as containing 3 separate
    # linear classifiers. So to predict the class of a sample vector we
    # evaluate each of these three classifiers and then whatever classifier has
    # the largest output "wins" and predicts the label of the sample. This is
    # the popular one-vs-all multi-class classifier model.
    # Keeping this in mind, the code below simply pulls the three separate
    # weight vectors out of weights and then evaluates each against sample. The
    # individual classifier scores are stored in scores and the highest scoring
    # index is returned as the label.
    w0 = weights[0:3]
    w1 = weights[3:6]
    w2 = weights[6:9]
    scores = [dot(w0, sample), dot(w1,sample), dot(w2, sample)]
    scores = [dot(w0, sample), dot(w1, sample), dot(w2, sample)]
    max_scoring_label = scores.index(max(scores))
    return max_scoring_label


def dot(a, b):
    "Compute the dot product between the two vectors a and b."
    return sum(i*j for i,j in zip(a,b))
    """Compute the dot product between the two vectors a and b."""
    return sum(i * j for i, j in zip(a, b))

###########################################################################################

class three_class_classifier_problem:
################################################################################


class ThreeClassClassifierProblem:
    # Now we arrive at the meat of this example program. To use the
    # dlib.solve_structural_svm_problem() routine you need to define an object which tells
    # the structural SVM solver what to do for your problem. In this example, this is done
    # by defining the three_class_classifier_problem object. Before we get into the
    # details, we first discuss some background information on structural SVMs.
    # dlib.solve_structural_svm_problem() routine you need to define an object
    # which tells the structural SVM solver what to do for your problem. In
    # this example, this is done by defining the ThreeClassClassifierProblem
    # object. Before we get into the details, we first discuss some background
    # information on structural SVMs.
    #
    # A structural SVM is a supervised machine learning method for learning to predict
    # complex outputs. This is contrasted with a binary classifier which makes only simple
    # yes/no predictions. A structural SVM, on the other hand, can learn to predict
    # complex outputs such as entire parse trees or DNA sequence alignments. To do this,
    # it learns a function F(x,y) which measures how well a particular data sample x
    # matches a label y, where a label is potentially a complex thing like a parse tree.
    # However, to keep this example program simple we use only a 3 category label output.
    # A structural SVM is a supervised machine learning method for learning to
    # predict complex outputs. This is contrasted with a binary classifier
    # which makes only simple yes/no predictions. A structural SVM, on the
    # other hand, can learn to predict complex outputs such as entire parse
    # trees or DNA sequence alignments. To do this, it learns a function F(x,y)
    # which measures how well a particular data sample x matches a label y,
    # where a label is potentially a complex thing like a parse tree. However,
    # to keep this example program simple we use only a 3 category label output.
    #
    # At test time, the best label for a new x is given by the y which maximizes F(x,y).
    # To put this into the context of the current example, F(x,y) computes the score for a
    # given sample and class label. The predicted class label is therefore whatever value
    # of y which makes F(x,y) the biggest. This is exactly what predict_label() does.
    # That is, it computes F(x,0), F(x,1), and F(x,2) and then reports which label has the
    # At test time, the best label for a new x is given by the y which
    # maximizes F(x,y). To put this into the context of the current example,
    # F(x,y) computes the score for a given sample and class label. The
    # predicted class label is therefore whatever value of y which makes F(x,y)
    # the biggest. This is exactly what predict_label() does. That is, it
    # computes F(x,0), F(x,1), and F(x,2) and then reports which label has the
    # biggest value.
    #
    # At a high level, a structural SVM can be thought of as searching the parameter space
    # of F(x,y) for the set of parameters that make the following inequality true as often
    # as possible:
    # At a high level, a structural SVM can be thought of as searching the
    # parameter space of F(x,y) for the set of parameters that make the
    # following inequality true as often as possible:
    #     F(x_i,y_i) > max{over all incorrect labels of x_i} F(x_i, y_incorrect)
    # That is, it seeks to find the parameter vector such that F(x,y) always gives the
    # highest score to the correct output. To define the structural SVM optimization
    # problem precisely, we first introduce some notation:
    #     - let PSI(x,y) == the joint feature vector for input x and a label y.
    #     - let F(x,y|w) == dot(w,PSI(x,y)).
    #       (we use the | notation to emphasize that F() has the parameter vector of
    #       weights called w)
    #     - let LOSS(idx,y) == the loss incurred for predicting that the idx-th training
    #       sample has a label of y. Note that LOSS() should always be >= 0 and should
    #       become exactly 0 when y is the correct label for the idx-th sample. Moreover,
    #       it should notionally indicate how bad it is to predict y for the idx'th sample.
    #     - let x_i == the i-th training sample.
    #     - let y_i == the correct label for the i-th training sample.
    #     - The number of data samples is N.
    # That is, it seeks to find the parameter vector such that F(x,y) always
    # gives the highest score to the correct output. To define the structural
    # SVM optimization problem precisely, we first introduce some notation:
    #     - let PSI(x,y) == the joint feature vector for input x and a label y
    #     - let F(x,y|w) == dot(w,PSI(x,y)).
    #       (we use the | notation to emphasize that F() has the parameter vector
    #       of weights called w)
    #     - let LOSS(idx,y) == the loss incurred for predicting that the
    #       idx-th training sample has a label of y. Note that LOSS()
    #       should always be >= 0 and should become exactly 0 when y is the
    #       correct label for the idx-th sample. Moreover, it should notionally
    #       indicate how bad it is to predict y for the idx'th sample.
    #     - let x_i == the i-th training sample.
    #     - let y_i == the correct label for the i-th training sample.
    #     - The number of data samples is N.
    #
    # Then the optimization problem solved by a structural SVM using
    # dlib.solve_structural_svm_problem() is the following:
    #     Minimize: h(w) == 0.5*dot(w,w) + C*R(w)
    #
    #     Where R(w) == sum from i=1 to N: 1/N * sample_risk(i,w)
    #     and sample_risk(i,w) == max over all Y: LOSS(i,Y) + F(x_i,Y|w) - F(x_i,y_i|w)
    #     and C > 0
    #     Where R(w) == sum from i=1 to N: 1/N * sample_risk(i,w) and
    #     sample_risk(i,w) == max over all
    #     Y: LOSS(i,Y) + F(x_i,Y|w) - F(x_i,y_i|w) and C > 0
    #
    # You can think of the sample_risk(i,w) as measuring the degree of error you would make
    # when predicting the label of the i-th sample using parameters w. That is, it is zero
    # only when the correct label would be predicted and grows larger the more "wrong" the
    # predicted output becomes. Therefore, the objective function is minimizing a balance
    # between making the weights small (typically this reduces overfitting) and fitting the
    # training data. The degree to which you try to fit the data is controlled by the C
    # parameter.
    # You can think of the sample_risk(i,w) as measuring the degree of error
    # you would make when predicting the label of the i-th sample using
    # parameters w. That is, it is zero only when the correct label would be
    # predicted and grows larger the more "wrong" the predicted output becomes.
    # Therefore, the objective function is minimizing a balance between making
    # the weights small (typically this reduces overfitting) and fitting the
    # training data. The degree to which you try to fit the data is controlled
    # by the C parameter.
    #
    # For a more detailed introduction to structured support vector machines you should
    # consult the following paper:
    # For a more detailed introduction to structured support vector machines
    # you should consult the following paper:
    #     Predicting Structured Objects with Support Vector Machines by
    #     Thorsten Joachims, Thomas Hofmann, Yisong Yue, and Chun-nam Yu
    #
    # Finally, we come back to the code. To use dlib.solve_structural_svm_problem() you
    # need to provide the things discussed above. This is the value of C, the number of
    # training samples, the dimensionality of PSI(), as well as methods for calculating the
    # loss values and PSI() vectors. You will also need to write code that can compute:
    # Finally, we come back to the code. To use
    # dlib.solve_structural_svm_problem() you need to provide the things
    # discussed above. This is the value of C, the number of training samples,
    # the dimensionality of PSI(), as well as methods for calculating the loss
    # values and PSI() vectors. You will also need to write code that can
    # compute:
    #     max over all Y: LOSS(i,Y) + F(x_i,Y|w). To summarize, the
    # three_class_classifier_problem class is required to have the following fields:
    # ThreeClassClassifierProblem class is required to have the following
    # fields:
    #     - C
    #     - num_samples
    #     - num_dimensions
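The PSI(x,label) layout this class builds (in make_psi() below) just copies the 3-dimensional sample into one of three slots of a 9-dimensional vector, so that dot(w, PSI(x, y)) reproduces the per-class scores used by predict_label(). A plain-Python sketch of that idea, with made-up illustrative weights rather than anything the solver actually learns:

def psi(x, label):
    # Place x into the slot selected by label; everything else stays zero.
    out = [0.0] * (3 * len(x))
    out[label * len(x):(label + 1) * len(x)] = x
    return out

def dot(a, b):
    return sum(i * j for i, j in zip(a, b))

w = [1, 0, 0,  0, 1, 0,  0, 0, 1]  # illustrative weights only
x = [0, 2, 0]                      # the first training sample; its true label is 1
scores = [dot(w, psi(x, y)) for y in range(3)]
print(scores.index(max(scores)))   # prints 1, i.e. argmax_y dot(w, PSI(x, y))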
@ -155,152 +171,162 @@ class three_class_classifier_problem:
|
||||
C = 1
|
||||
|
||||
# There are also a number of optional arguments:
|
||||
# epsilon is the stopping tolerance. The optimizer will run until R(w) is within
|
||||
# epsilon of its optimal value. If you don't set this then it defaults to 0.001.
|
||||
#epsilon = 1e-13
|
||||
# epsilon is the stopping tolerance. The optimizer will run until R(w) is
|
||||
# within epsilon of its optimal value. If you don't set this then it
|
||||
# defaults to 0.001.
|
||||
# epsilon = 1e-13
|
||||
|
||||
# Uncomment this and the optimizer will print its progress to standard out. You will
|
||||
# be able to see things like the current risk gap. The optimizer continues until the
|
||||
# Uncomment this and the optimizer will print its progress to standard
|
||||
# out. You will be able to see things like the current risk gap. The
|
||||
# optimizer continues until the
|
||||
# risk gap is below epsilon.
|
||||
#be_verbose = True
|
||||
# be_verbose = True
|
||||
|
||||
# If you want to require that the learned weights are all non-negative then set this
|
||||
# field to True.
|
||||
#learns_nonnegative_weights = True
|
||||
# If you want to require that the learned weights are all non-negative
|
||||
# then set this field to True.
|
||||
# learns_nonnegative_weights = True
|
||||
|
||||
# The optimizer uses an internal cache to avoid unnecessary calls to your
|
||||
# separation_oracle() routine. This parameter controls the size of that cache. Bigger
|
||||
# values use more RAM and might make the optimizer run faster. You can also disable it
|
||||
# by setting it to 0 which is good to do when your separation_oracle is very fast. If
|
||||
# If you don't call this function it defaults to a value of 5.
|
||||
#max_cache_size = 20
|
||||
|
||||
# separation_oracle() routine. This parameter controls the size of that
|
||||
# cache. Bigger values use more RAM and might make the optimizer run
|
||||
# faster. You can also disable it by setting it to 0 which is good to do
|
||||
# when your separation_oracle is very fast. If If you don't call this
|
||||
# function it defaults to a value of 5.
|
||||
# max_cache_size = 20
|
||||
|
||||
def __init__(self, samples, labels):
|
||||
# dlib.solve_structural_svm_problem() expects the class to have num_samples and
|
||||
# num_dimensions fields. These fields should contain the number of training
|
||||
# samples and the dimensionality of the PSI feature vector respectively.
|
||||
# dlib.solve_structural_svm_problem() expects the class to have
|
||||
# num_samples and num_dimensions fields. These fields should contain
|
||||
# the number of training samples and the dimensionality of the PSI
|
||||
# feature vector respectively.
|
||||
self.num_samples = len(samples)
|
||||
self.num_dimensions = len(samples[0])*3
|
||||
|
||||
self.samples = samples
|
||||
self.labels = labels
|
||||
|
||||
|
||||
def make_psi(self, x, label):
|
||||
"""Compute PSI(x,label)."""
|
||||
# All we are doing here is taking x, which is a 3 dimensional sample vector in this
|
||||
# example program, and putting it into one of 3 places in a 9 dimensional PSI
|
||||
# vector, which we then return. So this function returns PSI(x,label). To see why
|
||||
# we setup PSI like this, recall how predict_label() works. It takes in a 9
|
||||
# dimensional weight vector and breaks the vector into 3 pieces. Each piece then
|
||||
# defines a different classifier and we use them in a one-vs-all manner to predict
|
||||
# the label. So now that we are in the structural SVM code we have to define the
|
||||
# PSI vector to correspond to this usage. That is, we need to setup PSI so that
|
||||
# argmax_y dot(weights,PSI(x,y)) == predict_label(weights,x). This is how we tell
|
||||
# the structural SVM solver what kind of problem we are trying to solve.
|
||||
# All we are doing here is taking x, which is a 3 dimensional sample
|
||||
# vector in this example program, and putting it into one of 3 places in
|
||||
# a 9 dimensional PSI vector, which we then return. So this function
|
||||
# returns PSI(x,label). To see why we setup PSI like this, recall how
|
||||
# predict_label() works. It takes in a 9 dimensional weight vector and
|
||||
# breaks the vector into 3 pieces. Each piece then defines a different
|
||||
# classifier and we use them in a one-vs-all manner to predict the
|
||||
# label. So now that we are in the structural SVM code we have to
|
||||
# define the PSI vector to correspond to this usage. That is, we need
|
||||
# to setup PSI so that argmax_y dot(weights,PSI(x,y)) ==
|
||||
# predict_label(weights,x). This is how we tell the structural SVM
|
||||
# solver what kind of problem we are trying to solve.
|
||||
#
|
||||
# It's worth emphasizing that the single biggest step in using a structural SVM is
|
||||
# deciding how you want to represent PSI(x,label). It is always a vector, but
|
||||
# deciding what to put into it to solve your problem is often not a trivial task.
|
||||
# Part of the difficulty is that you need an efficient method for finding the label
|
||||
# that makes dot(w,PSI(x,label)) the biggest. Sometimes this is easy, but often
|
||||
# finding the max scoring label turns into a difficult combinatorial optimization
|
||||
# problem. So you need to pick a PSI that doesn't make the label maximization step
|
||||
# intractable but also still well models your problem.
|
||||
|
||||
# Create a dense vector object (note that you can also use unsorted sparse vectors
|
||||
# (i.e. dlib.sparse_vector objects) to represent your PSI vector. This is useful
|
||||
# if you have very high dimensional PSI vectors that are mostly zeros. In the
|
||||
# context of this example, you would simply return a dlib.sparse_vector at the end
|
||||
# of make_psi() and the rest of the example would still work properly. ).
|
||||
# It's worth emphasizing that the single biggest step in using a
|
||||
# structural SVM is deciding how you want to represent PSI(x,label). It
|
||||
# is always a vector, but deciding what to put into it to solve your
|
||||
# problem is often not a trivial task. Part of the difficulty is that
|
||||
# you need an efficient method for finding the label that makes
|
||||
# dot(w,PSI(x,label)) the biggest. Sometimes this is easy, but often
|
||||
# finding the max scoring label turns into a difficult combinatorial
|
||||
# optimization problem. So you need to pick a PSI that doesn't make the
|
||||
# label maximization step intractable but also still well models your
|
||||
# problem.
|
||||
#
|
||||
# Create a dense vector object (note that you can also use unsorted
|
||||
# sparse vectors (i.e. dlib.sparse_vector objects) to represent your
|
||||
# PSI vector. This is useful if you have very high dimensional PSI
|
||||
# vectors that are mostly zeros. In the context of this example, you
|
||||
# would simply return a dlib.sparse_vector at the end of make_psi() and
|
||||
# the rest of the example would still work properly. ).
|
||||
psi = dlib.vector()
|
||||
# Set it to have 9 dimensions. Note that the elements of the vector are 0
|
||||
# initialized.
|
||||
# Set it to have 9 dimensions. Note that the elements of the vector
|
||||
# are 0 initialized.
|
||||
psi.resize(self.num_dimensions)
|
||||
dims = len(x)
|
||||
if (label == 0):
|
||||
for i in range(0,dims):
|
||||
if label == 0:
|
||||
for i in range(0, dims):
|
||||
psi[i] = x[i]
|
||||
elif (label == 1):
|
||||
for i in range(dims,2*dims):
|
||||
psi[i] = x[i-dims]
|
||||
else: # the label must be 2
|
||||
for i in range(2*dims,3*dims):
|
||||
psi[i] = x[i-2*dims]
|
||||
elif label == 1:
|
||||
for i in range(dims, 2 * dims):
|
||||
psi[i] = x[i - dims]
|
||||
else: # the label must be 2
|
||||
for i in range(2 * dims, 3 * dims):
|
||||
psi[i] = x[i - 2 * dims]
|
||||
return psi
|
||||
|
||||
|
||||
# Now we get to the two member functions that are directly called by
|
||||
# dlib.solve_structural_svm_problem().
|
||||
#
|
||||
# In get_truth_joint_feature_vector(), all you have to do is return the PSI() vector
|
||||
# for the idx-th training sample when it has its true label. So here it returns
|
||||
# In get_truth_joint_feature_vector(), all you have to do is return the
|
||||
# PSI() vector for the idx-th training sample when it has its true label.
|
||||
# So here it returns
|
||||
# PSI(self.samples[idx], self.labels[idx]).
|
||||
def get_truth_joint_feature_vector(self, idx):
|
||||
return self.make_psi(self.samples[idx], self.labels[idx])
|
||||
|
||||
|
||||
    # separation_oracle() is more interesting. dlib.solve_structural_svm_problem() will
    # call separation_oracle() many times during the optimization. Each time it will give
    # it the current value of the parameter weights and the separation_oracle() is supposed
    # to find the label that most violates the structural SVM objective function for the
    # idx-th sample. Then the separation oracle reports the corresponding PSI vector and
    # loss value. To state this more precisely, the separation_oracle() member function
    # has the following contract:
    # separation_oracle() is more interesting.
    # dlib.solve_structural_svm_problem() will call separation_oracle() many
    # times during the optimization. Each time it will give it the current
    # value of the parameter weights and the separation_oracle() is supposed to
    # find the label that most violates the structural SVM objective function
    # for the idx-th sample. Then the separation oracle reports the
    # corresponding PSI vector and loss value. To state this more precisely,
    # the separation_oracle() member function has the following contract:
    #   requires
    #       - 0 <= idx < self.num_samples
    #       - len(current_solution) == self.num_dimensions
    #       - 0 <= idx < self.num_samples
    #       - len(current_solution) == self.num_dimensions
    #   ensures
    #       - runs the separation oracle on the idx-th sample. We define this as follows:
    #           - let X == the idx-th training sample.
    #           - let PSI(X,y) == the joint feature vector for input X and an arbitrary label y.
    #           - let F(X,y) == dot(current_solution,PSI(X,y)).
    #           - let LOSS(idx,y) == the loss incurred for predicting that the idx-th sample
    #             has a label of y. Note that LOSS() should always be >= 0 and should
    #             become exactly 0 when y is the correct label for the idx-th sample.
    #       - runs the separation oracle on the idx-th sample.
    #         We define this as follows:
    #           - let X == the idx-th training sample.
    #           - let PSI(X,y) == the joint feature vector for input X
    #             and an arbitrary label y.
    #           - let F(X,y) == dot(current_solution,PSI(X,y)).
    #           - let LOSS(idx,y) == the loss incurred for predicting that the
    #             idx-th sample has a label of y. Note that LOSS()
    #             should always be >= 0 and should become exactly 0 when y is the
    #             correct label for the idx-th sample.
    #
    #       Then the separation oracle finds a Y such that:
    #           Y = argmax over all y: LOSS(idx,y) + F(X,y)
    #           (i.e. It finds the label which maximizes the above expression.)
    #       Then the separation oracle finds a Y such that:
    #           Y = argmax over all y: LOSS(idx,y) + F(X,y)
    #           (i.e. It finds the label which maximizes the above expression.)
    #
    #       Finally, separation_oracle() returns LOSS(idx,Y),PSI(X,Y)
    #       Finally, separation_oracle() returns LOSS(idx,Y),PSI(X,Y)
    def separation_oracle(self, idx, current_solution):
        samp = self.samples[idx]
        dims = len(samp)
        scores = [0,0,0]
        scores = [0, 0, 0]
        # compute scores for each of the three classifiers
        scores[0] = dot(current_solution[0:dims], samp)
        scores[1] = dot(current_solution[dims:2*dims], samp)
        scores[2] = dot(current_solution[2*dims:3*dims], samp)

        # Add in the loss-augmentation. Recall that we maximize LOSS(idx,y) + F(X,y) in
        # the separate oracle, not just F(X,y) as we normally would in predict_label().
        # Therefore, we must add in this extra amount to account for the loss-augmentation.
        # For our simple multi-class classifier, we incur a loss of 1 if we don't predict
        # the correct label and a loss of 0 if we get the right label.
        if (self.labels[idx] != 0):
        # Add in the loss-augmentation. Recall that we maximize
        # LOSS(idx,y) + F(X,y) in the separate oracle, not just F(X,y) as we
        # normally would in predict_label(). Therefore, we must add in this
        # extra amount to account for the loss-augmentation. For our simple
        # multi-class classifier, we incur a loss of 1 if we don't predict the
        # correct label and a loss of 0 if we get the right label.
        if self.labels[idx] != 0:
            scores[0] += 1
        if (self.labels[idx] != 1):
        if self.labels[idx] != 1:
            scores[1] += 1
        if (self.labels[idx] != 2):
        if self.labels[idx] != 2:
            scores[2] += 1

        # Now figure out which classifier has the largest loss-augmented score.
        max_scoring_label = scores.index(max(scores))
        # And finally record the loss that was associated with that predicted label.
        # Again, the loss is 1 if the label is incorrect and 0 otherwise.
        if (max_scoring_label == self.labels[idx]):
        # And finally record the loss that was associated with that predicted
        # label. Again, the loss is 1 if the label is incorrect and 0 otherwise.
        if max_scoring_label == self.labels[idx]:
            loss = 0
        else:
            loss = 1

        # Finally, return the loss and PSI vector corresponding to the label we just found.
        # Finally, return the loss and PSI vector corresponding to the label
        # we just found.
        psi = self.make_psi(samp, max_scoring_label)
        return loss,psi

        return loss, psi


if __name__ == "__main__":
    main()
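
# Once dlib.solve_structural_svm_problem() has produced a weight vector,
# predicting the label of a new sample uses the same three-classifier scoring
# seen in separation_oracle(), just without the loss augmentation. A minimal
# sketch, assuming the same dot() helper used above, a learned weight vector
# weights, and a 3 dimensional sample x (the function name is illustrative):
def predict_label_sketch(weights, x):
    dims = len(x)
    scores = [dot(weights[0:dims], x),
              dot(weights[dims:2 * dims], x),
              dot(weights[2 * dims:3 * dims], x)]
    # The predicted label is whichever classifier scores highest.
    return scores.index(max(scores))
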
@ -1,37 +1,41 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example program shows how you can use dlib to make an object detector
# for things like faces, pedestrians, and any other semi-rigid object. In
# particular, we go though the steps to train the kind of sliding window
# object detector first published by Dalal and Triggs in 2005 in the paper
# Histograms of Oriented Gradients for Human Detection.
#
# This example program shows how you can use dlib to make an object
# detector for things like faces, pedestrians, and any other semi-rigid
# object. In particular, we go though the steps to train the kind of sliding
# window object detector first published by Dalal and Triggs in 2005 in the
# paper Histograms of Oriented Gradients for Human Detection.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
#     sudo apt-get install libboost-python-dev cmake
import os
import sys
import glob

import dlib, sys, glob
import dlib
from skimage import io


# In this example we are going to train a face detector based on the small
# faces dataset in the examples/faces directory. This means you need to supply
# the path to this faces folder as a command line argument so we will know
# where it is.
if (len(sys.argv) != 2):
    print("Give the path to the examples/faces directory as the argument to this")
    print("program. For example, if you are in the python_examples folder then ")
    print("execute this program by running:")
    print("    ./train_object_detector.py ../examples/faces")
if len(sys.argv) != 2:
    print(
        "Give the path to the examples/faces directory as the argument to this "
        "program. For example, if you are in the python_examples folder then "
        "execute this program by running:\n"
        "    ./train_object_detector.py ../examples/faces")
    exit()
faces_folder = sys.argv[1]


# Now let's do the training. The train_simple_object_detector() function has a
# bunch of options, all of which come with reasonable default values. The next
# few lines goes over some of these options.
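
# The option-setting lines themselves fall outside the changed hunks shown
# here. For reference, a minimal sketch of the kind of setup that comment is
# describing, using dlib.simple_object_detector_training_options(); the exact
# values below are illustrative only:
options = dlib.simple_object_detector_training_options()
# The HOG detector is symmetric, so training on mirrored copies of each box
# usually helps.
options.add_left_right_image_flips = True
# C is the usual SVM regularization parameter; larger values fit the training
# data more tightly at the risk of overfitting.
options.C = 5
options.num_threads = 4
options.be_verbose = True
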
@ -59,20 +63,22 @@ options.be_verbose = True
# images with boxes. To see how to use it read the tools/imglab/README.txt
# file. But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(faces_folder+"/training.xml", "detector.svm", options)

training_xml_path = os.path.join(faces_folder, "training.xml")
testing_xml_path = os.path.join(faces_folder, "testing.xml")

dlib.train_simple_object_detector(training_xml_path, "detector.svm", options)

# Now that we have a face detector we can test it. The first statement tests
# it on the training data. It will print(the precision, recall, and then)
# average precision.
print("\ntraining accuracy: {}".format(dlib.test_simple_object_detector(faces_folder+"/training.xml", "detector.svm")))
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on. The next line does this. Happily, we
# see that the object detector works perfectly on the testing images.
print("testing accuracy: {}".format(dlib.test_simple_object_detector(faces_folder+"/testing.xml", "detector.svm")))


print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))

# Now let's use the detector as you would in a normal application. First we
# will load it from disk.
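
# The loading line itself sits outside the changed hunks shown here. A minimal
# sketch of loading a trained detector from disk, assuming the file name used
# by the training call above:
detector = dlib.simple_object_detector("detector.svm")
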
@ -84,39 +90,37 @@ win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
print("\nShowing detections on the images in the faces folder...")
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(faces_folder+"/*.jpg"):
    print("processing file:", f)
for f in glob.glob(faces_folder + "/*.jpg"):
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("number of faces detected:", len(dets))
    for d in dets:
        print("  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    raw_input("Hit enter to continue")




# Finally, note that you don't have to use the XML based input to
# train_simple_object_detector(). If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.

# You just need to put your images into a list.
images = [io.imread(faces_folder + '/2008_002506.jpg'), io.imread(faces_folder + '/2009_004587.jpg') ]
images = [io.imread(faces_folder + '/2008_002506.jpg'),
          io.imread(faces_folder + '/2009_004587.jpg')]
# Then for each image you make a list of rectangles which give the pixel
# locations of the edges of the boxes.
boxes_img1 = ([dlib.rectangle(left=329, top=78, right=437, bottom=186),
               dlib.rectangle(left=224, top=95, right=314, bottom=185),
               dlib.rectangle(left=125, top=65, right=214, bottom=155) ] )
boxes_img2 = ([dlib.rectangle(left=154, top=46, right=228, bottom=121 ),
               dlib.rectangle(left=266, top=280, right=328, bottom=342) ] )
               dlib.rectangle(left=224, top=95, right=314, bottom=185),
               dlib.rectangle(left=125, top=65, right=214, bottom=155)])
boxes_img2 = ([dlib.rectangle(left=154, top=46, right=228, bottom=121),
               dlib.rectangle(left=266, top=280, right=328, bottom=342)])
# And then you aggregate those lists of boxes into one big list and then call
# train_simple_object_detector().
boxes = [boxes_img1, boxes_img2]
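
# The corresponding training call falls outside the changed hunks shown here.
# A hedged sketch of how training from these in-memory lists might be invoked,
# assuming the signature mirrors the test_simple_object_detector() call below;
# the output file name detector2.svm is illustrative only:
dlib.train_simple_object_detector(images, boxes, "detector2.svm", options)
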
@ -132,4 +136,5 @@ raw_input("Hit enter to continue")
# test_simple_object_detector(). If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.
print("Training accuracy: {}".format(dlib.test_simple_object_detector(images, boxes, "detector.svm")))
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(images, boxes, "detector.svm")))