Mirror of https://github.com/davisking/dlib.git

Python3 friendly printing in examples

commit cc3bb4993c
parent e3aee32f34
@@ -35,15 +35,15 @@ detector = dlib.get_frontal_face_detector()
 win = dlib.image_window()

 for f in sys.argv[1:]:
-    print "processing file: ", f
+    print("processing file: ", f)
     img = io.imread(f)
     # The 1 in the second argument indicates that we should upsample the image
     # 1 time. This will make everything bigger and allow us to detect more
     # faces.
     dets = detector(img,1)
-    print "number of faces detected: ", len(dets)
+    print("number of faces detected: ", len(dets))
     for d in dets:
-        print " detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()
+        print(" detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())

     win.clear_overlay()
     win.set_image(img)
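A note on changes like the ones above: under plain Python 2, print("processing file: ", f) is still the print statement applied to a parenthesized tuple, so it outputs ('processing file: ', f) rather than two space-separated values. If the examples are meant to keep running under both interpreters, the usual fix is a __future__ import; a minimal sketch (the filename is hypothetical, just to show the behavior):

from __future__ import print_function  # makes print a function in Python 2 as well

# Hypothetical filename for illustration only.
f = "some_image.jpg"
# Prints the same thing under Python 2 and 3: processing file:  some_image.jpg
print("processing file: ", f)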
@@ -40,11 +40,11 @@ assignment = dlib.max_cost_assignment(cost)
 # This prints optimal assignments: [2, 0, 1]
 # which indicates that we should assign the person from the first row of the cost matrix to
 # job 2, the middle row person to job 0, and the bottom row person to job 1.
-print "optimal assignments: ", assignment
+print("optimal assignments: ", assignment)


 # This prints optimal cost: 16.0
 # which is correct since our optimal assignment is 6+5+5.
-print "optimal cost: ", dlib.assignment_cost(cost, assignment)
+print("optimal cost: ", dlib.assignment_cost(cost, assignment))

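To make the 6+5+5 arithmetic concrete, here is a self-contained sketch using a hypothetical 3x3 cost matrix (an assumption for illustration, not read from this diff) for which [2, 0, 1] is the best assignment:

import dlib

# Hypothetical cost matrix: entry (i, j) is the value of assigning
# person i to job j.
cost = dlib.matrix([[1, 2, 6],
                    [5, 3, 6],
                    [4, 5, 0]])

# The maximizing assignment pairs row 0 with job 2 (6), row 1 with
# job 0 (5), and row 2 with job 1 (5).
assignment = dlib.max_cost_assignment(cost)
print("optimal assignments: ", assignment)                       # [2, 0, 1]
print("optimal cost: ", dlib.assignment_cost(cost, assignment))  # 6+5+5 = 16.0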
@@ -176,9 +176,9 @@ else:
 # We can also measure the accuracy of a model relative to some labeled data. This
 # statement prints the precision, recall, and F1-score of the model relative to the data in
 # training_sequences/segments.
-print "Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments)
+print("Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments))

 # We can also do 5-fold cross-validation and print the resulting precision, recall, and F1-score.
-print "cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params)
+print("cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params))

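The F1-score these two statements report is the harmonic mean of precision and recall, which is easy to check by hand:

def f1_score(precision, recall):
    # The harmonic mean is dragged toward the smaller value, so a
    # segmenter cannot hide poor recall behind high precision.
    return 2.0 * precision * recall / (precision + recall)

print(f1_score(0.9, 0.6))  # 0.72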
@@ -53,8 +53,8 @@ rank = trainer.train(data)
 # Now if you call rank on a vector it will output a ranking score. In
 # particular, the ranking score for relevant vectors should be larger than the
 # score for non-relevant vectors.
-print "ranking score for a relevant vector: ", rank(data.relevant[0])
-print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
+print("ranking score for a relevant vector: ", rank(data.relevant[0]))
+print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
 # The output is the following:
 # ranking score for a relevant vector: 0.5
 # ranking score for a non-relevant vector: -0.5
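For context, the data and rank objects in this hunk are built earlier in the ranking example. The setup looks roughly like the sketch below; the specific vectors and the C value are assumptions for illustration, not values read from this diff:

import dlib

data = dlib.ranking_pair()
# One assumed "relevant" training vector and one "non-relevant" one.
data.relevant.append(dlib.vector([1, 0]))
data.nonrelevant.append(dlib.vector([0, 1]))

trainer = dlib.svm_rank_trainer()
trainer.c = 10  # assumed regularization setting
rank = trainer.train(data)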
@@ -65,12 +65,12 @@ print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
 # In this case, the ordering accuracy tells us how often a non-relevant vector
 # was ranked ahead of a relevant vector. In this case, it returns 1 for both
 # metrics, indicating that the rank function outputs a perfect ranking.
-print dlib.test_ranking_function(rank, data)
+print(dlib.test_ranking_function(rank, data))

 # The ranking scores are computed by taking the dot product between a learned
 # weight vector and a data vector. If you want to see the learned weight vector
 # you can display it like so:
-print "weights: \n", rank.weights
+print("weights: \n", rank.weights)
 # In this case the weights are:
 # 0.5
 # -0.5
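Since the scores are dot products against the printed weights [0.5, -0.5], they can be verified by hand, assuming (as in the sketch above) a relevant vector of [1, 0] and a non-relevant vector of [0, 1]:

def dot(w, x):
    return sum(wi * xi for wi, xi in zip(w, x))

weights = [0.5, -0.5]
print(dot(weights, [1, 0]))  # 0.5, the relevant vector's score
print(dot(weights, [0, 1]))  # -0.5, the non-relevant vector's score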
@@ -112,7 +112,7 @@ rank = trainer.train(queries)
 # splits and returns the overall ranking accuracy based on the held out data.
 # Just like test_ranking_function(), it reports both the ordering accuracy and
 # mean average precision.
-print "cross validation results: ", dlib.cross_validate_ranking_trainer(trainer, queries, 4)
+print("cross validation results: ", dlib.cross_validate_ranking_trainer(trainer, queries, 4))



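Ordering accuracy itself has a simple definition: the fraction of (relevant, non-relevant) pairs that the scoring function puts in the right order. A minimal standalone sketch:

def ordering_accuracy(relevant_scores, nonrelevant_scores):
    # Count the pairs where the relevant item outscores the non-relevant one.
    pairs = [(r, n) for r in relevant_scores for n in nonrelevant_scores]
    return sum(1 for r, n in pairs if r > n) / float(len(pairs))

print(ordering_accuracy([0.5], [-0.5]))  # 1.0, a perfect ranking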
@@ -141,8 +141,8 @@ data.nonrelevant.append(samp)

 trainer = dlib.svm_rank_trainer_sparse()
 rank = trainer.train(data)
-print "ranking score for a relevant vector: ", rank(data.relevant[0])
-print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
+print("ranking score for a relevant vector: ", rank(data.relevant[0]))
+print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
 # Just as before, the output is the following:
 # ranking score for a relevant vector: 0.5
 # ranking score for a non-relevant vector: -0.5
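The sparse variant stores only (index, value) pairs rather than dense arrays. Building the samp object appended in this hunk's context looks roughly like the following sketch of dlib's sparse API; the particular indices and values are assumptions:

import dlib

data = dlib.sparse_ranking_pair()
samp = dlib.sparse_vector()
# This sparse vector represents the dense vector [1, 0, 1]:
# index 0 -> 1 and index 2 -> 1, with index 1 implicitly zero.
samp.append(dlib.pair(0, 1))
samp.append(dlib.pair(2, 1))
data.relevant.append(samp)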
@@ -46,9 +46,9 @@ def main():

     # Print the weights and then evaluate predict_label() on each of our training samples.
     # Note that the correct label is predicted for each sample.
-    print weights
+    print(weights)
     for i in range(len(samples)):
-        print "predicted label for sample[{0}]: {1}".format(i, predict_label(weights, samples[i]))
+        print("predicted label for sample[{0}]: {1}".format(i, predict_label(weights, samples[i])))

 def predict_label(weights, sample):
     """Given the 9-dimensional weight vector which defines a 3 class classifier, predict the
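The docstring cut off above describes a one-vs-all style predictor: the 9 weights act as three stacked per-class 3-vectors, and the label is the argmax of the three dot products. A sketch of that logic, assuming weights and sample behave like plain lists of numbers:

def predict_label(weights, sample):
    # Score the 3-dimensional sample against each class's slice of the
    # weight vector and return the index of the highest-scoring class.
    scores = []
    for k in range(3):
        w = weights[3 * k : 3 * k + 3]
        scores.append(sum(wi * xi for wi, xi in zip(w, sample)))
    return scores.index(max(scores))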
@@ -24,10 +24,10 @@ from skimage import io
 # the path to this faces folder as a command line argument so we will know
 # where it is.
 if (len(sys.argv) != 2):
-    print "Give the path to the examples/faces directory as the argument to this"
-    print "program. For example, if you are in the python_examples folder then "
-    print "execute this program by running:"
-    print " ./train_object_detector.py ../examples/faces"
+    print("Give the path to the examples/faces directory as the argument to this")
+    print("program. For example, if you are in the python_examples folder then ")
+    print("execute this program by running:")
+    print(" ./train_object_detector.py ../examples/faces")
     exit()
 faces_folder = sys.argv[1]

@@ -64,13 +64,13 @@ dlib.train_simple_object_detector(faces_folder+"/training.xml","detector.svm", o


 # Now that we have a face detector we can test it. The first statement tests
-# it on the training data. It will print the precision, recall, and then
+# it on the training data. It will print(the precision, recall, and then)
 # average precision.
-print "\ntraining accuracy:", dlib.test_simple_object_detector(faces_folder+"/training.xml", "detector.svm")
+print("\ntraining accuracy:", dlib.test_simple_object_detector(faces_folder+"/training.xml", "detector.svm"))
 # However, to get an idea if it really worked without overfitting we need to
 # run it on images it wasn't trained on. The next line does this. Happily, we
 # see that the object detector works perfectly on the testing images.
-print "testing accuracy: ", dlib.test_simple_object_detector(faces_folder+"/testing.xml", "detector.svm")
+print("testing accuracy: ", dlib.test_simple_object_detector(faces_folder+"/testing.xml", "detector.svm"))



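The trailing o truncated in this hunk's header is presumably a training-options object configured earlier in the example. Setting one up looks roughly like the sketch below; the specific values are assumptions, not settings read from this diff:

import dlib

options = dlib.simple_object_detector_training_options()
# Faces are left/right symmetric, so mirrored copies double the training data.
options.add_left_right_image_flips = True
options.C = 5            # assumed SVM regularization value
options.num_threads = 4  # assumed; match your CPU core count
options.be_verbose = True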
@@ -84,15 +84,15 @@ win_det.set_image(detector)

 # Now let's run the detector over the images in the faces folder and display the
 # results.
-print "\nShowing detections on the images in the faces folder..."
+print("\nShowing detections on the images in the faces folder...")
 win = dlib.image_window()
 for f in glob.glob(faces_folder+"/*.jpg"):
-    print "processing file:", f
+    print("processing file:", f)
     img = io.imread(f)
     dets = detector(img)
-    print "number of faces detected:", len(dets)
+    print("number of faces detected:", len(dets))
     for d in dets:
-        print " detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()
+        print(" detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())

     win.clear_overlay()
     win.set_image(img)