Mirror of https://github.com/davisking/dlib.git (synced 2024-11-01 10:14:53 +08:00)
Added code to get face_chip images.
This commit is contained in:
parent 9fe352d696
commit 5f26972551

python_examples/face_alignment.py  (98 lines added, new executable file)
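For quick orientation, here is a minimal sketch of how the two bindings added by this commit, dlib.get_face_chip and dlib.get_face_chips, are meant to be called. It only condenses the full example below; the model file name and the image path are the same placeholders the example uses, and the size/padding values simply spell out the defaults from the C++ bindings further down.

import cv2
import dlib

# Placeholder paths -- the same files the full example below expects.
predictor_path = "shape_predictor_5_face_landmarks.dat"
face_file_path = "../examples/faces/bald_guys.jpg"

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)

# dlib works on RGB images, so convert OpenCV's BGR output.
img = cv2.cvtColor(cv2.imread(face_file_path), cv2.COLOR_BGR2RGB)

# Collect one full_object_detection per detected face.
faces = dlib.full_object_detections()
for det in detector(img, 1):
    faces.append(sp(img, det))

# New in this commit: aligned face chips returned to Python.
chips = dlib.get_face_chips(img, faces, size=150, padding=0.25)  # all faces
chip = dlib.get_face_chip(img, faces[0])                         # a single face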
@@ -0,0 +1,98 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use dlib's face recognition tool for image alignment.
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
#   You can install dlib using the command:
#       pip install dlib
#
#   Alternatively, if you want to compile dlib yourself then go into the dlib
#   root folder and run:
#       python setup.py install
#   or
#       python setup.py install --yes USE_AVX_INSTRUCTIONS
#   if you have a CPU that supports AVX instructions, since this makes some
#   things run faster. This code will also use CUDA if you have CUDA and cuDNN
#   installed.
#
#   Compiling dlib should work on any operating system so long as you have
#   CMake and boost-python installed. On Ubuntu, this can be done easily by
#   running the command:
#       sudo apt-get install libboost-python-dev cmake
#
#   Also note that this example requires OpenCV and Numpy which can be installed
#   via the command:
#       pip install opencv-python numpy
#   Or downloaded from http://opencv.org/releases.html

import sys
import os
import dlib
import glob
import cv2
import numpy as np

if len(sys.argv) != 4:
    print(
        "Call this program like this:\n"
        "  ./face_alignment.py shape_predictor_5_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat ../examples/faces/bald_guys.jpg\n"
        "You can download a trained facial shape predictor and recognition model from:\n"
        "  http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n"
        "  http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
    exit()

predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
face_file_path = sys.argv[3]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

# Load the image using OpenCV
bgr_img = cv2.imread(face_file_path)
if bgr_img is None:
    print("Sorry, we could not load '{}' as an image".format(face_file_path))
    exit()

# Convert to RGB since dlib uses RGB images
img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)

# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# The full object detection object
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

# Get the aligned face images
# Optionally:
# images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
    cv_rgb_image = np.array(image).astype(np.uint8)
    cv_bgr_img = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imshow('image', cv_bgr_img)
    cv2.waitKey(0)

# It is also possible to get a single chip
image = dlib.get_face_chip(img, faces[0])
cv_rgb_image = np.array(image).astype(np.uint8)
cv_bgr_img = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('image', cv_bgr_img)
cv2.waitKey(0)

cv2.destroyAllWindows()
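One detail worth noting: as the binding code in the next hunks shows, get_face_chips returns plain nested Python lists (rows of [red, green, blue] values), not numpy arrays, which is why the example converts each chip with np.array(...).astype(np.uint8) before handing it to OpenCV. If you prefer to convert everything up front, a small sketch (assuming `images` comes from dlib.get_face_chips exactly as in the example above):

import numpy as np

# Turn every nested-list chip into an H x W x 3 uint8 array in one pass.
chips = [np.array(im, dtype=np.uint8) for im in images]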
@@ -182,6 +182,9 @@ void save_face_chips (
    float padding = 0.25
)
{
    if (!is_rgb_python_image(img))
        throw dlib::error("Unsupported image type, must be RGB image.");

    int num_faces = faces.size();
    std::vector<chip_details> dets;
    for (auto& f : faces)
@@ -221,6 +224,75 @@ void save_face_chip (
BOOST_PYTHON_FUNCTION_OVERLOADS(save_face_chip_with_defaults, save_face_chip, 3, 5)
BOOST_PYTHON_FUNCTION_OVERLOADS(save_face_chips_with_defaults, save_face_chips, 3, 5)

// ----------------------------------------------------------------------------------------

boost::python::list get_face_chips (
    object img,
    const std::vector<full_object_detection>& faces,
    size_t size = 150,
    float padding = 0.25
)
{
    if (!is_rgb_python_image(img))
        throw dlib::error("Unsupported image type, must be RGB image.");
    if (faces.size() < 1) {
        throw dlib::error("No faces were specified in the faces array.");
    }
    boost::python::list chips_list;

    int num_faces = faces.size();
    std::vector<chip_details> dets;
    for (auto& f : faces)
        dets.push_back(get_face_chip_details(f, size, padding));
    dlib::array<matrix<rgb_pixel>> face_chips;
    extract_image_chips(numpy_rgb_image(img), dets, face_chips);

    for (auto& chip : face_chips)
    {
        boost::python::list img;

        for(int row=0; row<size; row++) {
            boost::python::list row_list;
            for(int col=0; col<size; col++) {
                rgb_pixel pixel = chip(row, col);
                boost::python::list item;

                item.append(pixel.red);
                item.append(pixel.green);
                item.append(pixel.blue);
                row_list.append(item);
            }
            img.append(row_list);
        }

        chips_list.append(img);
    }
    return chips_list;
}

boost::python::list get_face_chip (
    object img,
    const full_object_detection& face,
    size_t size = 150,
    float padding = 0.25
)
{
    std::vector<full_object_detection> faces(1, face);
    boost::python::list result = get_face_chips(img, faces, size, padding);
    size_t num_images = boost::python::len(result);
    if(num_images == 1) {
        return boost::python::extract<boost::python::list>(result[0]);
    } else {
        throw dlib::error("No face chips found!");
    }
}

BOOST_PYTHON_FUNCTION_OVERLOADS(get_face_chip_with_defaults, get_face_chip, 2, 4)
BOOST_PYTHON_FUNCTION_OVERLOADS(get_face_chips_with_defaults, get_face_chips, 2, 4)

// ----------------------------------------------------------------------------------------

void bind_face_recognition()
@@ -246,11 +318,19 @@ void bind_face_recognition()
        "Takes an image and a full_object_detections object that reference faces in that image and saves the faces with the specified file name prefix. The faces will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
        (arg("img"), arg("faces"), arg("chip_filename"), arg("size"), arg("padding"))
        ));
    def("get_face_chip", &get_face_chip, get_face_chip_with_defaults(
        "Takes an image and a full_object_detection that references a face in that image and returns the face as a list of lists representing the image. The face will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
        (arg("img"), arg("face"), arg("size"), arg("padding"))
        ));
    def("get_face_chips", &get_face_chips, get_face_chips_with_defaults(
        "Takes an image and a full_object_detections object that reference faces in that image and returns the faces as a list of list of lists representing the image. The faces will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
        (arg("img"), arg("faces"), arg("size"), arg("padding"))
        ));
    def("chinese_whispers_clustering", &chinese_whispers_clustering, (arg("descriptors"), arg("threshold")),
        "Takes a list of descriptors and returns a list that contains a label for each descriptor. Clustering is done using dlib::chinese_whispers."
        );

    {
    typedef std::vector<full_object_detection> type;
    class_<type>("full_object_detections", "An array of full_object_detection objects.")
        .def(vector_indexing_suite<type>())
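For reference, the new return-to-Python functions sit alongside the pre-existing save_face_chip/save_face_chips bindings visible in the context lines of the C++ hunks above, which write the aligned chips straight to disk instead of returning them. A hedged sketch of both, continuing from the first sketch at the top of this page (same `img` and `faces`; the "aligned" prefix is only an illustrative file name prefix):

# Pre-existing binding: write the chips to disk, named from the given prefix.
dlib.save_face_chips(img, faces, "aligned", size=150, padding=0.25)

# Added in this commit: get the chips back as nested lists for further processing.
chips = dlib.get_face_chips(img, faces, size=150, padding=0.25)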