Upload main.py
main.py
ADDED
@@ -0,0 +1,122 @@
import numpy as np
import onnxruntime as rt
import mediapipe as mp
import cv2
import os
import time
from skimage.transform import SimilarityTransform


# ---------------------------------------------------------------------------------------------------------------------
# INITIALIZATIONS

# Target landmark coordinates for alignment (used in training)
LANDMARKS_TARGET = np.array(
    [
        [38.2946, 51.6963],
        [73.5318, 51.5014],
        [56.0252, 71.7366],
        [41.5493, 92.3655],
        [70.7299, 92.2041],
    ],
    dtype=np.float32,
)

# Initialize the face detector (for example, MediaPipe FaceMesh)
FACE_DETECTOR = mp.solutions.face_mesh.FaceMesh(
    refine_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5, max_num_faces=1
)

# Initialize the face recognition model (FaceTransformerOctupletLoss)
FACE_RECOGNIZER = rt.InferenceSession("FaceTransformerOctupletLoss.onnx", providers=rt.get_available_providers())


# ---------------------------------------------------------------------------------------------------------------------
# FACE CAPTURE

# Capture a frame with your webcam and store it on disk
if not os.path.exists("img.jpg"):
    cap = cv2.VideoCapture(1)  # open webcam (adjust the device index, e.g. 0, for your setup)
    time.sleep(2)  # wait for the camera to warm up

    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    ret, img = cap.read()  # capture a frame
    if ret:
        cv2.imwrite("img.jpg", img)  # save the frame
    cap.release()  # release the webcam
else:
    img = cv2.imread("img.jpg")  # read the frame from disk


# ---------------------------------------------------------------------------------------------------------------------
# FACE DETECTION

# Process the image with the face detector (MediaPipe expects an RGB image, while OpenCV loads BGR)
result = FACE_DETECTOR.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

if result.multi_face_landmarks:
    # Select 5 landmarks (Left Eye Center, Right Eye Center, Nose Tip, Left Mouth Corner, Right Mouth Corner)
    five_landmarks = np.asarray(result.multi_face_landmarks[0].landmark)[[470, 475, 1, 57, 287]]

    # Extract the x and y coordinates of the landmarks of interest
    landmarks = np.asarray(
        [[landmark.x * img.shape[1], landmark.y * img.shape[0]] for landmark in five_landmarks]
    )

    # Extract the x and y coordinates of all landmarks
    all_x_coords = [landmark.x * img.shape[1] for landmark in result.multi_face_landmarks[0].landmark]
    all_y_coords = [landmark.y * img.shape[0] for landmark in result.multi_face_landmarks[0].landmark]

    # Compute the bounding box of the face
    x_min, x_max = int(min(all_x_coords)), int(max(all_x_coords))
    y_min, y_max = int(min(all_y_coords)), int(max(all_y_coords))
    bbox = [[x_min, y_min], [x_max, y_max]]

else:
    print("No faces detected")
    exit()


# ---------------------------------------------------------------------------------------------------------------------
# FACE ALIGNMENT

# Align the image with the 5 landmarks
tform = SimilarityTransform()
tform.estimate(landmarks, LANDMARKS_TARGET)
tmatrix = tform.params[0:2, :]
img_aligned = cv2.warpAffine(img, tmatrix, (112, 112), borderValue=0.0)

# Save the aligned image to disk
cv2.imwrite("img2_aligned.jpg", img_aligned)


# ---------------------------------------------------------------------------------------------------------------------
# FACE RECOGNITION

# Infer the face embedding with onnxruntime
input_image = (np.asarray([img_aligned]).astype(np.float32)).clip(0.0, 255.0).transpose(0, 3, 1, 2)
embedding = FACE_RECOGNIZER.run(None, {"input_image": input_image})[0][0]

print("Embedding:", embedding)

# If you have embeddings for several facial images, you can compute the cosine distance between them and decide,
# based on a threshold, whether two images show the same person or different people. For example, if the cosine
# distance is less than 0.5, the two images are of the same person; otherwise they are of different people. The lower
# the cosine distance, the more similar the two images are. The cosine distance is a value between 0 and 2, where 0
# means the two embeddings are identical and 2 means they are completely different.
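
# A minimal sketch of the comparison step described above (not part of the original script): it assumes a
# hypothetical second embedding, obtained the same way from another aligned face image, and uses the 0.5
# threshold mentioned in the comment.
def is_same_person(embedding_a, embedding_b, threshold=0.5):
    """Return True if the cosine distance between two face embeddings is below the threshold."""
    cosine_distance = 1.0 - np.dot(embedding_a, embedding_b) / (
        np.linalg.norm(embedding_a) * np.linalg.norm(embedding_b)
    )
    return cosine_distance < threshold

# Example usage (with a hypothetical second embedding `embedding_other`):
# print("Same person" if is_same_person(embedding, embedding_other) else "Different people")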

# ---------------------------------------------------------------------------------------------------------------------
# VISUALIZATION

# Draw the bounding box on a copy of the image
img_draw = img.copy()
cv2.rectangle(img_draw, (bbox[0][0], bbox[0][1]), (bbox[1][0], bbox[1][1]), (255, 0, 0), 2)

# Show the detected face on the image
cv2.imshow("img", img_draw)
cv2.waitKey(0)

# Show the aligned image
cv2.imshow("img", img_aligned)
cv2.waitKey(0)
cv2.destroyAllWindows()  # close the display windows