diff --git a/.gitignore b/.gitignore
index a18183b..83b1d7c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,3 +56,9 @@ alpha_nlu/aclImdb/
 alpha_nlu/aclImdb_v1.tar.gz
 alpha_nlu/metadata.tsv
 alpha_nlu/vectors.tsv
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+
+# virtual env
+env/
\ No newline at end of file
diff --git a/epsilon_cv/face_comparison.py b/epsilon_cv/face_comparison.py
new file mode 100644
index 0000000..5d66ee3
--- /dev/null
+++ b/epsilon_cv/face_comparison.py
@@ -0,0 +1,43 @@
+"""
+General description
+TODO : add MIT license
+
+"""
+
+# function name : ma_fonction()
+# variable : ma_variable
+# class name : MaClasse()
+
+import cv2
+from simple_facerec import SimpleFacerec
+
+# Encode faces from a folder
+sfr = SimpleFacerec()
+sfr.load_encoding_images("epsilon_cv/images")
+
+
+# Load Camera
+cap = cv2.VideoCapture(0)
+
+if not cap.isOpened():
+    print("Could not open video device")
+
+while True:
+    ret, frame = cap.read()
+
+    # Detect Faces
+    face_locations, face_names = sfr.detect_known_faces(frame)
+    for face_loc, name in zip(face_locations, face_names):
+        y1, x2, y2, x1 = face_loc[0], face_loc[1], face_loc[2], face_loc[3]
+
+        cv2.putText(frame, name, (x1, y1 - 10), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 200), 2)
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 200), 4)
+
+    cv2.imshow("Frame", frame)
+
+    key = cv2.waitKey(1)
+    if key == 27:  # Esc key to quit
+        break
+
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/epsilon_cv/face_recognition.py b/epsilon_cv/face_recognition.py
deleted file mode 100644
index b95b20c..0000000
--- a/epsilon_cv/face_recognition.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-General description
-TODO : add MIT liscence
-
-"""
-
-# nom de fonction : ma_fonction()
-# variable : ma_variable
-# nom de classe : MaClasse()
diff --git a/epsilon_cv/images/Adam Levine.jpg b/epsilon_cv/images/Adam Levine.jpg
new file mode 100644
index 0000000..791a6d4
Binary files /dev/null and b/epsilon_cv/images/Adam Levine.jpg differ
diff --git a/epsilon_cv/images/Brad Pitt.jpg b/epsilon_cv/images/Brad Pitt.jpg
new file mode 100644
index 0000000..f772418
Binary files /dev/null and b/epsilon_cv/images/Brad Pitt.jpg differ
diff --git a/epsilon_cv/images/Chris Hemsworth.jpg b/epsilon_cv/images/Chris Hemsworth.jpg
new file mode 100644
index 0000000..618c561
Binary files /dev/null and b/epsilon_cv/images/Chris Hemsworth.jpg differ
diff --git a/epsilon_cv/images/Zack Effron.jpg b/epsilon_cv/images/Zack Effron.jpg
new file mode 100644
index 0000000..8a7455c
Binary files /dev/null and b/epsilon_cv/images/Zack Effron.jpg differ
diff --git a/epsilon_cv/simple_facerec.py b/epsilon_cv/simple_facerec.py
new file mode 100644
index 0000000..baeba09
--- /dev/null
+++ b/epsilon_cv/simple_facerec.py
@@ -0,0 +1,74 @@
+# Source: pysource.com
+# https://pysource.com/2021/08/16/face-recognition-in-real-time-with-opencv-and-python/
+
+import face_recognition
+import cv2
+import os
+import glob
+import numpy as np
+
+class SimpleFacerec:
+    def __init__(self):
+        self.known_face_encodings = []
+        self.known_face_names = []
+
+        # Resize frame for a faster speed
+        self.frame_resizing = 0.25
+
+    def load_encoding_images(self, images_path):
+        """
+        Load encoding images from path
+        :param images_path:
+        :return:
+        """
+        # Load Images
+        images_path = glob.glob(os.path.join(images_path, "*.*"))
+
+        print("{} encoding images found.".format(len(images_path)))
+
+        # Store image encoding and names
+        for img_path in images_path:
+            img = cv2.imread(img_path)
+            rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+            # Get the filename only from the initial file path.
+            basename = os.path.basename(img_path)
+            (filename, ext) = os.path.splitext(basename)
+            # Get encoding
+            img_encoding = face_recognition.face_encodings(rgb_img)[0]
+
+            # Store file name and file encoding
+            self.known_face_encodings.append(img_encoding)
+            self.known_face_names.append(filename)
+        print("Encoding images loaded")
+
+    def detect_known_faces(self, frame):
+        small_frame = cv2.resize(frame, (0, 0), fx=self.frame_resizing, fy=self.frame_resizing)
+        # Find all the faces and face encodings in the current frame of video
+        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
+        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
+        face_locations = face_recognition.face_locations(rgb_small_frame)
+        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
+
+        face_names = []
+        for face_encoding in face_encodings:
+            # See if the face is a match for the known face(s)
+            matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
+            name = "Unknown"
+
+            # # If a match was found in known_face_encodings, just use the first one.
+            # if True in matches:
+            #     first_match_index = matches.index(True)
+            #     name = known_face_names[first_match_index]
+
+            # Or instead, use the known face with the smallest distance to the new face
+            face_distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
+            best_match_index = np.argmin(face_distances)
+            if matches[best_match_index]:
+                name = self.known_face_names[best_match_index]
+            face_names.append(name)
+
+        # Convert to numpy array to adjust coordinates with frame resizing quickly
+        face_locations = np.array(face_locations)
+        face_locations = face_locations / self.frame_resizing
+        return face_locations.astype(int), face_names
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..f9a428c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+opencv-python
+face_recognition
\ No newline at end of file
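
As a quick way to exercise the new SimpleFacerec API outside the webcam loop in face_comparison.py, here is a minimal sketch that runs the detector on one of the still images added by this patch. The script name, the choice of test image, and the output filename are illustrative assumptions rather than part of the diff; it also assumes the packages from requirements.txt are installed and that it is launched from the repository root with epsilon_cv/ on the import path.

# check_facerec.py (hypothetical helper, not included in this patch)
import cv2
from simple_facerec import SimpleFacerec

sfr = SimpleFacerec()
sfr.load_encoding_images("epsilon_cv/images")

# "Brad Pitt.jpg" is one of the reference images added by this diff.
frame = cv2.imread("epsilon_cv/images/Brad Pitt.jpg")

# detect_known_faces returns (top, right, bottom, left) boxes, already rescaled
# back to the full-size frame, together with the matched names.
face_locations, face_names = sfr.detect_known_faces(frame)

for (y1, x2, y2, x1), name in zip(face_locations, face_names):
    # Cast the numpy integers to plain ints before handing them to OpenCV.
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 200), 2)
    cv2.putText(frame, name, (x1, y1 - 10), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 200), 2)

# "annotated.jpg" is an arbitrary output name for this sketch.
cv2.imwrite("annotated.jpg", frame)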