diff --git a/README.md b/README.md
index 4f09075..29af2e5 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Face recognition sample with opencv-python.
 ```bash
 sudo apt-get update && sudo apt-get upgrade
-sudo apt-get install python3-opencv
+sudo apt-get install python3-opencv python3-picamera python3-numpy python3-pil
 ```
 * DB Browser for SQLite
diff --git a/detector_picam.py b/detector_picam.py
index a637f83..6777e95 100644
--- a/detector_picam.py
+++ b/detector_picam.py
@@ -1,4 +1,10 @@
+# detector_picam.py
+# Check whether the person in front of the camera is anyone stored in the database
+# Using Pi Camera v2 module (single threading)
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-3/
+# By: Mickey Chan @ 2019
 
 # Import required modules
 import cv2
@@ -19,7 +25,7 @@
     print("Please train the data first")
     exit(0)
 
-# Setup GPIO for unlock LED
+# Setup GPIO for door lock
 relayPin = 26
 GPIO.setmode(GPIO.BCM)
 GPIO.setup(relayPin, GPIO.OUT)
@@ -37,24 +43,24 @@
 camera.framerate = 30
 rawCapture = PiRGBArray(camera, size=(640, 480))
 
-# Setup Classifier for detect face
+# Setup Classifier for detecting faces
 faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
 # Setup LBPH recognizer for face recognition
 recognizer = cv2.face.createLBPHFaceRecognizer() # or LBPHFaceRecognizer_create()
 # Load training data
-recognizer.load(fname) # read() for LBPHFaceRecognizer_create()
+recognizer.load(fname) # change to read() for LBPHFaceRecognizer_create()
 
 for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
-    # Turn off unlock LED when timeout
+    # Lock the door again after the unlock timeout
     if time.time() - lastUnlockedAt > unlockDuration:
         GPIO.output(relayPin, 0)
 
     frame = frame.array
 
     # Detect face
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors = 5)
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale
+    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame
     for (x, y, w, h) in faces:
-        # Try to recognize the face a
+        # Try to recognize the face using the recognizer
         roiGray = gray[y:y+h, x:x+w]
         id_, conf = recognizer.predict(roiGray)
         print(id_, conf)
@@ -63,7 +69,7 @@
         # retrieve the user name from database,
         # draw a rectangle around the face,
         # print the name of the user and
-        # light up the unlock LED for 5 secord
+        # unlock the door for 5 seconds
         if conf <= 70:
             cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
             # retrieve user name from database
@@ -71,6 +77,7 @@
             result = db.fetchall()
             name = result[0][0]
 
+            # You may do anything below for the detected user, e.g. unlock the door
             GPIO.output(relayPin, 1) # Unlock
             lastUnlockedAt = time.time()
             print("[Unlock] " + name + " (" + str(conf) + ")")
@@ -82,7 +89,8 @@
             #cv2.putText(frame, 'No Match', (x+2,y+h-5), font, 1, (0,0,255), 2)
 
     cv2.imshow("Face Recognizer", frame)
-    rawCapture.truncate(0)
+    rawCapture.truncate(0) # Clear frame buffer for the next frame
 
+    # Press ESC or 'q' to quit the program
     key = cv2.waitKey(1) & 0xff
     if key == 27 or key == ord('q'):
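The `load()`/`read()` split noted in the comments above depends on which opencv-contrib API is installed. A minimal, version-agnostic loader sketch (assumes the `cv2.face` module is available; the helper name and default path are illustrative):

```python
import cv2

def load_recognizer(model_path="recognizer/trainingData.yml"):
    # Newer contrib builds expose LBPHFaceRecognizer_create()/read(),
    # older ones expose createLBPHFaceRecognizer()/load()
    if hasattr(cv2.face, "LBPHFaceRecognizer_create"):
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read(model_path)
    else:
        recognizer = cv2.face.createLBPHFaceRecognizer()
        recognizer.load(model_path)
    return recognizer
```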
diff --git a/detector_webcam.py b/detector_webcam.py
index 9187770..c8b87c0 100644
--- a/detector_webcam.py
+++ b/detector_webcam.py
@@ -1,4 +1,10 @@
+# detector_webcam.py
+# Check whether the person in front of the camera is anyone stored in the database
+# Using USB webcam or IP Cam (single threading)
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-3/
+# By: Mickey Chan @ 2019
 
 # Import required modules
 import cv2
@@ -17,7 +23,7 @@
     print("Please train the data first")
     exit(0)
 
-# Setup GPIO for unlock LED
+# Setup GPIO for door lock
 relayPin = 26
 GPIO.setmode(GPIO.BCM)
 GPIO.setup(relayPin, GPIO.OUT)
@@ -36,15 +42,15 @@
 vSource = 0 # first USB webcam
 vStream = cv2.VideoCapture(vSource)
 
-# Setup Classifier for detect face
+# Setup Classifier for detecting faces
 faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
 # Setup LBPH recognizer for face recognition
 recognizer = cv2.face.createLBPHFaceRecognizer() # or LBPHFaceRecognizer_create()
 # Load training data
-recognizer.load(fname) # read() for LBPHFaceRecognizer_create()
+recognizer.load(fname) # change to read() for LBPHFaceRecognizer_create()
 
 while vStream.isOpened():
-    # Turn off unlock LED when timeout
+    # Lock the door again after the unlock timeout
     if time.time() - lastUnlockedAt > unlockDuration:
         GPIO.output(relayPin, 0)
 
@@ -56,11 +62,11 @@
     lastDetectedAt = time.time()
 
     # Detect face
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 2)
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale
+    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame
     for (x, y, w, h) in faces:
-        # Try to recognize the face a
+        # Try to recognize the face using the recognizer
         roiGray = gray[y:y+h, x:x+w]
         id_, conf = recognizer.predict(roiGray)
         print(id_, conf)
@@ -69,7 +75,7 @@
         # retrieve the user name from database,
         # draw a rectangle around the face,
         # print the name of the user and
-        # light up the unlock LED for 5 secord
+        # unlock the door for 5 seconds
         if conf <= 70:
             cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
             # retrieve user name from database
@@ -77,6 +83,7 @@
             result = db.fetchall()
             name = result[0][0]
 
+            # You may do anything below for the detected user, e.g. unlock the door
             GPIO.output(relayPin, 1) # Unlock
             lastUnlockedAt = time.time()
             print("[Unlock] " + str(id_) + ":" + name + " (" + str(conf) + ")")
@@ -89,7 +96,7 @@
     cv2.imshow("Face Recognizer", frame)
 
-    # Press ESC to quit the program
+    # Press ESC or 'q' to quit the program
     key = cv2.waitKey(1) & 0xff
     if key == 27 or key == ord('q'):
         break
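Both detectors use the same non-blocking relock pattern: the relay is switched on immediately and switched off on a later loop pass once `unlockDuration` has elapsed, so the video loop never sleeps. A small sketch of that pattern (the helper names are illustrative and assume the pin has already been configured as in the scripts above):

```python
import time
import RPi.GPIO as GPIO

def unlock(relay_pin):
    GPIO.output(relay_pin, 1)   # energise the relay (open the door)
    return time.time()          # caller keeps this as lastUnlockedAt

def relock_if_due(relay_pin, last_unlocked_at, unlock_duration=5):
    # Called once per loop pass; only relocks after the timeout has elapsed
    if time.time() - last_unlocked_at > unlock_duration:
        GPIO.output(relay_pin, 0)
```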
diff --git a/recordface_picam.py b/recordface_picam.py
index d44fc11..95d8d68 100644
--- a/recordface_picam.py
+++ b/recordface_picam.py
@@ -1,4 +1,9 @@
+# recordface_picam.py
+# Capture face images of a person for face recognition, using the Pi Camera v2 module
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-2/
+# By: Mickey Chan @ 2019
 
 # Import required modules
 import cv2
@@ -18,41 +23,46 @@
     os.makedirs(dirName)
     print("DataSet Directory Created")
 
+# Ask for the user's name
 name = input("What's his/her Name?")
+imgCapture = 30 # Number of face images to capture
 saveFace = False
-frameColor = (0,0,255)
-userDir = "User_"
+frameColor = (0,0,255) # Frame color for detected face
+userDir = "User_" # Prefix of face image directory name
 beginTime = 0
 
-# Connect to video source
+# Connect to video source: Pi Camera v2
 camera = PiCamera()
 camera.resolution = (640, 480)
 camera.framerate = 30
-rawCapture = PiRGBArray(camera, size=(640, 480))
+rawCapture = PiRGBArray(camera, size=(640, 480)) # A NumPy-compatible array for storing the captured frame (frame buffer)
 
-# Setup Classifier for detect face
+# Setup Classifier for detecting faces
 faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
 
+# Continuously capture video until the required amount of face data is collected
 count = 1
-for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
+for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): # Read a frame
     frame = frame.array;
     cv2.putText(frame, "Press 'f' to start face capture", (10, 480-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)
 
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors = 5)
+    # Find any face in the frame
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale
+    faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame
+    # If found, save the captured face image
     for (x, y, w, h) in faces:
-        cv2.rectangle(frame, (x,y), (x+w, y+h), frameColor, 2)
+        cv2.rectangle(frame, (x,y), (x+w, y+h), frameColor, 2) # Draw a frame surrounding the face
+        # Save captured face data
        if saveFace:
             roiGray = gray[y:y+h, x:x+w]
             fileName = userDir + "/" + f'{count:02}' + ".jpg"
             cv2.imwrite(fileName, roiGray)
-            #print(fileName)
             cv2.imshow("face", roiGray)
             count += 1
 
-    cv2.imshow('frame', frame)
-    rawCapture.truncate(0)
+    cv2.imshow('frame', frame) # Show the video frame
+    rawCapture.truncate(0) # Clear frame buffer for the next frame
 
     # Press 'f' to begin detect,
     # Press ESC or 'q' to quit
@@ -69,8 +79,8 @@
             os.makedirs(userDir)
         #print("Maked directory: " + userDir)
 
-    # Quit face detection when captured 30 images
-    if count > 30:
+    # Quit face detection once the required number of faces has been captured
+    if count > imgCapture:
         break
 
 # Clean up
@@ -80,7 +90,7 @@
 db.execute("INSERT INTO `users` (`name`) VALUES(?)", (name,))
 uid = db.lastrowid
 print("User ID:" + str(uid))
-# Rename temperary directory with UID
+# Rename temporary directory with the USER ID
 newUserDir = os.path.join(dirName, str(uid))
 os.rename(userDir, newUserDir);
 #print("Renamed user dataset directory name to " + newUserDir)
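Both recorder scripts end up writing grayscale face crops named `01.jpg` onward into a temporary per-user directory that is later renamed to the user's database ID. A compact sketch of that save step (the helper name and `exist_ok` handling are illustrative, not part of the patch):

```python
import os
import cv2

def save_face_crop(gray, box, user_dir, count):
    # Crop the detected face from the grayscale frame and store it as
    # <user_dir>/NN.jpg, the layout trainer.py expects
    x, y, w, h = box
    roi = gray[y:y+h, x:x+w]
    os.makedirs(user_dir, exist_ok=True)
    path = os.path.join(user_dir, f"{count:02}.jpg")
    cv2.imwrite(path, roi)
    return path
```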
diff --git a/recordface_webcam.py b/recordface_webcam.py
index 667bf55..2f4afd1 100644
--- a/recordface_webcam.py
+++ b/recordface_webcam.py
@@ -1,4 +1,9 @@
+# recordface_webcam.py
+# Capture face images of a person for face recognition, using a webcam or IP cam
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-2/
+# By: Mickey Chan @ 2019
 
 # Import required modules
 import cv2
@@ -16,12 +21,13 @@
     os.makedirs(dirName)
     print("DataSet Directory Created")
 
+# Ask for the user's name
 name = input("What's his/her Name?")
-imgCapture = 30
+imgCapture = 30 # Number of face images to capture
 saveFace = False
-frameColor = (0,0,255)
-userDir = "User_"
+frameColor = (0,0,255) # Frame color for detected face
+userDir = "User_" # Prefix of face image directory name
 beginTime = 0
 
 # Connect to video source
@@ -29,24 +35,28 @@
 vSource = 0 # first USB webcam
 vStream = cv2.VideoCapture(vSource)
 
-# Setup Classifier for detect face
+# Setup Classifier for detecting faces
 faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
 
+# Continuously capture video until the required amount of face data is collected
 count = 1
-frameRate = 5
+frameRate = 5 # Face capture frequency (times per second)
 prevTime = 0
 while vStream.isOpened():
     timeElapsed = time.time() - prevTime
-    ok, frame = vStream.read()
+    ok, frame = vStream.read() # Read a frame
     if not ok:
         break
     cv2.putText(frame, "Press 'f' to start face capture", (10, 480-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)
 
     if timeElapsed > 1./frameRate:
         prevTime = time.time()
 
-        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-        faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 2)
+        # Find any face in the frame
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale
+        faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame
+        # If found, save the captured face image
         for (x, y, w, h) in faces:
-            cv2.rectangle(frame, (x,y), (x+w, y+h), frameColor, 2)
+            cv2.rectangle(frame, (x,y), (x+w, y+h), frameColor, 2) # Draw a frame surrounding the face
+            # Save captured face data
             if saveFace:
                 roiGray = gray[y:y+h, x:x+w]
                 fileName = userDir + "/" + f'{count:02}' + ".jpg"
@@ -54,7 +64,7 @@
                 cv2.imshow("face", roiGray)
                 count += 1
 
-    cv2.imshow('frame', frame)
+    cv2.imshow('frame', frame) # Show the video frame
     # Press 'f' to begin detect,
     # Press ESC or 'q' to quit
     key = cv2.waitKey(1) & 0xff
@@ -69,7 +79,7 @@
         if not os.path.exists(userDir):
             os.makedirs(userDir)
 
-    # Quit face detection when captured 30 images
+    # Quit face detection once the required number of faces has been captured
     if count > imgCapture:
         break
 
@@ -80,7 +90,7 @@
 db.execute("INSERT INTO `users` (`name`) VALUES(?)", (name,))
 uid = db.lastrowid
 print("User ID:" + str(uid))
-# Rename temperary directory with UID
+# Rename temporary directory with the USER ID
 newUserDir = os.path.join(dirName, str(uid))
 os.rename(userDir, newUserDir);
 #print("Renamed user dataset directory name to " + newUserDir)
diff --git a/setup.py b/setup.py
index 92870bd..77cca65 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,11 @@
+# setup.py
+# Prepare the environment for the project
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-2/
+# By: Mickey Chan @ 2019
 
-# Setup database
+# Setup user database
 import sqlite3
 
 conn = sqlite3.connect('database.db')
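setup.py only appears here as a header change, but the rest of the scripts rely on a `users` table keyed by an auto-assigned integer ID. A sketch of a schema that would satisfy the `INSERT`/`SELECT` statements used above (inferred from those statements, not copied from the actual setup.py):

```python
import sqlite3

conn = sqlite3.connect("database.db")
conn.execute(
    "CREATE TABLE IF NOT EXISTS `users` ("
    "`id` INTEGER PRIMARY KEY AUTOINCREMENT, "
    "`name` TEXT NOT NULL)"
)
conn.commit()
conn.close()
```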
diff --git a/threading/Capturer.py b/threading/Capturer.py
index e0c00e4..528ba03 100644
--- a/threading/Capturer.py
+++ b/threading/Capturer.py
@@ -1,3 +1,10 @@
+# Capturer.py
+# Worker object for capturing video frames
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
+# Ref: https://github.com/nrsyed/computer-vision/tree/master/multithread
+# By: Mickey Chan @ 2019
+
 from threading import Thread
 import cv2
 
@@ -19,7 +26,7 @@ def get(self):
             if not self.grabbed:
                 self.stop()
             else:
-                (self.grabbed, self.frame) = self.stream.read()
+                (self.grabbed, self.frame) = self.stream.read() # Capture a video frame
         self.stream.release()
         return
diff --git a/threading/Detector.py b/threading/Detector.py
index ad1502a..de782a6 100644
--- a/threading/Detector.py
+++ b/threading/Detector.py
@@ -1,3 +1,10 @@
+# Detector.py
+# Worker object for doing face recognition
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
+# Ref: https://github.com/nrsyed/computer-vision/tree/master/multithread
+# By: Mickey Chan @ 2019
+
 from threading import Thread
 import cv2
 import sqlite3
@@ -31,11 +38,18 @@ def detect(self):
             if time.time() - self.lastUnlockedAt > self.unlockDuration:
                 GPIO.output(self.lockPin, 0)
 
-            gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
-            faces = self.faceCascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 2)
+            gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale
+            faces = self.faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame
             for (x, y, w, h) in faces:
+                # Try to recognize the face using the recognizer
                 roiGray = gray[y:y+h, x:x+w]
                 id_, conf = self.recognizer.predict(roiGray)
+
+                # If the recognized face has enough confidence (<= 70),
+                # retrieve the user name from database,
+                # draw a rectangle around the face,
+                # print the name of the user and
+                # unlock the door for 5 seconds
                 if conf <= 70:
                     self.db.execute("SELECT `name` FROM `users` WHERE `id` = (?);", (id_,))
                     result = self.db.fetchall()
@@ -43,6 +57,8 @@ def detect(self):
                     print("[Unlock] " + str(id_) + ":" + name + " (" + str(conf) + ")")
                     cv2.rectangle(self.frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                     cv2.putText(self.frame, name, (x+2,y+h-5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (150,255,0), 2)
+
+                    # You may do anything below for the detected user, e.g. unlock the door
                     if self.lockPin > 0:
                         GPIO.output(self.lockPin, 1)
                     self.lastUnlockedAt = time.time()
@@ -52,6 +68,7 @@ def detect(self):
                     if self.lockPin > 0:
                         GPIO.output(self.lockPin, 0)
             cv2.imshow("Face Recognizer", self.frame)
+            # Press ESC or 'q' to quit the program
             key = cv2.waitKey(1) & 0xff
             if key == 27 or key == ord('q'):
                 self.stop()
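Capturer, Detector and Show all follow the same worker shape: a flag-controlled loop run on its own thread, with `start()` returning `self` so calls can be chained. A stripped-down sketch of that pattern (the class name and loop body are placeholders, and the daemon flag is an assumption, not taken from the originals):

```python
import time
from threading import Thread

class Worker:
    def __init__(self):
        self.stopped = False

    def start(self):
        Thread(target=self.run, daemon=True).start()
        return self                 # enables Worker().start() chaining

    def run(self):
        while not self.stopped:
            time.sleep(0.01)        # stand-in for one unit of work (read/detect/show)

    def stop(self):
        self.stopped = True
```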
diff --git a/threading/Show.py b/threading/Show.py
index 9dd8004..dd1e992 100644
--- a/threading/Show.py
+++ b/threading/Show.py
@@ -1,3 +1,10 @@
+# Show.py
+# Worker object for just showing the captured frame on the desktop
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
+# Ref: https://github.com/nrsyed/computer-vision/tree/master/multithread
+# By: Mickey Chan @ 2019
+
 from threading import Thread
 import cv2
 
@@ -15,7 +22,8 @@ def start(self):
     def show(self):
         while not self.stopped:
-            cv2.imshow("Face Recognizer", self.frame)
+            cv2.imshow("Face Recognizer", self.frame) # Show the video frame
+            # Press ESC or 'q' to quit the program
             key = cv2.waitKey(1) & 0xff
             if key == 27 or key == ord('q'):
                 self.stop()
diff --git a/threading/detect_main.py b/threading/detect_main.py
index 8627149..9d7b16d 100644
--- a/threading/detect_main.py
+++ b/threading/detect_main.py
@@ -1,10 +1,16 @@
+# detect_main.py
+# Check whether the person in front of the camera is anyone stored in the database
+# Using USB webcam or IP Cam (multi-threading)
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://github.com/nrsyed/computer-vision/tree/master/multithread
+# By: Mickey Chan @ 2019
 
 import os
 import RPi.GPIO as GPIO
-from Capturer import Capturer
-from Detector import Detector
-#from Show import Show
+from Capturer import Capturer # Worker object for capturing video frames
+from Detector import Detector # Worker object for doing face recognition
+#from Show import Show # Worker object for just showing the captured frame on the desktop
 
 # Connect to video source
 #vSource = "rtsp://192.168.1.100:8554/live.sdp" # RTSP URL of IP Cam
@@ -24,20 +30,21 @@ def main():
     faceDetector = Detector(dname, cname, fname, videoGetter.frame, relayPin).start()
     #videoShower = Show(videoGetter.frame).start()
 
-    # Setup GPIO for unlock LED
+    # Setup GPIO for door lock
     GPIO.setmode(GPIO.BCM)
     GPIO.setup(relayPin, GPIO.OUT)
     GPIO.output(relayPin, 0)
 
     while True:
+        # Stop all workers when any one of them has stopped
         if videoGetter.stopped or faceDetector.stopped: #videoShower.stopped:
             if not videoGetter.stopped: videoGetter.stop()
             if not faceDetector.stopped: faceDetector.stop()
             #if not videoShower.stopped: videoShower.stop()
             break
 
-        frame = videoGetter.frame
-        faceDetector.frame = frame
+        frame = videoGetter.frame # Get the latest captured frame
+        faceDetector.frame = frame # Submit the video frame to the detector for processing
        #videoShower.frame = frame
 
     GPIO.cleanup()
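detect_main.py owns the GPIO lifecycle: BCM numbering, the relay pin driven low (locked) at start-up, and `GPIO.cleanup()` once the loop exits. A minimal sketch of that lifecycle, with a `try/finally` added as a suggestion (not in the patch) so the pins are released even if the loop raises:

```python
import RPi.GPIO as GPIO

relayPin = 26                      # same pin the scripts above use

GPIO.setmode(GPIO.BCM)
GPIO.setup(relayPin, GPIO.OUT)
GPIO.output(relayPin, 0)           # start in the locked state
try:
    pass                           # run the capture/detect loop here
finally:
    GPIO.cleanup()                 # always release the pins on exit
```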
diff --git a/trainer.py b/trainer.py
index 96f8239..927934b 100644
--- a/trainer.py
+++ b/trainer.py
@@ -1,4 +1,9 @@
+# trainer.py
+# Train recognizer using captured face images
+#
+# Project: Face Recognition using OpenCV and Raspberry Pi
 # Ref: https://www.pytorials.com/face-recognition-using-opencv-part-2/
+# By: Mickey Chan @ 2019
 
 # Import required modules
 import os
@@ -11,7 +16,7 @@
 faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
 recognizer = cv2.face.createLBPHFaceRecognizer() # or LBPHFaceRecognizer_create()
 
-# Setup directory for storing trained data
+# Create directory for storing trained data
 baseDir = os.path.dirname(os.path.abspath(__file__))
 imageDir = os.path.join(baseDir, "dataset")
 recognizerDir = os.path.join(baseDir, "recognizer")
@@ -37,20 +42,19 @@
         id_ = int(os.path.basename(root))
         print("UID:" + str(id_))
 
-        # Convert the face image to grey scale and convert pixel data to Numpy Array
+        # Convert the face image to grayscale and convert pixel data to Numpy Array
         faceImage = Image.open(path).convert("L")
         faceArray = np.array(faceImage, "uint8")
 
-        # Insert USER ID and face data into to dataset
+        # Insert USER ID and face data into dataset
         yIDs.append(id_)
         xFaces.append(faceArray)
 
         # Display the face image to be used for training
         cv2.imshow("training", faceArray)
-        # Allow user press a key to interrupt training process
         cv2.waitKey(10)
 
-# Training
+# Train recognizer and then save trained model
 recognizer.train(xFaces, np.array(yIDs))
 recognizer.save(recognizerDir + "/trainingData.yml")
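trainer.py derives the LBPH label for each image from its directory name: every subfolder of `dataset/` is named after the SQLite user ID that the recordface scripts assigned, and that number becomes the label passed to `recognizer.train()`. A small sketch of that mapping (the generator name is illustrative):

```python
import os

def iter_training_samples(image_dir="dataset"):
    # Each subdirectory is named after a user ID; that number becomes
    # the LBPH label for every face image stored inside it
    for root, dirs, files in os.walk(image_dir):
        for fileName in files:
            if fileName.lower().endswith(".jpg"):
                uid = int(os.path.basename(root))
                yield os.path.join(root, fileName), uid
```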