forked from Hironsan/BossSensor
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcamera_reader.py
More file actions
79 lines (65 loc) · 2.45 KB
/
camera_reader.py
File metadata and controls
79 lines (65 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
#! /usr/local/bin/python
# -*- coding:utf-8 -*-
import os
import sys
import datetime
import cv2
import numpy as np
from model_train import Model
from facetime_call import FaceTimeDriver
DATA_FILE_PATH = './data'
def capture(image, label):
    """Save a training image under DATA_FILE_PATH/<label>/.

    The file is named with the current timestamp plus the label, e.g.
    ``2024_01_31_12_00_00_boss.png``.

    :param image: image array accepted by ``cv2.imwrite`` (BGR or gray).
    :param label: class label; used both as the sub-directory name and
        as a filename suffix.
    """
    sub_dir = os.path.join(DATA_FILE_PATH, label)
    # makedirs also creates DATA_FILE_PATH when missing; plain os.mkdir
    # would raise FileNotFoundError there, and exist_ok avoids a race
    # if the directory appears between the check and the create.
    os.makedirs(sub_dir, exist_ok=True)
    timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    filename = timestamp + "_" + label + '.png'
    filepath = os.path.join(sub_dir, filename)
    cv2.imwrite(filepath, image)
def process(image, model, face_driver):
    """Run the classifier on *image* and notify the driver of the result.

    :param image: preprocessed face crop to classify.
    :param model: object exposing ``predict(image)``.
    :param face_driver: object exposing ``action(result)``, or ``None``
        to skip the notification step.
    :return: the raw prediction returned by ``model.predict``.
    """
    prediction = model.predict(image)
    # Forward the prediction only when a driver was supplied.
    if face_driver is not None:
        face_driver.action(prediction)
    return prediction
if __name__ == '__main__':
    driver = None
    # load model
    # Two run modes, selected by the command line:
    #   no argument  -> recognition mode: load the trained model and the
    #                   FaceTime driver, classify each detected face;
    #   one argument -> data-collection mode: argv[1] is the class label,
    #                   detected faces are saved via capture() instead.
    if len(sys.argv) <= 1:
        model = Model()
        model.load()
        driver = FaceTimeDriver()
    cap = cv2.VideoCapture(0)  # default camera
    # NOTE(review): hard-coded Homebrew OpenCV path — breaks on any other
    # install layout; confirm against the deployment environment.
    cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
    cascade = cv2.CascadeClassifier(cascade_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret: continue  # dropped frame; retry
        # Mirror horizontally so the preview behaves like a mirror.
        frame = frame[:, ::-1, :]
        frame = frame.copy()  # negative-stride view -> contiguous array for cv2
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        face_rect = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(10, 10))
        if len(face_rect) > 0:
            # print('face detected')
            color = (255, 255, 255)  # white
            for rect in face_rect:  # rect is (x, y, width, height)
                width, height = rect[2:4]
                # Skip small detections (distant faces / false positives).
                if width < 150 or height < 150:
                    continue
                x, y = rect[0:2]
                # Draw the detection box in white on the preview frame.
                cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), color, thickness=2)
                # Crop the face from the gray frame, with 10 extra pixels above.
                # NOTE(review): y - 10 goes negative when the face touches the
                # top edge, which silently yields a shorter crop — confirm this
                # is acceptable to the model.
                image = frame_gray[y - 10: y + height, x: x + width]
                # Replicate the single gray channel into 3 channels; the
                # multiplication by a float array promotes the crop to float64.
                image = image[:, :, None] * np.array([[[1, 1, 1]]])
                image = image.copy()
                if len(sys.argv) > 1:
                    # Data-collection mode: save the crop under the given label.
                    label = sys.argv[1]
                    capture(image, label)
                else:
                    # Recognition mode: classify and let the driver react.
                    guess = process(image, model, driver)
                    # Non-zero prediction: redraw the box in green as feedback.
                    if guess != 0:
                        cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 255, 0), thickness=2)
        # don't show if you don't need it.
        cv2.imshow('Gesture', frame)
        k = cv2.waitKey(50)  # ~50 ms per frame; returns the pressed key code
        if k == 27:  # ESC quits
            break
    cap.release()
    cv2.destroyAllWindows()