# Test the Emotion Detection System
import sys

import cv2
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

from Constants import *
from EmotionRecognition import EmotionRecognition

# Haar cascade used for face detection (the XML file is expected in the working directory)
cascade_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
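
# Usage (arguments arrive as strings from the command line; see the entry
# point at the bottom of this file):
#   python Test.py train    <model_file> <model_number>  - train and save a model
#   python Test.py testloop <model_file> <model_number>  - live webcam demo with emoji overlay
#   python Test.py test     <model_file> <model_number>  - evaluate accuracy on the test data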

def format_image(image):
    if len(image.shape) > 2 and image.shape[2] == 3:
        # the image is color, so convert it to grayscale
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        # the image was read from a buffer, so decode it as grayscale
        image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    faces = cascade_classifier.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
    if len(faces) == 0:
        return None
    # initialize the first face as having maximum area, then find the largest one
    max_area_face = faces[0]
    for face in faces:
        if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
            max_area_face = face
    face = max_area_face
    # extract the face ROI; a face is (x, y, w, h), so rows span y:y+h and columns x:x+w
    image = image[face[1]:face[1] + face[3], face[0]:face[0] + face[2]]
    try:
        # resize and rescale the image so that it can be passed to the neural network
        image = cv2.resize(image, (SIZE_FACE, SIZE_FACE), interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("-----> Problem during resize")
        return None
    return image
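
# A minimal usage sketch for format_image (hypothetical file name 'face.jpg';
# 'network' stands for a loaded EmotionRecognition model as built in testloop below):
#   img = cv2.imread('face.jpg')
#   face = format_image(img)
#   if face is not None:
#       result = network.predict(face)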

def testloop(filename, model_number):
    # initialize an object of the EmotionRecognition class
    network = EmotionRecognition()
    network.build_network(model_number)
    # load the trained model from the file
    network.load_model(filename)
    cap = cv2.VideoCapture(1)  # camera index 1; use 0 for the default camera
    font = cv2.FONT_HERSHEY_SIMPLEX
    # load one emoji image per emotion, keeping the alpha channel (flag -1)
    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(cv2.imread(EMOJIS_FOLDER + emotion + '.png', -1))
    # create the face detector once instead of on every frame
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    while True:
        # read an image from the stream
        ret, frame = cap.read()
        if not ret:
            # no frame available (e.g. the camera could not be opened)
            break
        # find the faces in the image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray, 1.3, 5)
        # compute the probabilities for the emotions
        result = network.predict(format_image(frame))
        if result is not None:
            # write the emotions, each with a bar showing its probability
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
                              (255, 0, 0), -1)
            # find the emotion with the highest probability and draw an emoji into the video stream
            maxindex = np.argmax(result[0])
            cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
            face_image = feelings_faces[maxindex]
            for c in range(0, 3):
                # face_image has shape (h, w, 4); the fourth channel is the alpha mask,
                # so blend the emoji over the frame ROI channel by channel
                # (the emoji is assumed to be 120x120 to match the ROI)
                frame[200:320, 10:130, c] = face_image[:, :, c] * (face_image[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
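            # Equivalent vectorized blend over all three channels at once
            # (a sketch under the same 120x120 BGRA assumption):
            #   alpha = face_image[:, :, 3:4] / 255.0
            #   frame[200:320, 10:130, :3] = (face_image[:, :, :3] * alpha
            #       + frame[200:320, 10:130, :3] * (1.0 - alpha)).astype(frame.dtype)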
        if len(faces) > 0:
            # draw a box around the face with the maximum area
            max_area_face = faces[0]
            for face in faces:
                if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
                    max_area_face = face
            (x, y, w, h) = max_area_face
            frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        cv2.imshow('Video', cv2.resize(frame, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

def testAccuracy(filename, model_number):
    # initialize an object of the EmotionRecognition class
    network = EmotionRecognition()
    network.build_network(model_number)
    # load the trained model from the file
    network.load_model(filename)
    train_set, validation_set = network.generate_test_data()
    metrics = network.model.evaluate_generator(validation_set, steps=10)
    print(dict(zip(network.model.metrics_names, metrics)))
    # predict the whole validation set; validation_set.classes only lines up
    # with the predictions if the generator does not shuffle
    prediction = network.model.predict_generator(validation_set, steps=len(validation_set))
    y_pred = prediction.argmax(axis=-1)
    print("Predicted classes:")
    print(y_pred)
    print("True classes:")
    print(validation_set.classes)
    CM = confusion_matrix(validation_set.classes, y_pred)
    print(CM)
    # per-class precision, recall and f1 score
    print(classification_report(validation_set.classes, y_pred))
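    # A short sketch of per-class accuracy (i.e. recall) from the confusion
    # matrix, assuming CM rows are the true labels as confusion_matrix returns:
    #   per_class_acc = CM.diagonal() / CM.sum(axis=1)
    #   for emotion, acc in zip(EMOTIONS, per_class_acc):
    #       print(emotion, acc)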

# entry point of the application
if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print("Usage: Test.py train|testloop|test <model_file> <model_number>")
        sys.exit(0)
    if sys.argv[1] == 'train':
        network = EmotionRecognition()
        network.full_training(sys.argv[3])
        network.save_model(sys.argv[2])
        print('Training finished and model saved')
    elif sys.argv[1] == 'testloop':
        testloop(sys.argv[2], sys.argv[3])
    elif sys.argv[1] == 'test':
        testAccuracy(sys.argv[2], sys.argv[3])
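
# An argparse-based alternative for the argument handling above (a sketch only;
# the command and argument names mirror the branches in __main__):
#   import argparse
#   parser = argparse.ArgumentParser(description='Test the Emotion Detection System')
#   parser.add_argument('command', choices=['train', 'testloop', 'test'])
#   parser.add_argument('model_file')
#   parser.add_argument('model_number')
#   args = parser.parse_args()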