main.py
import cv2
import mediapipe as mp
from keras.models import load_model
import numpy as np

# Capture from the default webcam and track at most one hand
cap = cv2.VideoCapture(0)
hands = mp.solutions.hands.Hands(max_num_hands=1)

# Class labels in the order the model was trained on
classes = ['A', 'B', 'C', 'D', 'E']
model = load_model('keras_model.h5')

# Input buffer for the model: a single 224x224 RGB image
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

while True:
    success, img = cap.read()
    if not success:
        continue

    # MediaPipe expects RGB; OpenCV captures BGR
    frameRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(frameRGB)
    handsPoints = results.multi_hand_landmarks
    h, w, _ = img.shape

    if handsPoints is not None:
        for hand in handsPoints:
            # Bounding box of the hand landmarks
            x_max, y_max = 0, 0
            x_min, y_min = w, h
            for lm in hand.landmark:
                x, y = int(lm.x * w), int(lm.y * h)
                x_max = max(x, x_max)
                x_min = min(x, x_min)
                y_max = max(y, y_max)
                y_min = min(y, y_min)

            # Draw the box with a 50 px margin around the hand
            cv2.rectangle(img, (x_min - 50, y_min - 50), (x_max + 50, y_max + 50), (0, 255, 0), 2)

            try:
                # Crop the hand region (with the same margin) and resize to the model input size
                imgCrop = img[y_min - 50:y_max + 50, x_min - 50:x_max + 50]
                imgCrop = cv2.resize(imgCrop, (224, 224))
                imgArray = np.asarray(imgCrop)

                # Scale pixels to the [-1, 1] range expected by the model
                normalized_image_array = (imgArray.astype(np.float32) / 127.0) - 1
                data[0] = normalized_image_array

                # Predict and draw the most likely letter above the box
                prediction = model.predict(data)
                indexVal = np.argmax(prediction)
                cv2.putText(img, classes[indexVal], (x_min - 50, y_min - 65),
                            cv2.FONT_HERSHEY_COMPLEX, 3, (0, 0, 255), 5)
            except Exception:
                # Skip frames where the crop falls outside the image
                continue

    cv2.imshow('Imagem', img)
    cv2.waitKey(1)
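
The normalization above (dividing by 127.0 and subtracting 1) maps pixel values into [-1, 1], which matches the preprocessing convention of Keras image models exported from Teachable Machine; keras_model.h5 appears to follow that convention. A minimal sanity-check sketch, assuming the model file sits next to main.py, to confirm the expected input shape and the number of output classes before starting the capture loop:

# Sanity check for keras_model.h5 (run once, without the webcam loop)
from keras.models import load_model
import numpy as np

model = load_model('keras_model.h5')
print(model.input_shape)   # should be (None, 224, 224, 3) to match the data buffer
print(model.output_shape)  # last dimension should equal len(classes), here 5

# One dummy forward pass to confirm the pipeline works end to end
dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
print(model.predict(dummy).shape)

If either shape differs, adjust the data buffer size or the classes list accordingly before running main.py.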