testing_model.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 22:52:24 2020
@author: aarav
"""
# Reference: https://stackoverflow.com/a/23312964/5370202
import os
import sys

import cv2
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
print('dir path is:', dir_path)
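# Put the project root and its Modules/ folder on the import path so the local helpers below resolve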
sys.path.insert(0, dir_path+"/Modules")
sys.path.insert(0, dir_path)
import utils  # local helper module providing get_my_hand()
# Following imports are used specifically for gesture recognition
from tensorflow import keras
from tensorflow.keras.preprocessing import image  # use tf.keras consistently rather than mixing in standalone keras

currentModuleName = __file__.split(os.path.sep)[-1]
print('current module name:', currentModuleName)
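# Load the trained gesture-classification model (path is specific to the author's machine)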
model = keras.models.load_model('/home/aarav/Desktop/MajorProject/Models/m.h5')
# test_image = cv2.imread('/home/aarav/Desktop/MajorProject/Dataset/Letters/a/100.png')
test_image = cv2.imread('/home/aarav/Desktop/1.png')
if test_image is None:
    sys.exit('Could not read the test image')
gray = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)

# Segment the hand region from the grayscale frame using the project helper
handFound, hand, contours_of_hand = utils.get_my_hand(gray)
if not handFound:
    sys.exit('No hand found in the test image')

# Match the model input: 64x64, scaled to [0, 1], with a batch dimension added
hand = cv2.resize(hand, (64, 64))
hand = hand / 255.0
img = image.img_to_array(hand)
img = np.expand_dims(img, axis=0)

# predict_classes() was removed from newer Keras; argmax over predict() gives the same class index
pred = np.argmax(model.predict(img), axis=-1)
print(pred)