-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest.py
97 lines (74 loc) · 2.59 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
#!/usr/bin/env python
# coding: utf-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import json
import time
import datetime
from tqdm import tqdm
from glob import glob
from copy import deepcopy
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
import operator
from functools import reduce
from Agents.agent import *
# Resolve this script's absolute location and split it into directory and
# file name. BUG FIX: the original used rsplit("\\", 1), which hard-codes the
# Windows path separator and raises ValueError on Linux/macOS where realpath
# returns "/"-separated paths; os.path.split is separator-aware on every OS.
dir_path, file_path = os.path.split(os.path.realpath(__file__))
print(dir_path, '-->', file_path)
# Load the run configuration from disk.
with open('RL_config.json', 'r') as cfg_file:
    Config = json.load(cfg_file)

# Unpack the training hyper-parameters from the configuration in one pass.
PROCESS, useText, useIcon, MAX_MOVES, optimizerQN = (
    Config[key] for key in (
        'Process', 'UseText', 'UseIcon', 'MaxMovesPerEpisode', 'OptimizerQN'
    )
)

# Build the agent in inference mode from the configured options.
agent = Agent(
    process=PROCESS,
    use_text=useText,
    use_icon=useIcon,
    optimizer=optimizerQN,
    mode='inference',
)
""" Test the network """
test_idx = 0
testing = True
while testing:
test_idx += 1
print("\n\t\t[TEST]", test_idx)
_ = input("Press any key to test ")
# Get 1st new observation
agent.be_ready()
current_moves = 0
while agent.on_duty or current_moves>MAX_MOVES:
current_moves += 1
print("\nMOVE {} -------".format(current_moves))
# _ = input("Press any key to take action ")
raw_current_state = agent.observe()
start = time.time()
current_contexts, button_centers = agent.extract_contexts(raw_current_state)
stop_det = time.time()
print("Computational Time for Detection:", stop_det-start)
action_batch = agent.query(current_contexts)
stop_dec = time.time()
print("Computational Time for Decision:", stop_dec-stop_det)
action_id = action_batch[0]
action = agent.actions_list[action_id]
""" AGENT practices an ACTION to the ENVIRONMENT """
try:
action_args = dict()
if action in button_centers.keys():
action_args['click_x'] = button_centers[action][1]
action_args['click_y'] = button_centers[action][0]
if 'label' in action:
action_args['image'] = raw_current_state
agent.does(action, **action_args)
except KeyError:
print("\n".join(
"{} - {} - {}".format(idx, button, position) \
for idx, (button, position) in enumerate(button_centers.items())
))
print("Agent chooses {} not existing in this state!".format(action))
time.sleep(0.19)