# frozen_lake_qe.py (forked from johnnycode8/gym_solutions)
# This file is almost identical to frozen_lake_q.py, except this uses the frozen_lake_enhanced.py environment.

import gymnasium as gym
import numpy as np
import matplotlib.pyplot as plt
import pickle

# Register the enhanced frozen lake environment.
# A sample registration entry can be found in C:\Users\<username>\.conda\envs\gymenv\Lib\site-packages\gymnasium\envs\__init__.py
gym.register(
    id="FrozenLake-enhanced",                           # give it a unique id
    entry_point="frozen_lake_enhanced:FrozenLakeEnv",   # frozen_lake_enhanced = name of file 'frozen_lake_enhanced.py'
    kwargs={"map_name": "8x8"},
    max_episode_steps=200,
    reward_threshold=0.85,                              # optimum = 0.91
)
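# Note: frozen_lake_enhanced.py is assumed to provide a FrozenLakeEnv that behaves like the built-in
# FrozenLake but also exposes set_q() and set_episode(), which this script calls during 'human'
# rendering so the current Q-table and episode number can be shown on screen (see the training loop below).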

def run(episodes, is_training=True, render=False):

    # 'FrozenLake-enhanced' is the id registered above
    env = gym.make('FrozenLake-enhanced', desc=None, map_name="8x8", is_slippery=True, render_mode='human' if render else None)

    if(is_training):
        q = np.zeros((env.observation_space.n, env.action_space.n)) # init a 64 x 4 array
    else:
        f = open('frozen_lake8x8.pkl', 'rb')
        q = pickle.load(f)
        f.close()

    learning_rate_a = 0.9           # alpha or learning rate
    discount_factor_g = 0.9         # gamma or discount rate. Near 0: more weight on the immediate reward. Near 1: more weight on future rewards.
    epsilon = 1                     # 1 = 100% random actions
    epsilon_decay_rate = 0.0001     # epsilon decay rate. 1/0.0001 = 10,000 episodes to decay to 0
    rng = np.random.default_rng()   # random number generator

    rewards_per_episode = np.zeros(episodes)

    for i in range(episodes):
        state = env.reset()[0]  # states: 0 to 63, 0=top left corner, 63=bottom right corner
        terminated = False      # True when fallen in a hole or reached the goal
        truncated = False       # True when actions > 200

        while(not terminated and not truncated):
            if is_training and rng.random() < epsilon:
                action = env.action_space.sample() # actions: 0=left, 1=down, 2=right, 3=up
            else:
                action = np.argmax(q[state,:])

            new_state,reward,terminated,truncated,_ = env.step(action)

            if is_training:
                q[state,action] = q[state,action] + learning_rate_a * (
                    reward + discount_factor_g * np.max(q[new_state,:]) - q[state,action]
                )
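                # The update above is the standard tabular Q-learning rule:
                #   Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))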

            # pass the q table and episode count to the environment for rendering
            if(env.render_mode=='human'):
                env.set_q(q)
                env.set_episode(i)

            state = new_state

        epsilon = max(epsilon - epsilon_decay_rate, 0)

        if(epsilon==0):
            learning_rate_a = 0.0001
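            # With epsilon at 0 the policy is purely greedy; the tiny learning rate keeps the learned
            # Q-values from drifting because of the randomness of the slippery ice.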

        if reward == 1:
            rewards_per_episode[i] = 1

    env.close()

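    # Plot the number of successful episodes over the most recent 100 episodes to show learning progress.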
    sum_rewards = np.zeros(episodes)
    for t in range(episodes):
        sum_rewards[t] = np.sum(rewards_per_episode[max(0, t-100):(t+1)])
    plt.plot(sum_rewards)
    plt.savefig('frozen_lake8x8.png')

    if is_training:
        f = open("frozen_lake8x8.pkl","wb")
        pickle.dump(q, f)
        f.close()

if __name__ == '__main__':
    run(15000, is_training=True, render=True)
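    # Note: rendering in 'human' mode for all 15,000 training episodes is very slow; set render=False to
    # train quickly, then evaluate the saved Q-table with, for example: run(10, is_training=False, render=True)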