# -*- coding: utf-8 -*-
"""Run module for SAC on LunarLanderContinuous-v2.
- Author: Curt Park
- Contact: [email protected]
"""
import numpy as np
import torch
import torch.optim as optim
from config.agent.lunarlander_continuous_v2.utils import LunarLanderContinuousHER
from algorithms.common.networks.mlp import MLP, FlattenMLP, TanhGaussianDistParams
from algorithms.sac.agent import Agent
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# hyper parameters
hyper_params = {
    "GAMMA": 0.99,  # discount factor
    "TAU": 5e-3,  # soft target-update coefficient
    "W_ENTROPY": 1e-3,  # fixed entropy weight (presumably used when auto-tuning is off)
    "W_MEAN_REG": 1e-3,  # policy mean regularization weight
    "W_STD_REG": 1e-3,  # policy std regularization weight
    "W_PRE_ACTIVATION_REG": 0.0,  # pre-tanh activation regularization weight
    "LR_ACTOR": 3e-4,
    "LR_VF": 3e-4,
    "LR_QF1": 3e-4,
    "LR_QF2": 3e-4,
    "LR_ENTROPY": 3e-4,
    "DELAYED_UPDATE": 2,  # interval (in updates) between delayed actor/target updates
    "BUFFER_SIZE": int(1e6),
    "BATCH_SIZE": 512,
    "AUTO_ENTROPY_TUNING": True,
    "WEIGHT_DECAY": 0.0,
    "INITIAL_RANDOM_ACTION": 5000,  # initial steps taken with random actions
    "NETWORK": {
        "ACTOR_HIDDEN_SIZES": [256, 256],
        "VF_HIDDEN_SIZES": [256, 256],
        "QF_HIDDEN_SIZES": [256, 256],
    },
    # HER
    "USE_HER": True,
    "SUCCESS_SCORE": 250.0,  # episode score treated as a success by HER
    "DESIRED_STATES_FROM_DEMO": True,  # sample desired goal states from demonstrations
}


def get(env, args):
    """Run training or test.

    Args:
        env (gym.Env): OpenAI Gym environment with continuous action space
        args (argparse.Namespace): arguments including training settings

    """
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    if hyper_params["USE_HER"]:
        # HER concatenates the desired goal with the observation,
        # so the effective state dimension is doubled.
        state_dim *= 2

    hidden_sizes_actor = hyper_params["NETWORK"]["ACTOR_HIDDEN_SIZES"]
    hidden_sizes_vf = hyper_params["NETWORK"]["VF_HIDDEN_SIZES"]
    hidden_sizes_qf = hyper_params["NETWORK"]["QF_HIDDEN_SIZES"]

    # target entropy
    target_entropy = -np.prod((action_dim,)).item()  # heuristic
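    # Assuming the Agent implements standard automatic entropy tuning
    # (Haarnoja et al., 2018), the temperature is adjusted so that the policy
    # entropy stays near this target of -|A| (minus the action dimension).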

    # create actor
    actor = TanhGaussianDistParams(
        input_size=state_dim, output_size=action_dim, hidden_sizes=hidden_sizes_actor
    ).to(device)
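    # As the class name suggests, TanhGaussianDistParams models a tanh-squashed
    # Gaussian policy, so sampled actions stay in [-1, 1], matching the
    # continuous action range of LunarLanderContinuous-v2.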

    # create v_critic
    vf = MLP(input_size=state_dim, output_size=1, hidden_sizes=hidden_sizes_vf).to(
        device
    )
    vf_target = MLP(
        input_size=state_dim, output_size=1, hidden_sizes=hidden_sizes_vf
    ).to(device)
    vf_target.load_state_dict(vf.state_dict())
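    # The target value network starts as an exact copy of vf; the Agent is
    # expected to keep it updated with soft (Polyak) updates controlled by TAU.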

    # create q_critic
    qf_1 = FlattenMLP(
        input_size=state_dim + action_dim, output_size=1, hidden_sizes=hidden_sizes_qf
    ).to(device)
    qf_2 = FlattenMLP(
        input_size=state_dim + action_dim, output_size=1, hidden_sizes=hidden_sizes_qf
    ).to(device)
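    # Two Q-critics are created so the minimum of their estimates can be used,
    # the clipped double-Q technique SAC uses to mitigate value overestimation.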

    # create optimizers
    actor_optim = optim.Adam(
        actor.parameters(),
        lr=hyper_params["LR_ACTOR"],
        weight_decay=hyper_params["WEIGHT_DECAY"],
    )
    vf_optim = optim.Adam(
        vf.parameters(),
        lr=hyper_params["LR_VF"],
        weight_decay=hyper_params["WEIGHT_DECAY"],
    )
    qf_1_optim = optim.Adam(
        qf_1.parameters(),
        lr=hyper_params["LR_QF1"],
        weight_decay=hyper_params["WEIGHT_DECAY"],
    )
    qf_2_optim = optim.Adam(
        qf_2.parameters(),
        lr=hyper_params["LR_QF2"],
        weight_decay=hyper_params["WEIGHT_DECAY"],
    )

    # make tuples to create an agent
    models = (actor, vf, vf_target, qf_1, qf_2)
    optims = (actor_optim, vf_optim, qf_1_optim, qf_2_optim)

    # HER
    her = LunarLanderContinuousHER() if hyper_params["USE_HER"] else None

    # create an agent
    return Agent(env, args, hyper_params, models, optims, target_entropy, her)
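

# Minimal usage sketch (assumptions only: the surrounding framework normally
# builds the env and args and calls get(); the argument fields and the
# train()/test() methods shown here are hypothetical, not taken from this file):
#
#   import argparse
#   import gym
#
#   env = gym.make("LunarLanderContinuous-v2")
#   args = argparse.Namespace(test=False, seed=777)
#   agent = get(env, args)
#   agent.test() if args.test else agent.train()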