
Commit

rearrange files and fix typo
wzever committed Apr 17, 2023
1 parent a757d55 commit cd966db
Showing 35 changed files with 3,155 additions and 3,154 deletions.
58 changes: 29 additions & 29 deletions ppo_bihyb_dag.yaml → config/ppo_bihyb_dag.yaml
@@ -1,29 +1,29 @@
# environment configs
scheduler_type: cp
num_init_dags: 50
gamma: 0.95
resource_limit: 6000
train_sample: 50
test_sample: 10

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 20
k_epochs: 10
update_timestep: 20
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 64 #16
gnn_layers: 5

# test parameters
search_size: 3

# misc configs
log_interval: 1
test_interval: 50
max_episodes: 5000
test_model_weight: pretrained/PPO_cp_dag_num50_beam3_ratio0.0892.pt
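The block above is one of the training configs moved into the new config/ directory. As a quick illustration (not part of this commit), the relocated file can be read with PyYAML from the repository root; the path and key names come straight from the diff, while the snippet itself is only a hedged sketch — the training scripts load it through their own --config handling (see the dag_ppo_bihyb_train.py hunk further down).

```python
# Minimal sketch (not from the repo): read the relocated DAG config and
# inspect a few of the keys shown in the diff above. Assumes the current
# working directory is the repository root, where config/ now lives.
import yaml

with open('config/ppo_bihyb_dag.yaml') as f:
    cfg = yaml.safe_load(f)  # the repo's own scripts call yaml.load(f)

print(cfg['scheduler_type'])     # cp
print(cfg['num_init_dags'])      # 50
print(cfg['learning_rate'])      # 0.001
print(cfg['test_model_weight'])  # pretrained/PPO_cp_dag_num50_beam3_ratio0.0892.pt
```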
56 changes: 28 additions & 28 deletions ppo_bihyb_ged.yaml → config/ppo_bihyb_ged.yaml
@@ -1,28 +1,28 @@
# environment configs
solver_type: ipfp
dataset: AIDS-20-30
gamma: 0.95
train_sample: 50
test_sample: 10

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 10
k_epochs: 10
update_timestep: 10
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 64
gnn_layers: 3

# test parameters
search_size: 3

# misc configs
log_interval: 1
test_interval: 200
max_episodes: 10000
test_model_weight: pretrained/PPO_ipfp_datasetAIDS-20-30_beam3_ratio0.2117.pt
58 changes: 29 additions & 29 deletions ppo_bihyb_hcp.yaml → config/ppo_bihyb_hcp.yaml
@@ -1,29 +1,29 @@
# environment configs
solver_type: lkh-fast
min_size: 34
max_size: 104
gamma: 0.95
train_sample: 50
test_sample: 20

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 10
k_epochs: 10
update_timestep: 20
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 16
gnn_layers: 3

# test parameters
search_size: 12

# misc configs
log_interval: 1
test_interval: 100
max_episodes: 200
test_model_weight: pretrained/PPO_lkh-fast_min34_max103_beam12_opt0.2500.pt
54 changes: 27 additions & 27 deletions ppo_single_dag.yaml → config/ppo_single_dag.yaml
@@ -1,27 +1,27 @@
# environment configs
num_init_dags: 50
gamma: 0.99
resource_limit: 6000
train_sample: 50
test_sample: 10

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 5000
k_epochs: 10
update_timestep: 50
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 64
gnn_layers: 5

# test parameters
search_size: 10

# misc configs
log_interval: 1
test_interval: 10 #50
max_episodes: 5000
52 changes: 26 additions & 26 deletions ppo_single_ged.yaml → config/ppo_single_ged.yaml
@@ -1,26 +1,26 @@
# environment configs
dataset: AIDS-20-30
gamma: 0.99
train_sample: 50
test_sample: 10

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 200
k_epochs: 10
update_timestep: 50
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 64
gnn_layers: 3

# test parameters
search_size: 10

# misc configs
log_interval: 1
test_interval: 200
max_episodes: 10000
54 changes: 27 additions & 27 deletions ppo_single_hcp.yaml → config/ppo_single_hcp.yaml
@@ -1,27 +1,27 @@
# environment configs
min_size: 34
max_size: 103
gamma: 0.95
train_sample: 50
test_sample: 20

# learning configs
batch_size: 1
lr_steps: []
max_timesteps: 500
k_epochs: 10
update_timestep: 8
learning_rate: 0.001
eps_clip: 0.1

# model parameters
node_output_size: 16
gnn_layers: 3

# test parameters
search_size: 12

# misc configs
log_interval: 1
test_interval: 100
max_episodes: 200
2 changes: 1 addition & 1 deletion dag_ppo_bihyb_eval.py
@@ -176,7 +176,7 @@ def evaluate(policy_net, dag_graph, eval_graphs, max_steps=10, search_size=10, m
 import random
 from torch.multiprocessing import Pool, cpu_count
 
-from dag_graph import DAGraph
+from utils.dag_graph import DAGraph
 from dag_data.dag_generator import load_tpch_tuples
 from dag_ppo_bihyb_train import ActorCritic, parse_arguments
 
10 changes: 5 additions & 5 deletions dag_ppo_bihyb_train.py
@@ -11,10 +11,10 @@
 from torch.multiprocessing import Pool, cpu_count
 from copy import deepcopy
 
-from dag_ppo_bihyb_model import ActorNet, CriticNet, GraphEncoder
-from utils import print_args
-from tfboard_helper import TensorboardUtil
-from dag_graph import DAGraph
+from src.dag_ppo_bihyb_model import ActorNet, CriticNet, GraphEncoder
+from utils.utils import print_args
+from utils.tfboard_helper import TensorboardUtil
+from utils.dag_graph import DAGraph
 from dag_data.dag_generator import load_tpch_tuples
 from dag_ppo_bihyb_eval import evaluate
 
@@ -442,7 +442,7 @@ def parse_arguments():
     args = parser.parse_args()
 
     if args.config:
-        with open(args.config) as f:
+        with open('config/' + args.config) as f:
             cfg_dict = yaml.load(f)
             for key, val in cfg_dict.items():
                 assert hasattr(args, key), f'Unknown config key: {key}'
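The one-line change above makes the script resolve --config filenames relative to the new config/ directory. A hedged sketch of the surrounding pattern follows; the argparse flags and the final setattr line are assumptions added for illustration, since the hunk ends at the assert.

```python
# Sketch (assumptions marked): how the changed line fits into the
# config-merge pattern suggested by this hunk.
import argparse
import yaml

parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default=None,
                    help='YAML filename; resolved under config/ after this commit')
parser.add_argument('--learning_rate', type=float, default=0.001)  # illustrative flag;
# the real scripts define one flag per config key so the assert below passes
args = parser.parse_args()

if args.config:
    with open('config/' + args.config) as f:  # the line changed by this commit
        cfg_dict = yaml.safe_load(f)          # the repo itself calls yaml.load(f)
    for key, val in cfg_dict.items():
        assert hasattr(args, key), f'Unknown config key: {key}'
        setattr(args, key, val)               # assumed continuation; not shown in the hunk
```

Under this layout, a run would presumably be started as `python dag_ppo_bihyb_train.py --config ppo_bihyb_dag.yaml`, with the `config/` prefix added by the script itself.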
2 changes: 1 addition & 1 deletion dag_ppo_single_eval.py
@@ -158,7 +158,7 @@ def evaluate(policy_net, dag_graph, eval_graphs, max_steps=10, search_size=10, m
 import random
 from torch.multiprocessing import Pool, cpu_count
 
-from dag_graph import DAGraph
+from utils.dag_graph import DAGraph
 from dag_data.dag_generator import load_tpch_tuples
 from dag_ppo_single_train import ActorCritic, parse_arguments
 
