26 changes: 26 additions & 0 deletions README.md
@@ -57,6 +57,7 @@ Available commands:
* `finetune` – Fine-tune an existing pre-trained model
* `evaluate` – Evaluate model performance on a dataset
* `predict` – Run inference and save predictions
* `iterate` – Run hyperparameter optimization (HPO) and repeated experiments on multiple datasets/tasks

---

@@ -144,3 +145,28 @@ gridfm_graphkit predict --config path/to/config.yaml --model_path path/to/model.
| `--output_path` | `str` | Directory where predictions are saved. | `data` |

---

## Running Iterate

```bash
gridfm_graphkit iterate --config path/to/config.yaml
```

### Arguments

| Argument | Type | Description | Default |
| --------------- | ----------- | ----------------------------------------------------------- | --------- |
| `--config` | `str` | Path to `iterate` config file. | `None` |
| `--seed` | `int` | Seed for reproducibility. | `None` |
| `--hpo_spec` | `namespace` | Parameters for HPO/repeated experiments. | `None` |
| `--tasks` | `namespace` | Task definitions (dataset, metric, optimization direction). | `None` |
| `--model` | `namespace` | Model architecture parameters. | `None` |
| `--optimizer` | `namespace` | Optimizer and learning-rate scheduler parameters. | `None` |
| `--training` | `namespace` | Training parameters (batch size, epochs, losses, etc.). | `None` |
| `--callbacks` | `namespace` | Callback parameters (e.g., early stopping). | `None` |

---
**Note:** Namespace inputs can be provided in the config file or as command-line arguments. On the command line, namespace inputs use `.` notation, e.g. `--hpo_spec.experiment_name my_exp`, as shown below. Run `gridfm_graphkit iterate -h` for the full list of inputs allowed in each namespace.
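For example (a sketch: the `n_trials` and `epochs` overrides are illustrative, assuming those keys are exposed under their namespaces as in the example config further down):

```bash
# Start from a config file, then override individual namespace inputs
gridfm_graphkit iterate --config path/to/config.yaml \
    --hpo_spec.experiment_name my_exp \
    --hpo_spec.n_trials 10 \
    --training.epochs 50
```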

12 changes: 8 additions & 4 deletions examples/config/case118_ieee_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
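The same optimizer-block migration recurs in every example config in this PR: the flat Adam keys (`beta1`, `beta2`, `lr_decay`, `lr_patience`) give way to a generic `optimizer_params` dict plus an explicit `scheduler_type`/`scheduler_params` pair. As a minimal sketch of how such a config could be resolved against PyTorch (the `build_optimizer_and_scheduler` helper below is hypothetical, not gridfm_graphkit's actual code):

```python
import torch

def build_optimizer_and_scheduler(model, cfg):
    """Hypothetical helper: cfg mirrors the YAML `optimizer:` section."""
    # cfg["type"] == "Adam" resolves to torch.optim.Adam
    opt_cls = getattr(torch.optim, cfg["type"])
    optimizer = opt_cls(
        model.parameters(),
        lr=cfg["learning_rate"],
        **cfg.get("optimizer_params", {}),  # e.g. betas=[0.9, 0.999]
    )
    # cfg["scheduler_type"] == "ReduceLROnPlateau" resolves to
    # torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min",
    #                                            factor=0.7, patience=10)
    sched_cls = getattr(torch.optim.lr_scheduler, cfg["scheduler_type"])
    scheduler = sched_cls(optimizer, **cfg.get("scheduler_params", {}))
    return optimizer, scheduler
```

One advantage of this shape is that any constructor argument of the chosen optimizer or scheduler can be passed through the YAML without needing a dedicated config field.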
12 changes: 8 additions & 4 deletions examples/config/case240_pserc_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/case24_ieee_rts_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/case300_ieee_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/case30_ieee_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
86 changes: 86 additions & 0 deletions examples/config/case30_ieee_base_hpo.yaml
@@ -0,0 +1,86 @@
```yaml
seed: 42
# data:
#   networks: ["case30_ieee"]
#   scenarios: [1023]
#   normalization: "baseMVAnorm"
#   baseMVA: 100
#   mask_type: "rnd"
#   mask_value: 0.0
#   mask_ratio: 0.5
#   mask_dim: 6
#   learn_mask: False
#   val_ratio: 0.1
#   test_ratio: 0.1
#   workers: 4
model:
  attention_head: 8
  dropout: 0.1
  edge_dim: 2
  hidden_size: 256
  input_dim: 9
  num_layers: 8
  output_dim: 6
  pe_dim: 20
  type: GPSTransformer
model_path: "/dccstor/sentinel1/nsimumba/neso_gridfm/gridfm-graphkit/examples/models/GridFM_v0_2.pth"
training:
  batch_size: 1
  epochs: 2
  losses: ["MaskedMSE", "PBE"]
  loss_weights: [0.01, 0.99]
  accelerator: auto
  devices: auto
  strategy: auto
optimizer:
  type: Adam
  learning_rate: 0.0001
  optimizer_params:
    betas: [0.9, 0.999]
  scheduler_type: ReduceLROnPlateau
  scheduler_params:
    mode: min
    factor: 0.7
    patience: 10
callbacks:
  patience: 100
  tol: 0
  optuna_early_prune: True

hpo_spec:
  experiment_name: GPSTransformer_8
  run_name: top_run
  optimization_space:
    batch_size: [8, 16, 32]
    learning_rate:
      min: 0.000006
      max: 0.001
      type: real
      log: true
  n_trials: 2
  bayesian_search: True
  results_folder: "/dccstor/sentinel1/nsimumba/neso_gridfm/results/iterate"
  save_models: False
  num_repetitions: 2
  repeat_on_best: True
  report_on_best_val: True
  continue_existing_experiment: True

tasks:
  - name: feature_reconstruction_base
    type: feature_reconstruction
    metric: "Validation loss"
    direction: min
    data:
      data_path: "/dccstor/gridfm/PowerGraph_TP"
      networks: ["case30_ieee"]
      scenarios: [1023]
      normalization: "baseMVAnorm"
      baseMVA: 100
      mask_type: "rnd"
      mask_value: 0.0
      mask_ratio: 0.5
      mask_dim: 6
      learn_mask: False
      val_ratio: 0.1
      test_ratio: 0.1
      workers: 4
```
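The `optuna_early_prune` key suggests Optuna as the HPO backend; if so, the `optimization_space` block above would translate into trial suggestions roughly as sketched here (`run_training` is a hypothetical stand-in for the real training loop):

```python
import optuna

def run_training(batch_size: int, learning_rate: float) -> float:
    """Hypothetical stand-in: train the model and return the validation loss."""
    return learning_rate * batch_size  # placeholder value

def objective(trial: optuna.Trial) -> float:
    # A categorical list maps to suggest_categorical; a real range with
    # log: true maps to suggest_float(..., log=True).
    batch_size = trial.suggest_categorical("batch_size", [8, 16, 32])
    lr = trial.suggest_float("learning_rate", 6e-6, 1e-3, log=True)
    return run_training(batch_size, lr)

# bayesian_search: True plausibly selects a TPE sampler; direction: min -> "minimize"
study = optuna.create_study(direction="minimize", sampler=optuna.samplers.TPESampler(seed=42))
study.optimize(objective, n_trials=2)  # n_trials from hpo_spec
print(study.best_params)
```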
12 changes: 8 additions & 4 deletions examples/config/case39_epri_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/case57_ieee_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/case89_pegase_base.yaml
```diff
@@ -31,11 +31,15 @@ training:
   devices: auto
   strategy: auto
 optimizer:
+  type: Adam
   learning_rate: 0.0001
-  beta1: 0.9
-  beta2: 0.999
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 callbacks:
   patience: 100
   tol: 0
```
12 changes: 8 additions & 4 deletions examples/config/gridFMv0.1_pretraining.yaml
```diff
@@ -37,11 +37,15 @@ model:
   pe_dim: 20
   type: GNN_TransformerConv
 optimizer:
-  beta1: 0.9
-  beta2: 0.999
+  type: Adam
   learning_rate: 1.0e-05
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 seed: 200
 training:
   batch_size: 64
```
12 changes: 8 additions & 4 deletions examples/config/gridFMv0.2_pretraining.yaml
```diff
@@ -37,11 +37,15 @@ model:
   pe_dim: 20
   type: GPSTransformer
 optimizer:
-  beta1: 0.9
-  beta2: 0.999
+  type: Adam
   learning_rate: 0.0001
-  lr_decay: 0.7
-  lr_patience: 10
+  optimizer_params:
+    betas: [0.9, 0.999]
+  scheduler_type: ReduceLROnPlateau
+  scheduler_params:
+    mode: min
+    factor: 0.7
+    patience: 10
 seed: 0
 training:
   batch_size: 64
```