Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,49 @@ We recommend using python 3.10, 3.11 or 3.12 and also using a virtual environmen
pip install terratorch-iterate
```

### New instructions for iterate v0.3
Iterate v0.3 can optimize over arbitrary code running on arbitrary workload managers.
Slurm and LSF are supported, Kubernetes/OpenShift and PBS coming soon.

From version 0.3 onward, the previous iterate CLI remains available as `iterate-classic`. Here are some usage examples:

#### Prerequisites
```sh
mkdir deleteme.iterate
cd deleteme.iterate
python -m venv .venv
source .venv/bin/activate
wget https://raw.githubusercontent.com/terrastackai/iterate/refs/heads/main/examples/bumpy_function.py
wget https://raw.githubusercontent.com/terrastackai/iterate/refs/heads/main/examples/bumpy_hpo.yaml
pip install terratorch-iterate==0.3
```

#### Run locally
```
iterate \
--script bumpy_function.py \
--root-dir . \
--optuna-study-name terratorch_hpo_nas_2 \
--optuna-db-path "sqlite:///iterate_study.db" \
--hpo-yaml bumpy_hpo.yaml \
--wlm none \
--metric yval
```
#### Run on LSF
```
iterate \
--script bumpy_function.py \
--root-dir . \
--optuna-study-name terratorch_hpo_nas_2 \
--optuna-db-path "sqlite:///iterate_study.db" \
--hpo-yaml bumpy_hpo.yaml \
--wlm lsf \
--metric yval \
--gpu-count 0
```
#### Useful commands
```
pip install optuna-dashboard
optuna-dashboard --host 0.0.0.0 sqlite:///iterate_study.db
```
### Suggested setup for development

```sh
Expand Down
94 changes: 94 additions & 0 deletions examples/bumpy_function.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
import argparse
import math


def bumpy_function_3d(
    x, y, z,
    global_mu, global_sigma,
    mu_rest, sigma_rest, amps_rest,
):
    """
    Smooth multimodal test function over R^3 with:
      - a single global optimum of exactly 1 at ``global_mu`` = (mx, my, mz)
      - additional local optima, each strictly below 1

    Implements f(p) = 1 - prod_k (1 - a_k * exp(-||p - mu_k||^2 / (2 sigma_k^2))),
    where the global peak has amplitude a = 1 and the local peaks take their
    amplitudes from ``amps_rest``.
    """
    point = (x, y, z)

    def _damping(center, sigma, amplitude):
        # One factor of the product: 1 minus a Gaussian bump centered at
        # `center`.  At the bump's center this factor is 1 - amplitude.
        sq = (
            (point[0] - center[0]) ** 2
            + (point[1] - center[1]) ** 2
            + (point[2] - center[2]) ** 2
        )
        return 1.0 - amplitude * math.exp(-sq / (2.0 * sigma ** 2))

    # Start with the global peak (amplitude exactly 1, so f(global_mu) == 1).
    remainder = _damping(global_mu, global_sigma, 1.0)

    # Fold in every local peak as an additional damping factor.
    for center, sigma, amplitude in zip(mu_rest, sigma_rest, amps_rest):
        remainder *= _damping(center, sigma, amplitude)

    return 1.0 - remainder


if __name__ == "__main__":
    # CLI wrapper: evaluate bumpy_function_3d at a single point and print the
    # result in the "yval: ..." format that iterate parses for the metric.
    parser = argparse.ArgumentParser("Evaluate the 3D bumpy multimodal function.")

    parser.add_argument("--x", type=float, required=True)
    parser.add_argument("--y", type=float, required=True)
    parser.add_argument("--z", type=float, required=True)
    parser.add_argument("--trial-number", type=int, default=0)

    parser.add_argument(
        "--global-mu",
        type=float,
        nargs=3,
        default=[0.0, 0.0, 0.0],
        metavar=("MX", "MY", "MZ"),
        help="Location of the global optimum",
    )
    parser.add_argument("--global-sigma", type=float, default=0.7)

    parser.add_argument(
        "--mu-rest",
        type=float,
        nargs="*",
        default=[-2.0, 0.0, 0.0, 2.0, 0.0, 0.0],
        help="Flat list of (x y z) triplets",
    )
    parser.add_argument(
        "--sigma-rest",
        type=float,
        nargs="*",
        default=[0.6, 0.6],
        help="One sigma per local peak",
    )
    parser.add_argument(
        "--amps-rest",
        type=float,
        nargs="*",
        default=[0.5, 0.8],
        help="One amplitude (< 1) per local peak",
    )

    args = parser.parse_args()

    # Reject malformed input early instead of silently truncating: --mu-rest
    # must split exactly into (x y z) triplets.
    if len(args.mu_rest) % 3 != 0:
        parser.error("--mu-rest length must be a multiple of 3 (flat x y z triplets)")

    mu_rest = [
        tuple(args.mu_rest[i:i + 3])
        for i in range(0, len(args.mu_rest), 3)
    ]

    # zip() would silently drop surplus peaks if the per-peak lists disagree;
    # make that a hard CLI error instead.
    if not (len(mu_rest) == len(args.sigma_rest) == len(args.amps_rest)):
        parser.error(
            "--mu-rest triplets, --sigma-rest and --amps-rest must all describe "
            "the same number of local peaks"
        )

    yval = bumpy_function_3d(
        x=args.x,
        y=args.y,
        z=args.z,
        global_mu=tuple(args.global_mu),
        global_sigma=args.global_sigma,
        mu_rest=mu_rest,
        sigma_rest=args.sigma_rest,
        amps_rest=args.amps_rest,
    )

    # iterate scrapes this exact line for the metric value; keep the format.
    print(f'yval: {yval}, trial_number: {args.trial_number}')
30 changes: 30 additions & 0 deletions examples/bumpy_hpo.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# =======================
# Static parameters - passed to the underlying training script as is
# =======================

static:
global-mu: 23 42 66

# ========================
# Training hyperparameters - evaluated by optuna and passed to the underlying training script
# ========================

hpo:
x:
type: float
low: 1
high: 100
log: true

y:
type: float
low: 1
high: 100
log: true

z:
type: float
low: 1
high: 100
log: true

9 changes: 5 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ include = ["terratorch_iterate*"]
[project]

name = "terratorch-iterate"
version = "0.2.3"
version = "0.3"
requires-python = ">= 3.11"
description = "A terratorch's plugin for benchmarking and hyperparameter optimization"
authors = [
Expand Down Expand Up @@ -77,8 +77,8 @@ dependencies = [
]

[project.urls]
Homepage = "https://github.com/IBM/terratorch-iterate"
Issues = "https://github.com/IBM/terratorch-iterate/issues"
Homepage = "https://github.com/terrastackai/iterate"
Issues = "https://github.com/terrastackai/iterate/issues"

[project.optional-dependencies]
dev = [
Expand Down Expand Up @@ -113,7 +113,8 @@ line-length = 88
skip-string-normalization = true

[project.scripts]
iterate = "terratorch_iterate.main:main"
iterate-classic = "terratorch_iterate.main:main"
iterate = "terratorch_iterate.iterate2:main"

[tool.isort]
multi_line_output = 3
Expand Down
Loading
Loading