locality: make first-class citizen plot (#45)
* locality: make first-class citizen plot

* scheduler: back to very-network

* plots: overall improvements

* plot: commit plotting scripts after submission
csegarragonz authored May 23, 2024
1 parent da421c1 commit 11e13d9
Showing 26 changed files with 1,801 additions and 1,114 deletions.
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,6 +1,6 @@
 ansible==7.0.0
 black==22.3.0
-faasmctl>=0.43.0
+faasmctl>=0.44.0
 flake8==3.9.2
 hoststats==0.1.1
 invoke>=2.1.0
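The only change here is bumping the `faasmctl` pin from 0.43.0 to 0.44.0. A minimal sketch for checking that an existing environment satisfies the new pin (this assumes `faasmctl` is installed as a regular Python distribution and that the third-party `packaging` module is available; it ships alongside pip):

```python
# Minimal sketch: assert the installed faasmctl satisfies the new pin.
# Assumes faasmctl is installed as a normal Python distribution and that
# the third-party "packaging" module is available.
from importlib.metadata import version

from packaging.version import Version

if Version(version("faasmctl")) < Version("0.44.0"):
    raise RuntimeError("faasmctl too old; run: pip install -r requirements.txt")
```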
8 changes: 6 additions & 2 deletions tasks/elastic/plot.py
@@ -5,7 +5,11 @@
 from os.path import join
 from pandas import read_csv
 from tasks.util.elastic import ELASTIC_PLOTS_DIR, ELASTIC_RESULTS_DIR
-from tasks.util.plot import SINGLE_COL_FIGSIZE, save_plot
+from tasks.util.plot import (
+    SINGLE_COL_FIGSIZE,
+    get_color_for_baseline,
+    save_plot,
+)
 
 
 def _read_results():
@@ -37,7 +41,6 @@ def plot(ctx):
     Plot the slowdown of OpenMP's ParRes kernels
     """
     results = _read_results()
-    print(results)
     makedirs(ELASTIC_PLOTS_DIR, exist_ok=True)
     fig, ax = subplots(figsize=SINGLE_COL_FIGSIZE)
 
@@ -56,6 +59,7 @@
     ax.bar(
         xs,
         ys,
+        color=get_color_for_baseline("omp-elastic", "granny"),
         edgecolor="black",
     )
 
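The new `get_color_for_baseline` helper is imported from `tasks/util/plot.py`, which is not part of the hunks shown above. A hypothetical sketch of what such a helper might look like (the mapping below is an assumption for illustration, not the repository's actual palette):

```python
# Hypothetical sketch of a get_color_for_baseline helper (the real one
# lives in tasks/util/plot.py, not shown in this diff): one fixed colour
# per (plot, baseline) pair, so a baseline is coloured consistently
# across figures. The mapping below is an assumption.
_BASELINE_COLORS = {
    ("omp-elastic", "granny"): "#1f77b4",
}


def get_color_for_baseline(plot_name, baseline):
    return _BASELINE_COLORS[(plot_name, baseline)]
```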
10 changes: 7 additions & 3 deletions tasks/lammps/run.py
@@ -15,7 +15,7 @@
     LAMMPS_RESULTS_DIR,
     LAMMPS_SIM_WORKLOAD,
     LAMMPS_SIM_WORKLOAD_CONFIGS,
-    get_faasm_benchmark,
+    get_lammps_data_file,
     get_lammps_migration_params,
 )
 from tasks.util.openmpi import (
@@ -50,7 +50,6 @@ def wasm(ctx, w, repeats=1):
     """
     num_vms = len(get_faasm_worker_ips())
     assert num_vms == 2, "Expected 2 VMs got: {}!".format(num_vms)
-    data_file = basename(get_faasm_benchmark(LAMMPS_SIM_WORKLOAD)["data"][0])
 
     for workload in w:
         if workload not in LAMMPS_SIM_WORKLOAD_CONFIGS:
@@ -60,6 +59,9 @@
                 )
             )
         workload_config = LAMMPS_SIM_WORKLOAD_CONFIGS[workload]
+        data_file = basename(
+            get_lammps_data_file(workload_config["data-file"])["data"][0]
+        )
 
         csv_name = "lammps_granny_{}.csv".format(workload)
         _init_csv_file(csv_name)
@@ -100,7 +102,6 @@ def native(ctx, w, repeats=1):
     """
     num_cpus_per_vm = 8
     num_vms = 2
-    data_file = get_faasm_benchmark(LAMMPS_SIM_WORKLOAD)["data"][0]
 
     for workload in w:
         if workload not in LAMMPS_SIM_WORKLOAD_CONFIGS:
@@ -110,6 +111,9 @@
                 )
             )
         workload_config = LAMMPS_SIM_WORKLOAD_CONFIGS[workload]
+        data_file = get_lammps_data_file(workload_config["data-file"])["data"][
+            0
+        ]
 
         pod_names, pod_ips = get_native_mpi_pods("lammps")
         assert (
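Both `wasm` and `native` now resolve the LAMMPS data file per workload, from the workload's own config entry, instead of once from the global `LAMMPS_SIM_WORKLOAD`. A sketch of the shape this diff appears to assume (config keys and the return structure are inferred from the subscripting above; all names and paths below are illustrative only):

```python
# Illustrative sketch only: inferred shapes, not the repository's real values.
from os.path import basename

# Each workload config carries its own "data-file" key.
LAMMPS_SIM_WORKLOAD_CONFIGS = {
    "compute": {"data-file": "compute"},
    "network": {"data-file": "network"},
}


def get_lammps_data_file(data_file_name):
    # Matches the ["data"][0] access in the diff: a dict whose "data"
    # entry is a list of data file paths (the path scheme is made up).
    return {"data": ["/lammps-data/in.{}".format(data_file_name)]}


# Per-workload resolution, as the new wasm/native code does:
workload_config = LAMMPS_SIM_WORKLOAD_CONFIGS["compute"]
data_file = basename(
    get_lammps_data_file(workload_config["data-file"])["data"][0]
)
print(data_file)  # -> in.compute
```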
24 changes: 12 additions & 12 deletions tasks/makespan/locality.md
@@ -25,25 +25,28 @@ inv cluster.provision --vm Standard_D8_v5 --nodes ${NUM_VMS} + 1
 inv cluster.credentials
 ```
 
-## Native (OpenMPI)
+## Native (Native Batch Schedulers with Granny's MPI implementation)
 
-First, deploy the native `k8s` cluster:
+For this experiment, our native baselines also run with Granny's MPI
+implementation. This makes the comparison of performance improvements from
+better locality fair:
 
 ```bash
-inv makespan.native.deploy --num-vms ${NUM_VMS}
+faasmctl deploy.k8s --workers=${NUM_VMS}
+inv makespan.wasm.upload
 ```
 
-Now, you can run the different OpenMPI baselines:
+Now, you can run the different baselines:
 
 ```bash
-inv makespan.run.native-batch --workload mpi-migrate --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS}
-inv makespan.run.native-slurm --workload mpi-migrate --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS}
+inv makespan.run.native-batch --workload mpi-locality --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS}
+inv makespan.run.native-slurm --workload mpi-locality --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS}
 ```
 
 Once you are done, you may remove the native OpenMPI cluster:
 
 ```bash
-inv makespan.native.delete
+faasmctl delete
 ```
 
 ## Granny
@@ -63,11 +66,8 @@ inv makespan.wasm.upload
 Third, run the experiment:
 
 ```bash
-# Granny with migration disabled as another baseline
-inv makespan.run.granny --workload mpi-migrate --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS}
-
-# Granny with migration enabled (aka Granny)
-inv makespan.run.granny --workload mpi-migrate --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS} --migrate
+# Granny with migration enabled
+inv makespan.run.granny --workload mpi-locality --num-vms ${NUM_VMS} --num-tasks ${NUM_TASKS} --migrate
 ```
 
 During an experiment, you may monitor the state of the cluster (in a separate