run benchmark script #45

Draft · wants to merge 1 commit into base: main

27 changes: 27 additions & 0 deletions src/benchmark.json
@@ -0,0 +1,27 @@
{
    "config_path": "./configs",
    "defense_models": {
        "cloak": {
            "run_groups": {
                "1": {
                    "hparams": {
                        "client.min_scale": [0, 1],
                        "server.lr": [3e-4, 3e-3]
                    },
                    "attacks": {"include": [], "exclude": []}
                }
            },
            "attacks": {
                "all": true,
                "include": [],
                "exclude": []
            }
        }
    },
    "attack_models": {
        "supervised_decoder": {
            "config": "decoder_attack.json",
            "run_groups": {}
        }
    }
}
13 changes: 10 additions & 3 deletions src/expt_automate.py
@@ -61,9 +61,15 @@ def run_experiment(config):
 "nopeek_fairface_data_resnet18_split6_1_alpha_0.97",
 "uniform_noise_fairface_data_resnet18_split6_1_distribution_gaussian_mean_0_sigma_0"]}
 
-sys_config = {"dataset_path": "/u/abhi24/Datasets/Faces/fairface/",
-              "experiments_folder": "/u/abhi24/Workspace/simba/experiments/",
-              "gpu_devices":[0,1]}
+sys_config = {
+    "dataset_path": "/home/justinyu/fairface/",
+    "experiments_folder": "/home/justinyu/experiments/",
+    "gpu_devices": [1, 3]
+}
+
+# sys_config = {"dataset_path": "/u/abhi24/Datasets/Faces/fairface/",
+#               "experiments_folder": "/u/abhi24/Workspace/simba/experiments/",
+#               "gpu_devices":[0,1]}
 
 bench_config = combine_configs(bench_config, sys_config)
@@ -75,3 +81,4 @@ def run_experiment(config):
 # For attack
 bench_config[param] = val
 run_experiment(bench_config)
+
96 changes: 96 additions & 0 deletions src/run_benchmark.py
@@ -0,0 +1,96 @@
from pprint import pprint
import argparse
from utils.benchmark_utils import create_configs_for_bench

# bench_default = "./benchmark.json"

# parser = argparse.ArgumentParser(description='Run SIMBA benchmark script')
# parser.add_argument('benchmark_config', default=bench_default, type=open,
# help='filepath for benchmark config, default: {}'.format(bench_default))
# # not implemented
# parser.add_argument('-f', '--force', action='store_true',
# help='force rerun all benchmarks, even if they exist')

# args = parser.parse_args()


"""
Run Groups
For each model, you can specify any number of 'run groups' with a unique group id. Each run group allows you to specific a set of hyperparams to run the model with.
You can also include/exclude attacks, customize if they will be shown on the final benchmark graph etc.

Hyperparameters
Each hparams config is a dictionary where keys are json access paths in the base config of the model, and the value is an array of potential hparam values.
The cartesian product of hparam values is calculated and a model will be run on each. For K hparams, we will run N_0 * ... * N_K models where N_i is the length
of the values array for hparam i.

Attacks:
Which attacks to run on which defense can be configured using attack settings. These consist of "include", "exclude" and a special "@all" token.
Attacks can be included/excluded globally, per model, and per run group


Benchmark Config Fields:

{
config_path: path to base folder for config files
defense_models/attack_models: configs for each defense/attack model
[model name]
attacks: (optional, configure attacks to be run on this model)
run_groups: (see Run Groups above)
[group id]
hparams: (see Hyperparameters above)
attacks: (optional, configue attacks to be run on this group)
default_attacks: settings for which attacks will be default be run on all defense models
}



TODOS:
- will need to change experiment naming system
- what to do with attack hp? should run all attacks variations on every defense?
- option to only run attack on best performing defense
"""

bench_config = {
    "config_path": "./configs",
    "defense_models": {
        "cloak": {
            "run_groups": {
                "run1": {
                    # add option to correlate params together, e.g. split layer
                    "hparams": {
                        "client.min_scale": [0, 1],
                        "server.lr": [3e-4, 3e-3],
                    },
                    "attacks": {"include": [], "exclude": []}
                },
                "run2": {
                    # add option to correlate params together, e.g. split layer
                    "hparams": {},
                    "attacks": {"include": ["input_optimization_attack"], "exclude": []}
                }
            },
            "attacks": {
                "include": [],
                "exclude": [],
            }
        }
    },
    "attack_models": {
        "supervised_decoder": {
            "config": "decoder_attack.json",
            "run_groups": {},
        },
        "input_optimization_attack": {
            "run_groups": {},
        }
    },
    "default_attacks": {
        "include": ["supervised_decoder"],
        "exclude": [],
    }
}


if __name__ == '__main__':
    pprint(create_configs_for_bench(bench_config))
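A minimal sketch of how the generated configs might be consumed; the driver loop below is an assumption for illustration only (run_experiment comes from expt_automate.py, and this wiring is not part of the PR):

from expt_automate import run_experiment  # assumed import path

defense_configs, attack_configs = create_configs_for_bench(bench_config)
for cfg in defense_configs + attack_configs:
    run_experiment(cfg)  # hypothetical driver loop, not part of this PR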
136 changes: 136 additions & 0 deletions src/utils/benchmark_utils.py
@@ -0,0 +1,136 @@

import copy
import json
import os
import random

ALL_TOKEN = "@all"

def apply_attacks_settings(curr_attacks: set[str], settings, all_attacks: set[str]):
    include, exclude = set(settings["include"]), set(settings["exclude"])
    new_attacks = curr_attacks.copy()
    new_attacks = new_attacks.union(all_attacks) if ALL_TOKEN in include else new_attacks.union(include)
    new_attacks = set() if ALL_TOKEN in exclude else new_attacks.difference(exclude)

    assert len(new_attacks.difference(all_attacks)) == 0  # check attacks in all_attacks

    return new_attacks
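# Example (values assumed for illustration):
#   apply_attacks_settings({"supervised_decoder"},
#                          {"include": ["@all"], "exclude": ["input_optimization_attack"]},
#                          {"supervised_decoder", "input_optimization_attack"})
#   -> {"supervised_decoder"}   ("@all" pulls in every attack, then the exclude removes one)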

# replace this
def get_expname(model_config, group_id=""):
    return f"{model_config['method']}-{group_id}-{random.randint(0, 1000)}"

def create_configs_for_bench(all_bench_config):
    defense_base_config_paths, attack_base_config_paths = get_base_config_paths(all_bench_config)
    # add error catching here
    defense_base_configs = {name: json.load(open(path, 'r')) for name, path in defense_base_config_paths.items()}
    attack_base_configs = {name: json.load(open(path, 'r')) for name, path in attack_base_config_paths.items()}

    all_attacks = set(attack_base_configs.keys())
    default_included_attacks = apply_attacks_settings(set(), all_bench_config["default_attacks"], all_attacks)

    # maps each attack to the experiment names of the defenses it should target
    attack_to_defense_map = {attack: set() for attack in all_attacks}

    all_defense_configs = []
    for defense_name, model_bench_config in all_bench_config["defense_models"].items():
        base_config = defense_base_configs[defense_name]
        run_groups = model_bench_config["run_groups"].items()
        model_included_attacks = apply_attacks_settings(default_included_attacks, model_bench_config["attacks"], all_attacks) if "attacks" in model_bench_config else default_included_attacks

        if len(run_groups) == 0:
            all_defense_configs.append(base_config)
            for attack in model_included_attacks:
                attack_to_defense_map[attack].add(get_expname(base_config))
            continue

        for group_id, run_group in model_bench_config["run_groups"].items():
            hparams = run_group["hparams"]
            hparam_combos = generate_hparam_combos_from_hparams(hparams)
            new_configs = generate_hparams_configs(base_config, hparam_combos, group_id)
            all_defense_configs.extend(new_configs)

            group_included_attacks = apply_attacks_settings(model_included_attacks, run_group["attacks"], all_attacks) if "attacks" in run_group else model_included_attacks
            for config in new_configs:
                for attack in group_included_attacks:
                    attack_to_defense_map[attack].add(get_expname(config, group_id))

    all_attack_configs = []
    for attack_name, model_bench_config in all_bench_config["attack_models"].items():
        base_config = attack_base_configs[attack_name]
        run_groups = model_bench_config["run_groups"].items()

        attack_configs = []
        if len(run_groups) == 0:
            attack_configs.append(base_config)
        else:
            for group_id, run_group in model_bench_config["run_groups"].items():
                hparams = run_group["hparams"]
                hparam_combos = generate_hparam_combos_from_hparams(hparams)
                new_configs = generate_hparams_configs(base_config, hparam_combos, group_id)
                attack_configs.extend(new_configs)

        defenses_to_attack = attack_to_defense_map[attack_name]
        for config in attack_configs:
            for defense in defenses_to_attack:
                jmespath_update("challenge_experiment", defense, config)
            all_attack_configs.append(config)
    return all_defense_configs, all_attack_configs


def generate_hparams_configs(base_config, hparam_runs, group_id):
    new_configs = [base_config]
    for hparam_dict in hparam_runs:
        # deep copy so nested updates don't leak back into the shared base config
        new_config = copy.deepcopy(base_config)
        for hparam_path, val in hparam_dict.items():
            jmespath_update(hparam_path, val, new_config)
        jmespath_update("rungroup_id", group_id, new_config)
        new_configs.append(new_config)
    return new_configs

"""
In-place modifies source dict with keypath and val
"""
def jmespath_update(key, val, source):
curr_key, *rest = key.split(".")
if len(rest) > 0:
if curr_key not in source:
source[curr_key] = {}
return jmespath_update(".".join(rest), val, source[curr_key])
source[curr_key] = val
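# Example (illustrative): given cfg = {"client": {"min_scale": 1}},
#   jmespath_update("client.min_scale", 0.5, cfg)  -> cfg["client"]["min_scale"] == 0.5
#   jmespath_update("server.lr", 3e-4, cfg)        -> creates cfg["server"] = {"lr": 3e-4}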

"""
Creates list of hparam combinations from a single model bench config
"""
def generate_hparam_combos_from_hparams(hparams):
def recurse(flattened_hparams, runs = [{}]):
if len(flattened_hparams) == 0:
return runs
hparam, values = flattened_hparams[0]
new_runs = []
for run in runs:
new_runs.extend([dict({hparam: val}, **run) for val in values])
return recurse(flattened_hparams[1:], new_runs)
result = recurse([[key, val] for key, val in hparams.items()])
return [item for item in result if item] # exclude empty dict
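# Example (illustrative):
#   generate_hparam_combos_from_hparams({"a": [1, 2], "b": [3]})
#   -> [{"a": 1, "b": 3}, {"a": 2, "b": 3}]
#   generate_hparam_combos_from_hparams({})  -> []   (the empty combo is filtered out)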




def get_base_config_paths(bench_config) -> tuple[dict[str, str], dict[str, str]]:
    config_folder = bench_config["config_path"]

    defenses: dict[str, str] = {}
    attacks: dict[str, str] = {}
    for name, model_def in bench_config["defense_models"].items():
        filename = model_def["config"] if "config" in model_def else f"{name}.json"
        filepath = os.path.join(config_folder, filename)
        if os.path.isfile(filepath):
            defenses[name] = filepath
        else:
            print(f"No config found for defense model '{name}'. Tried path: {filepath}")

    for name, model_def in bench_config["attack_models"].items():
        filename = model_def["config"] if "config" in model_def else f"{name}.json"
        filepath = os.path.join(config_folder, filename)
        if os.path.isfile(filepath):
            attacks[name] = filepath
        else:
            print(f"No config found for attack model '{name}'. Tried path: {filepath}")

    return defenses, attacks
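# Example resolution for the bench_config in run_benchmark.py (paths assumed, relative to "./configs"):
#   defense "cloak"                     -> ./configs/cloak.json                      (implicit "<name>.json")
#   attack  "supervised_decoder"        -> ./configs/decoder_attack.json             (explicit "config" field)
#   attack  "input_optimization_attack" -> ./configs/input_optimization_attack.json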