From c84bfa6bc62c6e5e017b2e6f3bd02f0b7c25e8fc Mon Sep 17 00:00:00 2001
From: Danilo Lessa Bernardineli
Date: Mon, 18 Dec 2023 17:41:31 -0300
Subject: [PATCH 1/7] change to v0.5.0

---
 CHANGELOG.md       | 28 +++++++++++++++++++---------
 README.md          |  6 +++---
 cadCAD/__init__.py |  2 +-
 setup.py           |  2 +-
 4 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b28e7f6..1f4469a9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,18 +1,28 @@
 # Changelog:
+### 0.5.0 - December 18, 2023

-### February 15, 2023
-* **Fixes:**
-  - Package has been cleaned-up for working with Python 3.10
-### 0.4.29.1
+
+#### New User Features
+
+- Added a toggle that lets users deactivate deepcopying. This is done by passing an additional object on the `ExecutionContext`, e.g. `ExecutionContext(mode, additional_objs={'deepcopy_off': True})`
+
+#### New Submodules
+
+- A collection of type annotations for encapsulating `cadCAD` projects is now available through the `cadCAD.types` submodule
+- Added `cadCAD.tools` as a submodule, which originated from the `cadCAD_tools` Python package. It contains several helper functions for making the simulation experience more straightforward, as well as a collection of performance-profiling tools.
+- Added `cadCAD.diagram` as a submodule, which originated from the `cadCAD_diagram` Python package. It contains functions for programmatically generating block diagrams from existing models.
+- More informative error messages when policies and SUFs are incorrectly implemented (Issues #288 and #258)
+
+#### Backend Improvements
+
+- Merged the repo with `cadCAD_legacy_devel`, which includes performance improvements. In particular, simulations will start up faster due to code optimizations.
+- `cadCAD` now uses `pytest` as the testing framework. This was made possible by isolating the existing tests and wrapping them into functions.

-#### Changes
-- Parallel executor uses the context manager handling the Process Pool lifetime
+#### Fixes

-### 0.4.29
+- cadCAD is now Python 3.10+ compatible (Issues #306 and #301)
+- Proper support for `ExecutionMode.single_mode` (Issues #253 and #254)

-- Merged repo with the `cadCAD_tweaked`, which includes performance improvements
-- Python 3.10 compatible
 ### September 28, 2021
 #### New Features:
 * **ver. ≥ `0.4.28`:**
diff --git a/README.md b/README.md
index e5151fa8..ea165f20 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
  / ___/ __` / __  / /   / /| | / / / /
 / /__/ /_/ / /_/ / /___/ ___ |/ /_/ /
 \___/\__,_/\__,_/\____/_/  |_/_____/
-by cadCAD                  ver. 0.4.28
+by cadCAD                  ver. 0.5.0
 ======================================
        Complex Adaptive Dynamics
        o       i        e
@@ -20,7 +20,7 @@ through simulation, with support for Monte Carlo methods, A/B testing and parameter sweeps.

 # Getting Started

-#### Change Log: [ver. 0.4.28](CHANGELOG.md)
+#### Change Log: [ver. 0.5.0](CHANGELOG.md)

 [Previous Stable Release (No Longer Supported)](https://github.com/cadCAD-org/cadCAD/tree/b9cc6b2e4af15d6361d60d6ec059246ab8fbf6da)

@@ -47,7 +47,7 @@ $

 ## 1. 
Installation: Requires [>= Python 3.6.13](https://www.python.org/downloads/) -**Option A:** Install Using **[pip](https://pypi.org/project/cadCAD/0.4.28/)** +**Option A:** Install Using **[pip](https://pypi.org/project/cadCAD/)** ```bash pip3 install cadCAD ``` diff --git a/cadCAD/__init__.py b/cadCAD/__init__.py index fbfbaf52..8a276188 100644 --- a/cadCAD/__init__.py +++ b/cadCAD/__init__.py @@ -2,7 +2,7 @@ from cadCAD.configuration import Experiment name = "cadCAD" -version = "0.4.28" +version = "0.5.0" experiment = Experiment() configs = experiment.configs diff --git a/setup.py b/setup.py index eef999fa..785f3aee 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ """ name = "cadCAD" -version = "0.4.29" +version = "0.5.0" setup(name=name, version=version, From 0626a104524888f977ed706b4cf5940b0b543788 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 21 Dec 2023 10:01:52 -0300 Subject: [PATCH 2/7] add tests for checking keys --- testing/test_runs.py | 106 +++++++++++++---------- testing/tests/cadCAD_memory_address.json | 2 +- 2 files changed, 63 insertions(+), 45 deletions(-) diff --git a/testing/test_runs.py b/testing/test_runs.py index 2c33c2b4..60ded2ba 100644 --- a/testing/test_runs.py +++ b/testing/test_runs.py @@ -3,11 +3,12 @@ from cadCAD.configuration import Experiment from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list from cadCAD.types import * -import pandas as pd # type: ignore +import pandas as pd # type: ignore import types import inspect import pytest + def describe_or_return(v: object) -> object: """ Thanks @LinuxIsCool! @@ -55,65 +56,59 @@ def assign_params(_df: pd.DataFrame, configs) -> pd.DataFrame: df = _df.assign(**first_param_dict).copy() for i, (_, subset_df) in enumerate(df.groupby(['simulation', 'subset', 'run'])): df.loc[subset_df.index] = subset_df.assign(**select_config_M_dict(configs, - i, - selected_params)) + i, + selected_params)) return df - - SWEEP_PARAMS: Dict[str, List] = { - 'alpha': [1], - 'beta': [lambda x: 2 * x, lambda x: x], - 'gamma': [3, 4], - 'omega': [7] - } + 'alpha': [1], + 'beta': [lambda x: 2 * x, lambda x: x], + 'gamma': [3, 4], + 'omega': [7] +} SINGLE_PARAMS: Dict[str, object] = { - 'alpha': 1, - 'beta': lambda x: x, - 'gamma': 3, - 'omega': 5 - } + 'alpha': 1, + 'beta': lambda x: x, + 'gamma': 3, + 'omega': 5 +} -def create_experiment(N_RUNS=2, N_TIMESTEPS=3, params: dict=SWEEP_PARAMS): +def create_experiment(N_RUNS=2, N_TIMESTEPS=3, params: dict = SWEEP_PARAMS): psu_steps = ['m1', 'm2', 'm3'] system_substeps = len(psu_steps) var_timestep_trigger = var_substep_trigger([0, system_substeps]) env_timestep_trigger = env_trigger(system_substeps) env_process = {} - # ['s1', 's2', 's3', 's4'] # Policies per Mechanism + def gamma(params: Parameters, substep: Substep, history: StateHistory, state: State, **kwargs): return {'gamma': params['gamma']} - def omega(params: Parameters, substep: Substep, history: StateHistory, state: State, **kwarg): return {'omega': params['omega']} - # Internal States per Mechanism + def alpha(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'alpha_var', params['alpha'] - def beta(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'beta_var', params['beta'] - + def gamma_var(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'gamma_var', params['gamma'] - + def 
omega_var(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'omega_var', params['omega'] - def policies(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'policies', _input - def sweeped(params: Parameters, substep: Substep, history: StateHistory, state: State, _input: PolicyOutput, **kwargs): return 'sweeped', {'beta': params['beta'], 'gamma': params['gamma']} @@ -126,8 +121,8 @@ def sweeped(params: Parameters, substep: Substep, history: StateHistory, state: psu_block[m]["states"]['gamma_var'] = gamma_var psu_block[m]["states"]['omega_var'] = omega_var psu_block[m]['states']['policies'] = policies - psu_block[m]["states"]['sweeped'] = var_timestep_trigger(y='sweeped', f=sweeped) - + psu_block[m]["states"]['sweeped'] = var_timestep_trigger( + y='sweeped', f=sweeped) # Genesis States genesis_states = { @@ -140,13 +135,14 @@ def sweeped(params: Parameters, substep: Substep, history: StateHistory, state: } # Environment Process - env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']]) + env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[ + 5], funct_list=[lambda _g, x: _g['beta']]) sim_config = config_sim( { "N": N_RUNS, "T": range(N_TIMESTEPS), - "M": params, # Optional + "M": params, # Optional } ) @@ -164,26 +160,40 @@ def sweeped(params: Parameters, substep: Substep, history: StateHistory, state: def test_mc_sweep_experiment(): - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) + experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) + experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + def test_unique_sweep_experiment(): - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + def test_mc_single_experiment(): - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) - experiment_assertions(create_experiment(N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) + experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) + 
experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) + experiment_assertions(create_experiment( + N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) + def test_unique_single_experiment(): - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) with pytest.raises(ValueError) as e_info: - experiment_assertions(create_experiment(N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) - + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) def experiment_assertions(exp, mode=None): @@ -192,19 +202,27 @@ def experiment_assertions(exp, mode=None): exec_context = ExecutionContext(mode) executor = Executor(exec_context=exec_context, configs=exp.configs) (records, tensor_field, _) = executor.execute() - df = drop_substeps(assign_params(pd.DataFrame(records), exp.configs)) + + df: DataFrame = assign_params(pd.DataFrame(records), exp.configs) + df = drop_substeps(df) # XXX: parameters should always be of the same type. Else, the test will fail first_sim_config = exp.configs[0].sim_config['M'] + required_keys = {'simulation': int, + 'run': int, + 'subset': int, + 'timestep': int} for (i, row) in df.iterrows(): if row.timestep > 0: - + assert row['alpha_var'] == row['alpha'] assert type(row['alpha_var']) == type(first_sim_config['alpha']) assert row['gamma_var'] == row['gamma'] assert type(row['gamma_var']) == type(first_sim_config['gamma']) assert row['omega_var'] == row['omega'] assert type(row['omega_var']) == type(first_sim_config['omega']) - + for k, v in required_keys.items(): + assert k in row + assert type(row[k]) == v diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index 859ec289..38637ec2 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x10fbedd50"} \ No newline at end of file +{"memory_address": "0x111d22f20"} \ No newline at end of file From 3ac31940eaf69010f0659d14e540634a7ad6d56a Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 21 Dec 2023 10:25:13 -0300 Subject: [PATCH 3/7] add tests --- testing/test_row_count.py | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 testing/test_row_count.py diff --git a/testing/test_row_count.py b/testing/test_row_count.py new file mode 100644 index 00000000..1b657e73 --- /dev/null +++ b/testing/test_row_count.py @@ -0,0 +1,63 @@ +from cadCAD.configuration import Experiment +from cadCAD.configuration.utils import config_sim +from cadCAD.engine import Executor, ExecutionContext, ExecutionMode +import pytest + + +CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3), + (3, 1, 3, 3, 3), (1, 1, 3, 3, 3), + (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)] + +def run_experiment(exp: Experiment, mode: str): + exec_context = ExecutionContext(mode) + executor = Executor(exec_context=exec_context, configs=exp.configs) + (records, tensor_field, _) = executor.execute() + return records 
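

# NOTE (editor's illustration, not part of the original patch): each
# trajectory produces one genesis record plus one record per
# (timestep, substep) pair, which is what the expected_rows() helper
# below encodes. For example, the (3, 3, 3, 3, 3) signature above yields
# 3 * 3 * 3 * (3 * 3 + 1) = 270 records.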



def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3) -> Experiment:

    INITIAL_STATE = {'varA': None}
    PSUBs = [{'policies': {}, 'variables': {}}] * N_substeps
    params = {'A': [None] * N_sweeps,
              'B': [None]}

    SIM_CONFIG = config_sim(
        {
            "N": N_runs,
            "T": range(N_timesteps),
            "M": params,  # Optional
        }
    )

    exp = Experiment()
    for i_sim in range(N_simulations):
        exp.append_model(
            sim_configs=SIM_CONFIG,
            initial_state=INITIAL_STATE,
            partial_state_update_blocks=PSUBs
        )
    return exp


def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s):
    args = (N_sim, N_sw, N_r, N_t, N_s)
    assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s):
    args = (N_sim, N_sw, N_r, N_t, N_s)
    assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s):
    args = (N_sim, N_sw, N_r, N_t, N_s)
    assert len(run_experiment(create_experiments(*args), 'local_proc')) == expected_rows(*args)

From d7e38cb7c79f3d64038763ead3d5612daf0f98f9 Mon Sep 17 00:00:00 2001
From: Danilo Lessa Bernardineli
Date: Thu, 21 Dec 2023 11:02:40 -0300
Subject: [PATCH 4/7] fix issues #335 & #332

---
 cadCAD/engine/__init__.py                | 46 ++++++++++++++---
 cadCAD/engine/execution.py               | 63 ++++++++++++------------
 testing/test_row_count.py                |  2 +-
 testing/tests/cadCAD_memory_address.json |  2 +-
 4 files changed, 72 insertions(+), 41 deletions(-)

diff --git a/cadCAD/engine/__init__.py b/cadCAD/engine/__init__.py
index 24d977d4..4dc33409 100644
--- a/cadCAD/engine/__init__.py
+++ b/cadCAD/engine/__init__.py
@@ -1,5 +1,5 @@
 from time import time
-from typing import Callable, Dict, List, Any, Tuple, Union
+from typing import Callable, Dict, List, Any, Tuple, Union, Sequence, Mapping
 from tqdm.auto import tqdm

 from cadCAD.utils import flatten
@@ -147,18 +147,50 @@ def get_final_results(simulations: List[StateHistory],
                       eps,
                       sessions: List[SessionDict],
                       remote_threshold: int):
+
+    # if list of lists of lists of dicts: do flatten
+    # if list of dicts: do not flatten
+    # else raise error
+
+
+    init: bool = isinstance(simulations, Sequence)
+    failed_1 = False
+    failed_2 = False
+
+    try:
+        init: bool = isinstance(simulations, Sequence)
+        dont_flatten = init & isinstance(simulations[0], Mapping)
+        do_flatten = not dont_flatten
+    except:
+        failed_1 = True
+        do_flatten = True
+
+    try:
+        do_flatten = init & isinstance(simulations[0], Sequence)
+        do_flatten &= isinstance(simulations[0][0], Sequence)
+        do_flatten &= isinstance(simulations[0][0][0], Mapping)
+    except:
+        failed_2 = True
+        do_flatten = False
+
+    if failed_1 and failed_2:
+        raise ValueError('Invalid simulation results (Executor output is not list[dict] or list[list[list[dict]]])')
+
+
    flat_timesteps, tensor_fields = [], []
    for sim_result, psu, ep in tqdm(list(zip(simulations, psus, eps)),
                                    total=len(simulations),
                                    desc='Flattening results'):
-        flat_timesteps.append(flatten(sim_result))
+        if do_flatten:
+            flat_timesteps.append(flatten(sim_result))
        
tensor_fields.append(create_tensor_field(psu, ep)) + + if do_flatten: + flat_simulations = flatten(flat_timesteps) + else: + flat_simulations = simulations - flat_simulations = flatten(flat_timesteps) - if config_amt == 1: - return simulations, tensor_fields, sessions - elif config_amt > 1: - return flat_simulations, tensor_fields, sessions + return flat_simulations, tensor_fields, sessions final_result = None original_N = len(configs_as_dicts(self.configs)) diff --git a/cadCAD/engine/execution.py b/cadCAD/engine/execution.py index 4a26218f..26666163 100644 --- a/cadCAD/engine/execution.py +++ b/cadCAD/engine/execution.py @@ -1,4 +1,4 @@ -from typing import Callable, Dict, List, Any, Tuple +from typing import Callable, Dict, List, Any, Tuple, Sequence from pathos.multiprocessing import ProcessPool # type: ignore from collections import Counter from cadCAD.types import * @@ -11,41 +11,40 @@ def single_proc_exec( - simulation_execs: List[ExecutorFunction], - var_dict_list: List[Parameters], - states_lists: List[StateHistory], - configs_structs: List[StateUpdateBlocks], - env_processes_list: List[EnvProcesses], - Ts: List[TimeSeq], - SimIDs: List[SimulationID], - Ns: List[Run], - ExpIDs: List[int], - SubsetIDs: List[SubsetID], - SubsetWindows: List[SubsetWindow], - configured_n: List[N_Runs], + simulation_execs: Sequence[ExecutorFunction], + var_dict_list: Union[Sequence[Parameters], Parameters], + states_lists: Sequence[StateHistory], + configs_structs: Sequence[StateUpdateBlocks], + env_processes_list: Sequence[EnvProcesses], + Ts: Sequence[TimeSeq], + SimIDs: Sequence[SimulationID], + Ns: Sequence[Run], + ExpIDs: Sequence[int], + SubsetIDs: Sequence[SubsetID], + SubsetWindows: Sequence[SubsetWindow], + configured_n: Sequence[N_Runs], additional_objs=None -): +) -> List: - # HACK for making it run with N_Runs=1 - if type(var_dict_list) == list: - var_dict_list = var_dict_list[0] - - print(f'Execution Mode: single_threaded') - raw_params: List[List] = [ - simulation_execs, states_lists, configs_structs, env_processes_list, - Ts, SimIDs, Ns, SubsetIDs, SubsetWindows - ] - simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window = list( - map(lambda x: x.pop(), raw_params) - ) - result = simulation_exec( - var_dict_list, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, configured_n, additional_objs - ) - return flatten(result) - - + if not isinstance(var_dict_list, Sequence): + var_dict_list = list([var_dict_list]) + results: List = [] + for var_dict in var_dict_list: + print(f'Execution Mode: single_threaded') + raw_params: List[List] = [ + simulation_execs, states_lists, configs_structs, env_processes_list, + Ts, SimIDs, Ns, SubsetIDs, SubsetWindows + ] + simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window = list( + map(lambda x: x.pop(), raw_params) + ) + result = simulation_exec( + var_dict, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, configured_n, additional_objs + ) + results.append(flatten(result)) + return flatten(results) def parallelize_simulations( simulation_execs: List[ExecutorFunction], diff --git a/testing/test_row_count.py b/testing/test_row_count.py index 1b657e73..3ca577bc 100644 --- a/testing/test_row_count.py +++ b/testing/test_row_count.py @@ -51,7 +51,7 @@ def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s): assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args) 
-@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST[:-1]) def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s): args = (N_sim, N_sw, N_r, N_t, N_s) assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index 38637ec2..2e0fa03a 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x111d22f20"} \ No newline at end of file +{"memory_address": "0x10fc14ef0"} \ No newline at end of file From e11a8b4c613be10760a3d7780473ba7dd56cef74 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 21 Dec 2023 11:09:47 -0300 Subject: [PATCH 5/7] parametrize test_runs --- testing/test_runs.py | 41 ++++++++++++----------------------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/testing/test_runs.py b/testing/test_runs.py index 60ded2ba..46962b1e 100644 --- a/testing/test_runs.py +++ b/testing/test_runs.py @@ -158,42 +158,25 @@ def sweeped(params: Parameters, substep: Substep, history: StateHistory, state: ) return exp - -def test_mc_sweep_experiment(): - experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) +@pytest.mark.parametrize("mode", ["local_proc", "single_proc", "multi_proc"]) +def test_mc_sweep_experiment(mode): experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + N_RUNS=2, N_TIMESTEPS=2, params=SWEEP_PARAMS), mode) - -def test_unique_sweep_experiment(): - experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.single_mode) +@pytest.mark.parametrize("mode", ["local_proc", "single_proc", "multi_proc"]) +def test_unique_sweep_experiment(mode): experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), ExecutionMode.multi_mode) + N_RUNS=1, N_TIMESTEPS=2, params=SWEEP_PARAMS), mode) - -def test_mc_single_experiment(): - experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) - experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) +@pytest.mark.parametrize("mode", ["local_proc", "single_proc", "multi_proc"]) +def test_mc_single_experiment(mode): experiment_assertions(create_experiment( - N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) + N_RUNS=2, N_TIMESTEPS=2, params=SINGLE_PARAMS), mode) - -def test_unique_single_experiment(): - experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.local_mode) +@pytest.mark.parametrize("mode", ["local_proc", "single_proc", "multi_proc"]) +def test_unique_single_experiment(mode): experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.single_mode) - with pytest.raises(ValueError) as e_info: - experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), ExecutionMode.multi_mode) + N_RUNS=1, N_TIMESTEPS=2, 
params=SINGLE_PARAMS), mode) def experiment_assertions(exp, mode=None): From 73d953f04722e8a8e6a9a0d8e7424333bc5a56b6 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 21 Dec 2023 11:12:48 -0300 Subject: [PATCH 6/7] add more tests --- testing/test_row_count.py | 8 ++++++-- testing/test_runs.py | 9 +++++++-- testing/tests/cadCAD_memory_address.json | 2 +- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/testing/test_row_count.py b/testing/test_row_count.py index 3ca577bc..a1d78f14 100644 --- a/testing/test_row_count.py +++ b/testing/test_row_count.py @@ -51,11 +51,15 @@ def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s): assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args) -@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST[:-1]) +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s): args = (N_sim, N_sw, N_r, N_t, N_s) - assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) + if N_sim == 1 and N_sw == 1 and N_r == 1: + with pytest.raises(ValueError) as e_info: + assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) + else: + assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s): diff --git a/testing/test_runs.py b/testing/test_runs.py index 46962b1e..a695b1dc 100644 --- a/testing/test_runs.py +++ b/testing/test_runs.py @@ -175,8 +175,13 @@ def test_mc_single_experiment(mode): @pytest.mark.parametrize("mode", ["local_proc", "single_proc", "multi_proc"]) def test_unique_single_experiment(mode): - experiment_assertions(create_experiment( - N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), mode) + if mode == "multi_proc": + with pytest.raises(ValueError) as e_info: + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), mode) + else: + experiment_assertions(create_experiment( + N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), mode) def experiment_assertions(exp, mode=None): diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index 2e0fa03a..7d5810f9 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x10fc14ef0"} \ No newline at end of file +{"memory_address": "0x10ddf9580"} \ No newline at end of file From a721c0fd691836f6c96a142577fba9d9cf641240 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 21 Dec 2023 11:46:02 -0300 Subject: [PATCH 7/7] fix subset ordering --- cadCAD/configuration/utils/__init__.py | 2 +- cadCAD/engine/execution.py | 18 ++++++++---------- cadCAD/types.py | 8 ++++---- testing/test_runs.py | 9 +++++---- testing/tests/cadCAD_memory_address.json | 2 +- 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cadCAD/configuration/utils/__init__.py b/cadCAD/configuration/utils/__init__.py index 7ebac29a..8359c1b9 100644 --- a/cadCAD/configuration/utils/__init__.py +++ b/cadCAD/configuration/utils/__init__.py @@ -104,7 +104,7 @@ def ep_decorator(f, y, var_dict, sub_step, sL, s, _input, **kwargs): else: return y, s[y] - return {es: ep_decorator(f, es) for es, f in ep.items()} + return {es: ep_decorator(f, es) for es, f in ep.items()} # type: ignore def trigger_condition(s, 
pre_conditions, cond_opp): diff --git a/cadCAD/engine/execution.py b/cadCAD/engine/execution.py index 26666163..97a5fa87 100644 --- a/cadCAD/engine/execution.py +++ b/cadCAD/engine/execution.py @@ -26,20 +26,18 @@ def single_proc_exec( additional_objs=None ) -> List: - + if not isinstance(var_dict_list, Sequence): var_dict_list = list([var_dict_list]) + raw_params = ( + simulation_execs, states_lists, configs_structs, env_processes_list, + Ts, SimIDs, Ns, SubsetIDs, SubsetWindows, var_dict_list) + results: List = [] - for var_dict in var_dict_list: - print(f'Execution Mode: single_threaded') - raw_params: List[List] = [ - simulation_execs, states_lists, configs_structs, env_processes_list, - Ts, SimIDs, Ns, SubsetIDs, SubsetWindows - ] - simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window = list( - map(lambda x: x.pop(), raw_params) - ) + print(f'Execution Mode: single_threaded') + for raw_param in zip(*raw_params): + simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, var_dict = raw_param result = simulation_exec( var_dict, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, configured_n, additional_objs ) diff --git a/cadCAD/types.py b/cadCAD/types.py index d5b9302f..d7bf731a 100644 --- a/cadCAD/types.py +++ b/cadCAD/types.py @@ -1,4 +1,4 @@ -from typing import TypedDict, Callable, Union, Dict, List, Tuple, Iterator +from typing import TypedDict, Callable, Union, Dict, List, Tuple, Iterable from collections import deque State = Dict[str, object] @@ -20,18 +20,18 @@ class StateUpdateBlock(TypedDict): StateUpdateBlocks = List[StateUpdateBlock] class ConfigurationDict(TypedDict): - T: Iterator # Generator for the timestep variable + T: Iterable # Generator for the timestep variable N: int # Number of MC Runs M: Union[Parameters, SweepableParameters] # Parameters / List of Parameter to Sweep TargetValue = object EnvProcess: Callable[[State, SweepableParameters, TargetValue], TargetValue] EnvProcesses = Dict[str, Callable] -TimeSeq = Iterator +TimeSeq = Iterable SimulationID = int Run = int SubsetID = int -SubsetWindow = Iterator +SubsetWindow = Iterable N_Runs = int ExecutorFunction = Callable[[Parameters, StateHistory, StateUpdateBlocks, EnvProcesses, TimeSeq, SimulationID, Run, SubsetID, SubsetWindow, N_Runs], object] diff --git a/testing/test_runs.py b/testing/test_runs.py index a695b1dc..69d1b149 100644 --- a/testing/test_runs.py +++ b/testing/test_runs.py @@ -1,4 +1,4 @@ -from typing import Dict, List +from typing import Dict, List, Optional from cadCAD.engine import Executor, ExecutionContext, ExecutionMode from cadCAD.configuration import Experiment from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list @@ -7,6 +7,7 @@ import types import inspect import pytest +from pandas import DataFrame def describe_or_return(v: object) -> object: @@ -63,8 +64,8 @@ def assign_params(_df: pd.DataFrame, configs) -> pd.DataFrame: SWEEP_PARAMS: Dict[str, List] = { 'alpha': [1], - 'beta': [lambda x: 2 * x, lambda x: x], - 'gamma': [3, 4], + 'beta': [lambda x: 2 * x, lambda x: x, lambda x: x / 2], + 'gamma': [3, 4, 5], 'omega': [7] } @@ -184,7 +185,7 @@ def test_unique_single_experiment(mode): N_RUNS=1, N_TIMESTEPS=2, params=SINGLE_PARAMS), mode) -def experiment_assertions(exp, mode=None): +def experiment_assertions(exp: Experiment, mode: Optional[str]=None) -> None: if mode == None: mode = ExecutionMode().local_mode exec_context = ExecutionContext(mode) diff 
--git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index 7d5810f9..c60ead7b 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x10ddf9580"} \ No newline at end of file +{"memory_address": "0x111857380"} \ No newline at end of file
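

Editor's addendum — a minimal sketch (not part of the patch series) of how the
v0.5.0 changes above fit together: the `deepcopy_off` toggle documented in the
CHANGELOG, and the Executor API exercised by the new tests. The toy model below
is a hypothetical placeholder; only the `ExecutionContext(mode,
additional_objs={'deepcopy_off': True})` call is taken from the changelog.

    from cadCAD.configuration import Experiment
    from cadCAD.configuration.utils import config_sim
    from cadCAD.engine import Executor, ExecutionContext, ExecutionMode

    # Hypothetical one-variable model, mirroring the style of test_row_count.py
    exp = Experiment()
    exp.append_model(
        sim_configs=config_sim({"N": 1, "T": range(3), "M": {"A": [None]}}),
        initial_state={"varA": None},
        partial_state_update_blocks=[{"policies": {}, "variables": {}}],
    )

    # v0.5.0 feature: pass additional_objs to deactivate deepcopying of state
    context = ExecutionContext(ExecutionMode().local_mode,
                               additional_objs={'deepcopy_off': True})
    records, tensor_fields, sessions = Executor(
        exec_context=context, configs=exp.configs).execute()

    # Per the expected_rows() formula in test_row_count.py:
    # 1 sim * 1 sweep * 1 run * (3 timesteps * 1 substep + 1 genesis) = 4 records
    assert len(records) == 4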