-
Notifications
You must be signed in to change notification settings - Fork 12
[WIP] Jet Stirred Reactors #17
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from 32 commits
22487f1
c76177f
05c5bfb
c21e72b
74470d5
318fbdc
95cdef7
bf299aa
8cb4b2b
caa248a
07a2f3b
c337fc5
51761d9
b8ca5e0
1408523
771ea25
0aaf21f
110cfcf
d8ade12
927215b
1e270b8
28404c2
8a5bb53
586d596
81811df
b78a87e
03c3aae
0dd2cc8
fe7ab9d
1dbb77a
a130918
7869d40
10fd2dc
d88ff03
55ac50f
04f58ca
d058045
c71ce3f
fccf2e9
685d1e7
ca74a47
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -70,3 +70,6 @@ target/ | |
|
|
||
| # project-specific data files | ||
| *.h5 | ||
|
|
||
| # IDE specific files | ||
| .vscode | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,11 +17,11 @@ | |
| print('Warning: YAML must be installed to read input file.') | ||
| raise | ||
|
|
||
| from pyked.chemked import ChemKED, DataPoint | ||
| from pyked.chemked import ChemKED | ||
|
|
||
| # Local imports | ||
| from .utils import units | ||
| from .simulation import Simulation | ||
| from .simulation import AutoIgnitionSimulation as Simulation | ||
|
|
||
| min_deviation = 0.10 | ||
| """float: minimum allowable standard deviation for experimental data""" | ||
|
|
@@ -40,7 +40,7 @@ def create_simulations(dataset, properties): | |
| Returns | ||
| ------- | ||
| simulations : list | ||
| List of :class:`Simulation` objects for each simulation | ||
| List of :class:`AutoIgnitionSimulation` objects for each simulation | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Perhaps I should wait to review until refactoring finished, but this file/module/class seems a little confused right now about if it's specific to Autoignition, or if it's generic for all simulations. So I'm going to pause reviewing now. Let me know if/when I should take another look. |
||
|
|
||
| """ | ||
|
|
||
|
|
@@ -59,19 +59,20 @@ def create_simulations(dataset, properties): | |
| ) | ||
| return simulations | ||
|
|
||
|
|
||
| def simulation_worker(sim_tuple): | ||
| """Worker for multiprocessing of simulation cases. | ||
|
|
||
| Parameters | ||
| ---------- | ||
| sim_tuple : tuple | ||
| Contains Simulation object and other parameters needed to setup | ||
| Contains AutoIgnitionSimulation object and other parameters needed to setup | ||
| and run case. | ||
|
|
||
| Returns | ||
| ------- | ||
| sim : ``Simulation`` | ||
| Simulation case with calculated ignition delay. | ||
| sim : ``AutoIgnitionSimulation`` | ||
| AutoIgnitionSimulation case with calculated ignition delay. | ||
|
|
||
| """ | ||
| sim, model_file, model_spec_key, path, restart = sim_tuple | ||
|
|
@@ -113,7 +114,6 @@ def estimate_std_dev(indep_variable, dep_variable): | |
| dep_variable = numpy.delete(dep_variable, idx[1:]) | ||
| indep_variable = numpy.delete(indep_variable, idx[1:]) | ||
|
|
||
|
|
||
| # ensure data sorted based on independent variable to avoid some problems | ||
| sorted_vars = sorted(zip(indep_variable, dep_variable)) | ||
| indep_variable = [pt[0] for pt in sorted_vars] | ||
|
|
@@ -143,8 +143,8 @@ def get_changing_variable(cases): | |
|
|
||
| Parameters | ||
| ---------- | ||
| cases : list(pyked.chemked.DataPoint) | ||
| List of DataPoint with experimental case data. | ||
| cases : list(pyked.chemked.IgnitionDataPoint) | ||
| List of IgnitionDataPoint with experimental case data. | ||
|
|
||
| Returns | ||
| ------- | ||
|
|
@@ -192,7 +192,7 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
| data_path='data', model_path='models', | ||
| results_path='results', model_variant_file=None, | ||
| num_threads=None, print_results=False, restart=False, | ||
| skip_validation=False, | ||
| skip_validation=False | ||
| ): | ||
| """Evaluates the ignition delay error of a model for a given dataset. | ||
|
|
||
|
|
@@ -257,7 +257,7 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
| # If number of threads not specified, use either max number of available | ||
| # cores minus 1, or use 1 if multiple cores not available. | ||
| if not num_threads: | ||
| num_threads = multiprocessing.cpu_count()-1 or 1 | ||
| num_threads = multiprocessing.cpu_count() - 1 or 1 | ||
|
|
||
| # Loop through all datasets | ||
| for idx_set, dataset in enumerate(dataset_list): | ||
|
|
@@ -290,27 +290,18 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
| # Need to check if Ar or He in reactants but not model, | ||
| # and if so skip this dataset (for now). | ||
| ####################################################### | ||
| if ((any(['Ar' in spec for case in properties.datapoints | ||
| for spec in case.composition] | ||
| ) | ||
| and 'Ar' not in model_spec_key[model_name] | ||
| ) or | ||
| (any(['He' in spec for case in properties.datapoints | ||
| for spec in case.composition] | ||
| ) | ||
| and 'He' not in model_spec_key[model_name] | ||
| ) | ||
| ): | ||
| warnings.warn('Warning: Ar or He in dataset, but not in model. Skipping.', | ||
| RuntimeWarning | ||
| ) | ||
| Ar_in_model = 'Ar' in model_spec_key[model_name] | ||
| He_in_model = 'He' in model_spec_key[model_name] | ||
| Ar_in_dataset = any(['Ar' in spec for case in properties.datapoints for spec in case.composition]) | ||
| He_in_dataset = any(['He' in spec for case in properties.datapoints for spec in case.composition]) | ||
| if (Ar_in_dataset and not Ar_in_model) or (He_in_dataset and not He_in_model): | ||
| warnings.warn( | ||
| 'Warning: Ar or He in dataset, but not in model. Skipping.', | ||
| RuntimeWarning | ||
| ) | ||
| error_func_sets[idx_set] = numpy.nan | ||
| continue | ||
|
|
||
| # Use available number of processors minus one, | ||
| # or one process if single core. | ||
| pool = multiprocessing.Pool(processes=num_threads) | ||
|
|
||
| # setup all cases | ||
| jobs = [] | ||
| for idx, sim in enumerate(simulations): | ||
|
|
@@ -323,7 +314,7 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
| bath_gases = set(model_variant[model_name]['bath gases']) | ||
| gases = bath_gases.intersection( | ||
| set([c['species-name'] for c in sim.properties.composition]) | ||
| ) | ||
| ) | ||
|
|
||
| # If only one bath gas present, use that. If multiple, use the | ||
| # predominant species. If none of the designated bath gases | ||
|
|
@@ -347,11 +338,10 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
|
|
||
| # choose closest pressure | ||
| # better way to do this? | ||
| i = numpy.argmin(numpy.abs(numpy.array( | ||
| [float(n) | ||
| for n in list(model_variant[model_name]['pressures']) | ||
| ] | ||
| ) - pres)) | ||
| i = numpy.argmin(numpy.abs(numpy.array([ | ||
| float(n) | ||
| for n in list(model_variant[model_name]['pressures']) | ||
| ]) - pres)) | ||
| pres = list(model_variant[model_name]['pressures'])[i] | ||
| model_mod += model_variant[model_name]['pressures'][pres] | ||
|
|
||
|
|
@@ -361,13 +351,19 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
|
|
||
| jobs.append([sim, model_file, model_spec_key[model_name], results_path, restart]) | ||
|
|
||
| # run all cases | ||
| jobs = tuple(jobs) | ||
| results = pool.map(simulation_worker, jobs) | ||
| if num_threads == 1: | ||
| # Don't use the threadpool if only 1 processor (useful for debugging) | ||
| results = [] | ||
| for job in jobs: | ||
| results.append(simulation_worker(job)) | ||
| else: | ||
| pool = multiprocessing.Pool(processes=num_threads) | ||
| jobs = tuple(jobs) | ||
| results = pool.map(simulation_worker, jobs) | ||
|
|
||
| # not adding more processes, and ensure all finished | ||
| pool.close() | ||
| pool.join() | ||
| # not adding more processes, and ensure all finished | ||
| pool.close() | ||
| pool.join() | ||
|
|
||
| dataset_meta['datapoints'] = [] | ||
|
|
||
|
|
@@ -399,16 +395,17 @@ def evaluate_model(model_name, spec_keys_file, dataset_file, | |
|
|
||
| # calculate error function for this dataset | ||
| error_func = numpy.power( | ||
| (numpy.log(ignition_delays_sim) - | ||
| numpy.log(ignition_delays_exp)) / standard_dev, 2 | ||
| ) | ||
| (numpy.log(ignition_delays_sim) - numpy.log(ignition_delays_exp)) | ||
| / standard_dev, 2 | ||
| ) | ||
| error_func = numpy.nanmean(error_func) | ||
| error_func_sets[idx_set] = error_func | ||
| dataset_meta['error function'] = float(error_func) | ||
|
|
||
| dev_func = (numpy.log(ignition_delays_sim) - | ||
| numpy.log(ignition_delays_exp) | ||
| ) / standard_dev | ||
| dev_func = ( | ||
| numpy.log(ignition_delays_sim) | ||
| - numpy.log(ignition_delays_exp) | ||
| ) / standard_dev | ||
| dev_func = numpy.nanmean(dev_func) | ||
| dev_func_sets[idx_set] = dev_func | ||
| dataset_meta['absolute deviation'] = float(dev_func) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
In general I'm not sure I like the idea of importing something under a different name - could likely confuse people down the line. Is there a good reason for it?