
Commit

Comply with black also on examples and tests (equinor#74)
anders-kiaer authored Nov 11, 2019
1 parent 6c49bc4 commit 24dd94f
Showing 8 changed files with 246 additions and 184 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -30,7 +30,7 @@ install:
- export PATH=$PATH:$PWD

script:
- black --check webviz_subsurface_components setup.py
- black --check webviz_subsurface_components tests examples setup.py
- npm run test
- npm run linting
- npm run build:all
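
Note: the CI step above only reports formatting violations; running black on the same paths without the --check flag (black webviz_subsurface_components tests examples setup.py) rewrites the files in place, which is how a contributor would reproduce this commit's formatting locally (assuming black is installed in the environment).
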
2 changes: 2 additions & 0 deletions bandit.yml
@@ -0,0 +1,2 @@
#Allow use of assert for tests
skips: ['B101']
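
B101 is bandit's assert_used check. Pytest-style tests rely on plain assert statements, which is presumably why the check is skipped here. A minimal, hypothetical illustration (not a test from this repository):

# A plain assert in a pytest-style test; bandit's B101 ("assert_used")
# would flag this line unless the check is skipped.
def test_addition():
    assert 1 + 1 == 2
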
109 changes: 57 additions & 52 deletions examples/example_hm.py
@@ -10,19 +10,18 @@
import dash_core_components as dcc



def generate_synthetic_data(num_groups, num_iter, num_realizations):
"""Create synthetic test data. In reality, this data will
come from an assisted history matching run.
"""

obs_group_names = ['Obs. group ' + str(i) for i in range(num_groups)]
obs_group_names = [f"Obs. group {i}" for i in range(num_groups)]
number_dp = np.random.randint(low=10, high=100, size=num_groups)

df = pd.DataFrame()

for i in range(num_iter):
ensemble_name = 'Iteration ' + str(i)
ensemble_name = f"Iteration {i}"

# Random test data following
# chisquared distribution (i.e. normal distribution squared):
@@ -35,31 +34,34 @@ def generate_synthetic_data(num_groups, num_iter, num_realizations):
neg = misfits * (1 - split)

for j in range(num_realizations):
realization_name = 'Realization ' + str(j)
realization_name = f"Realization {j}"

scale = 1.0 + np.random.rand() * 0.4
realization_pos = scale * pos
realization_neg = scale * neg

df = df.append(pd.DataFrame(
OrderedDict([
('obs_group_name', obs_group_names),
('ensemble_name', ensemble_name),
('realization', realization_name),
('total_pos', realization_pos),
('total_neg', realization_neg),
('number_data_points', number_dp)
])))
df = df.append(
pd.DataFrame(
OrderedDict(
[
("obs_group_name", obs_group_names),
("ensemble_name", ensemble_name),
("realization", realization_name),
("total_pos", realization_pos),
("total_neg", realization_neg),
("number_data_points", number_dp),
]
)
)
)

return df.set_index(["obs_group_name", "ensemble_name", "realization"])

return df.set_index(['obs_group_name', 'ensemble_name', 'realization'])

def _get_unsorted_edges():
"""P10 - P90 unsorted edge coordinates"""

retval = {
'low': chi2.ppf(0.1, 1),
'high': chi2.ppf(0.9, 1)
}
retval = {"low": chi2.ppf(0.1, 1), "high": chi2.ppf(0.9, 1)}

return retval

@@ -69,13 +71,12 @@ def _get_sorted_edges(number_observation_groups):

monte_carlo_iterations = 100000

sorted_values = np.empty((number_observation_groups,
monte_carlo_iterations))
sorted_values = np.empty((number_observation_groups, monte_carlo_iterations))

for i in range(monte_carlo_iterations):
sorted_values[:, i] = np.sort(np.random.chisquare(
df=1,
size=number_observation_groups))
sorted_values[:, i] = np.sort(
np.random.chisquare(df=1, size=number_observation_groups)
)

sorted_values = np.flip(sorted_values, 0)

@@ -87,44 +88,45 @@ def _get_sorted_edges(number_observation_groups):
# These values are to be used for drawing the stair stepped
# sorted P10-P90 area:

coordinates = {'low': list(P10), 'high': list(P90)}
coordinates = {"low": list(P10), "high": list(P90)}

return coordinates


class HistoryMatch():
class HistoryMatch:
def __init__(self, data):
super(HistoryMatch, self).__init__()

self.data = self._prepareData(data)
self.data = self._prepareData(data)

def get_data(self):
return self.data

def _prepareData(self, data):
data = data.copy().reset_index()

ensemble_labels = data.ensemble_name.unique().tolist()
num_obs_groups = len(data.obs_group_name.unique())

data['avg_pos'] = data['total_pos'] / data['number_data_points']
data['avg_neg'] = data['total_neg'] / data['number_data_points']
data["avg_pos"] = data["total_pos"] / data["number_data_points"]
data["avg_neg"] = data["total_neg"] / data["number_data_points"]

iterations = []
for ensemble in ensemble_labels:
df = data[data.ensemble_name == ensemble]
iterations.append(df.groupby('obs_group_name').mean())
iterations.append(df.groupby("obs_group_name").mean())

sorted_iterations = self._sortIterations(iterations)

iterations_dict = self._iterations_to_dict(sorted_iterations,
ensemble_labels)
iterations_dict = self._iterations_to_dict(sorted_iterations, ensemble_labels)

confidence_sorted = _get_sorted_edges(num_obs_groups)
confidence_unsorted = _get_unsorted_edges()

data = {}
data['iterations'] = iterations_dict
data['confidence_interval_sorted'] = confidence_sorted
data['confidence_interval_unsorted'] = confidence_unsorted
data["iterations"] = iterations_dict
data["confidence_interval_sorted"] = confidence_sorted
data["confidence_interval_unsorted"] = confidence_unsorted

return data

@@ -135,9 +137,9 @@ def _sortIterations(self, iterations):
sorted_df = df.copy()

sorted_data.append(
sorted_df.assign(f=sorted_df['avg_pos'] + sorted_df['avg_neg'])
.sort_values('f', ascending=False)
.drop('f', axis=1)
sorted_df.assign(f=sorted_df["avg_pos"] + sorted_df["avg_neg"])
.sort_values("f", ascending=False)
.drop("f", axis=1)
)

return sorted_data
@@ -146,26 +148,29 @@ def _iterations_to_dict(self, iterations, labels):
retval = []

for iteration, label in zip(iterations, labels):
retval.append({
'name': label,
'positive': iteration['avg_pos'].tolist(),
'negative': iteration['avg_neg'].tolist(),
'labels': iteration.index.tolist()
})
retval.append(
{
"name": label,
"positive": iteration["avg_pos"].tolist(),
"negative": iteration["avg_neg"].tolist(),
"labels": iteration.index.tolist(),
}
)

return retval

data = generate_synthetic_data(num_groups=50,
num_iter=4,
num_realizations=100)

app = dash.Dash(__name__)
data = generate_synthetic_data(num_groups=50, num_iter=4, num_realizations=100)

app.layout = html.Div(children=[
app = dash.Dash(__name__)

webviz_subsurface_components.HistoryMatch(id='parameters',
data=HistoryMatch(data).get_data())
])
app.layout = html.Div(
children=[
webviz_subsurface_components.HistoryMatch(
id="parameters", data=HistoryMatch(data).get_data()
)
]
)

if __name__ == '__main__':
if __name__ == "__main__":
app.run_server(debug=True)
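
When run directly (for example with python examples/example_hm.py, assuming dash and webviz_subsurface_components are installed), the script generates the synthetic misfit data, wraps it in the HistoryMatch helper above and serves the component on a local Dash development server in debug mode.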
