Merge pull request #16 from bthirion/master
Hotfix for example generation
bthirion authored Mar 15, 2024
2 parents 979b6ca + a2cb15f commit d991917
Showing 4 changed files with 30 additions and 23 deletions.
31 changes: 14 additions & 17 deletions examples/plot_2D_simulation_example.py
@@ -33,7 +33,7 @@
greater than the number of samples, standard statistical methods are
unlikely to recover the support. Then, the idea of clustered inference is to
compress the data without breaking the spatial structure, leading to a
-compressed problem close to the original problem. This leads to a
+compressed problem close to the original problem. This results in a more
powerful spatially relaxed inference. Indeed, thanks to the dimension reduction,
the support recovery is feasible. However, due to the spatial compression,
there is a limited (and quantifiable) spatial uncertainty concerning the shape
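The compression idea described above can be sketched in a few lines with scikit-learn; this is an illustrative toy under our own assumptions (a 40x40 grid, Ward clustering, variable names ours), not the example's actual code:

import numpy as np
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction.image import grid_to_graph

shape = (40, 40)
rng = np.random.RandomState(0)
X = rng.randn(100, shape[0] * shape[1])  # fewer samples than features

# A grid connectivity constraint keeps clusters spatially contiguous,
# so averaging features within clusters preserves the spatial layout.
connectivity = grid_to_graph(*shape)
ward = FeatureAgglomeration(n_clusters=200, connectivity=connectivity)
X_compressed = ward.fit_transform(X)  # shape (100, 200)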
@@ -82,6 +82,7 @@ def weight_map_2D_extended(shape, roi_size, delta):

    roi_size_extended = roi_size + delta

+    # Create four regions in the corners
    w = np.zeros(shape + (5,))
    w[0:roi_size, 0:roi_size, 0] = 0.5
    w[-roi_size:, -roi_size:, 1] = 0.5
@@ -92,6 +93,7 @@ def weight_map_2D_extended(shape, roi_size, delta):
    w[0:roi_size_extended, -roi_size_extended:, 2] += 0.5
    w[-roi_size_extended:, 0:roi_size_extended, 3] += 0.5

+    # round the corners of the extended regions a little bit
    for i in range(roi_size_extended):
        for j in range(roi_size_extended):
            if (i - roi_size) + (j - roi_size) >= delta:
@@ -131,7 +133,7 @@ def add_one_subplot(ax, map, title):
    ax.get_yaxis().set_visible(False)


-def plot(maps, titles, save_fig=False):
+def plot(maps, titles):
    '''Make a summary plot from estimated supports'''

    fig, axes = plt.subplots(3, 2, figsize=(4, 6))
@@ -143,11 +145,6 @@ def plot(maps, titles, save_fig=False):

    fig.tight_layout()

-    if save_fig:
-        figname = 'figures/simu_2D.png'
-        plt.savefig(figname)
-        print(f'Save figure to {figname}')
-
    plt.show()


@@ -188,9 +185,8 @@ def plot(maps, titles, save_fig=False):
# cluster diameter. However, this choice is conservative, notably in the case
# of ensembled clustered inference. For these algorithms, we recommend taking
# the average cluster radius. In this example, we choose ``n_clusters = 200``,
-# leading to a theoretical spatial tolerance ``delta = 6``. However, it
-# turns out that ``delta = 2``, the average cluster radius, would have been
-# sufficient for ensembled clustered inference algorithms (see Results).
+# leading to a theoretical spatial tolerance ``delta = 6``, which is still
+# conservative (see Results).

# hyper-parameters
n_clusters = 200
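As a sanity check on the tolerance discussion above, the average cluster radius can be estimated empirically; here is a rough sketch under our own assumptions (a 40x40 grid and a plain KMeans parcellation, not the clustering the example actually uses):

import numpy as np
from sklearn.cluster import KMeans

shape = (40, 40)
coords = np.indices(shape).reshape(2, -1).T  # one (i, j) row per pixel

labels = KMeans(n_clusters=200, n_init=10, random_state=0).fit_predict(coords)
radii = [
    np.linalg.norm(coords[labels == k] - coords[labels == k].mean(axis=0),
                   axis=1).max()
    for k in range(200)
]
print(np.mean(radii))  # around 2 pixels for ~8-pixel clusters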
@@ -208,9 +204,9 @@
#
# Below, we translate the FWER target into z-score targets.
# To compute the z-score targets we also account for the multiple
-# testing correction. To do so, we consider the Bonferroni correction.
+# testing correction. To do so, we consider Bonferroni correction.
# For methods that do not reduce the feature space, the correction
-# consists in dividing the FWER target by the number of features.
+# consists in dividing the targeted FWER by the number of features.
# For methods that group features into clusters, the correction
# consists in dividing by the number of clusters.
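Concretely, the FWER-to-z-score translation can be sketched as follows; a minimal illustration with our own variable names, assuming a two-sided test and an FWER target of 0.1 (the example's exact convention may differ):

from scipy.stats import norm

fwer_target = 0.1
n_features = 40 * 40   # methods that keep the full feature space
n_clusters = 200       # methods that group features into clusters

# Bonferroni: split the FWER budget across all (two-sided) tests
zscore_threshold_full = norm.isf(fwer_target / (2 * n_features))
zscore_threshold_clustered = norm.isf(fwer_target / (2 * n_clusters))
print(zscore_threshold_full, zscore_threshold_clustered)  # ~4.0 vs ~3.5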

@@ -326,13 +322,14 @@ def plot(maps, titles, save_fig=False):
#############################################################################
# Analysis of the results
# -----------------------
-# As argued in the first section of this example, the standard method that
-# do not compress the problem is not relevant as it dramatically lacks power.
+# As argued in the first section of this example, standard inference methods that
+# do not compress the problem dramatically lack power.
# The support estimated from CluDL provides a more reasonable solution
# since we recover the four regions. However, the shape of the estimated support
-# is a bit rough.
+# is a bit rough (as it is bound to a sub-optimal clustering).
# Finally, the solution provided by EnCluDL is more accurate since the shape
# of the estimated support is closer to the true support.
# Also, one can note that the theoretical spatial tolerance is quite
-# conservative. In practice, we argue that the statistical guarantees are valid
-# for a lower spatial tolerance thanks to the clustering randomization.
+# conservative. In practice, Type-1 Error guarantees seem to hold
+# for a lower spatial tolerance. This is an additional benefit of clustering
+# randomization.
2 changes: 1 addition & 1 deletion hidimstat/desparsified_lasso.py
@@ -29,7 +29,7 @@ def _compute_all_residuals(X, alphas, gram, max_iter=5000, tol=1e-3,
                method=method)
        for i in range(n_features))

-    results = np.asarray(results)
+    results = np.asarray(results, dtype=object)
    Z = np.stack(results[:, 0], axis=1)
    omega_diag = np.stack(results[:, 1])

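For context (our reading of the change): each parallel worker returns a pair whose two elements have different shapes, and NumPy 1.24+ raises an error when asked to build a regular array from such ragged sequences, so dtype=object is now required. A minimal reproduction of the pattern, not the library's code:

import numpy as np

# Each result is a (residual_vector, scalar) pair with mismatched shapes.
results = [(np.ones(3), 2.0), (np.zeros(3), 5.0)]

# Without dtype=object this raises ValueError on NumPy >= 1.24
# (and a VisibleDeprecationWarning on older versions).
results = np.asarray(results, dtype=object)

Z = np.stack(results[:, 0], axis=1)   # shape (3, 2)
omega_diag = np.stack(results[:, 1])  # shape (2,)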
18 changes: 14 additions & 4 deletions hidimstat/noise_std.py
@@ -59,8 +59,13 @@ def reid(X, y, eps=1e-2, tol=1e-4, max_iter=10000, n_jobs=1, seed=0):
    cv = KFold(n_splits=5, shuffle=True, random_state=seed)

    clf_lasso_cv = \
-        LassoCV(eps=eps, normalize=False, fit_intercept=False,
-                cv=cv, tol=tol, max_iter=max_iter, n_jobs=n_jobs)
+        LassoCV(
+            eps=eps,
+            fit_intercept=False,
+            cv=cv,
+            tol=tol,
+            max_iter=max_iter,
+            n_jobs=n_jobs)

    clf_lasso_cv.fit(X, y)
    beta_hat = clf_lasso_cv.coef_
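For context (our reading, not stated in the commit): scikit-learn deprecated the normalize parameter in 1.0 and removed it in 1.2, and since the code passed normalize=False, which was the default, dropping the argument preserves behavior; the same reasoning applies to the MultiTaskLassoCV change below. If scaling were actually wanted, the recommended replacement is a preprocessing step in a Pipeline, sketched here with hypothetical settings:

from sklearn.linear_model import LassoCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Scale explicitly instead of relying on the removed normalize=True
model = make_pipeline(StandardScaler(), LassoCV(fit_intercept=True))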
@@ -164,8 +169,13 @@ def group_reid(X, Y, fit_Y=True, stationary=True, method='simple', order=1,
    if fit_Y:

        clf_mtlcv = \
-            MultiTaskLassoCV(eps=eps, normalize=False, fit_intercept=False,
-                             cv=cv, tol=tol, max_iter=max_iter, n_jobs=n_jobs)
+            MultiTaskLassoCV(
+                eps=eps,
+                fit_intercept=False,
+                cv=cv,
+                tol=tol,
+                max_iter=max_iter,
+                n_jobs=n_jobs)

        clf_mtlcv.fit(X, Y)
        beta_hat = clf_mtlcv.coef_
2 changes: 1 addition & 1 deletion hidimstat/scenario.py
@@ -190,7 +190,7 @@ def multivariate_simulation(n_samples=100,
    X = []

    for i in np.arange(n_samples):
-        Xi = ndimage.filters.gaussian_filter(X_[i], smooth_X)
+        Xi = ndimage.gaussian_filter(X_[i], smooth_X)
        X.append(Xi.ravel())

    X = np.asarray(X)
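This one-line change tracks SciPy's namespace cleanup: the nested scipy.ndimage.filters module was deprecated in SciPy 1.8 and later removed, while the same function has long been exposed directly under scipy.ndimage. A quick self-contained check of the surviving spelling, with our own toy data:

import numpy as np
from scipy import ndimage

img = np.random.RandomState(0).rand(8, 8)
smoothed = ndimage.gaussian_filter(img, sigma=2.0)  # top-level namespace
print(smoothed.shape)  # (8, 8)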
