-
Notifications
You must be signed in to change notification settings - Fork 10
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* Examples: Added examples for mrc_meguk notts and cambridge sites. * Update CamCAN example scripts. * Added recommended setting example scripts for Oxford. * Updated Wakeman-Henson example scripts. --------- Co-authored-by: Rukuang Huang <[email protected]>
- Loading branch information
1 parent
168bdae
commit fbaedbd
Showing 41 changed files with 1,444 additions and 877 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,27 +2,33 @@ | |
""" | ||
|
||
# Authors: Chetan Gohil <[email protected]> | ||
|
||
import pathlib | ||
from glob import glob | ||
from dask.distributed import Client | ||
|
||
from osl import preprocessing, utils | ||
|
||
# Directories | ||
BASE_DIR = "/well/woolrich/projects/camcan" | ||
RAW_DIR = BASE_DIR + "/cc700/meg/pipeline/release005/BIDSsep/derivatives_rest/aa/AA_movecomp/aamod_meg_maxfilt_00002" | ||
PREPROC_DIR = BASE_DIR + "/winter23/preproc" | ||
PREPROC_DIR = BASE_DIR + "/summer23/preproc" | ||
|
||
# Settings | ||
config = """ | ||
preproc: | ||
- crop: {tmin: 30} | ||
- filter: {l_freq: 0.5, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}} | ||
- notch_filter: {freqs: 50 100} | ||
- resample: {sfreq: 250} | ||
- filter: {l_freq: 0.5, h_freq: 200, method: iir, iir_params: {order: 5, ftype: butter}} | ||
- notch_filter: {freqs: 50 88 100 118 150 156 185, notch_widths: 2} | ||
- resample: {sfreq: 400} | ||
- bad_segments: {segment_len: 500, picks: mag, significance_level: 0.1} | ||
- bad_segments: {segment_len: 500, picks: grad, significance_level: 0.1} | ||
- bad_segments: {segment_len: 500, picks: mag, mode: diff, significance_level: 0.1} | ||
- bad_segments: {segment_len: 500, picks: grad, mode: diff, significance_level: 0.1} | ||
- bad_channels: {picks: meg, significance_level: 0.1} | ||
- ica_raw: {picks: meg, n_components: 64} | ||
- bad_channels: {picks: mag, significance_level: 0.1} | ||
- bad_channels: {picks: grad, significance_level: 0.1} | ||
- ica_raw: {picks: meg, n_components: 0.99} | ||
- ica_autoreject: {picks: meg, ecgmethod: correlation, eogthreshold: auto} | ||
- interpolate_bads: {} | ||
""" | ||
|
@@ -32,7 +38,7 @@ | |
|
||
# Get input files | ||
inputs = [] | ||
for subject in glob(RAW_DIR + "/sub-*"): | ||
for subject in sorted(glob(RAW_DIR + "/sub-*")): | ||
subject = pathlib.Path(subject).stem | ||
inputs.append(RAW_DIR + f"/{subject}/mf2pt2_{subject}_ses-rest_task-rest_meg.fif") | ||
|
||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,108 @@ | ||
"""Coregistration.

This script was first run for all subjects (with n_init=1). Then, for subjects
whose coregistration looked a bit off, we re-ran this script just for that
particular subject with a higher n_init.
"""
|
||
# Authors: Chetan Gohil <[email protected]> | ||
|
||
import numpy as np | ||
import pathlib | ||
from glob import glob | ||
from dask.distributed import Client | ||
|
||
from osl import source_recon, utils | ||
|
||
# Directories
BASE_DIR = "/well/woolrich/projects/camcan"
PREPROC_DIR = f"{BASE_DIR}/summer23/preproc"
COREG_DIR = f"{BASE_DIR}/summer23/coreg"
ANAT_DIR = f"{BASE_DIR}/cc700/mri/pipeline/release004/BIDS_20190411/anat"
FSL_DIR = "/well/woolrich/projects/software/fsl"

# Files -- templates formatted with the subject ID via str.format, e.g.
# PREPROC_FILE.format("sub-CC110033")
_PREPROC_STEM = "/mf2pt2_{0}_ses-rest_task-rest_meg"
PREPROC_FILE = PREPROC_DIR + _PREPROC_STEM + _PREPROC_STEM + "_preproc_raw.fif"
SMRI_FILE = ANAT_DIR + "/{0}/anat/{0}_T1w.nii"

# Settings
config = """
source_recon:
    - extract_fiducials_from_fif: {}
    - fix_headshape_points: {}
    - compute_surfaces:
        include_nose: False
    - coregister:
        use_nose: False
        use_headshape: True
        #n_init: 50
"""
|
||
|
||
def fix_headshape_points(src_dir, subject, preproc_file, smri_file, epoch_file):
    """Remove stray Polhemus headshape points before coregistration.

    Loads the fiducial/headshape files previously extracted from the fif
    file, strips points digitised on the face and neck, and overwrites the
    saved headshape file in place. The signature matches what run_src_batch
    passes to extra_funcs; preproc_file, smri_file and epoch_file are unused
    here.
    """
    paths = source_recon.rhino.get_coreg_filenames(src_dir, subject)

    # Saved fiducials (length-3 vectors) and headshape points (3 x N array,
    # rows are x/y/z -- consistent with the hs[:, mask] indexing below)
    hs = np.loadtxt(paths["polhemus_headshape_file"])
    nas = np.loadtxt(paths["polhemus_nasion_file"])
    lpa = np.loadtxt(paths["polhemus_lpa_file"])
    rpa = np.loadtxt(paths["polhemus_rpa_file"])

    # Lower the nasion by 4 cm, then keep only the points further than 7 cm
    # from it -- this discards headshape points captured around the nose/face
    nas[2] -= 40
    dist = np.sqrt(np.sum((hs - nas[:, np.newaxis]) ** 2, axis=0))
    hs = hs[:, dist > 70]

    # Keep only points lying between the pre-auricular points on the
    # left-right axis (drops anything lateral to LPA/RPA)
    hs = hs[:, (hs[0] < rpa[0]) & (hs[0] > lpa[0])]

    # Drop points on the neck: anything below the lower of LPA/RPA minus a
    # small margin. NOTE(review): the margin of 4 looks like mm while the
    # nasion shift above used 40 -- confirm the intended units.
    hs = hs[:, hs[2] >= min(lpa[2], rpa[2]) - 4]

    # Overwrite headshape file
    utils.logger.log_or_print(f"overwritting {paths['polhemus_headshape_file']}")
    np.savetxt(paths["polhemus_headshape_file"], hs)
|
||
|
||
if __name__ == "__main__":
    utils.logger.set_up(level="INFO")
    source_recon.setup_fsl(FSL_DIR)

    # Subject IDs are parsed from the preprocessed file names:
    # "mf2pt2_<subject>_ses-rest_..." -> stem.split("_")[1] == "<subject>"
    preproc_paths = sorted(glob(PREPROC_FILE.replace("{0}", "*")))
    subjects = [pathlib.Path(p).stem.split("_")[1] for p in preproc_paths]

    # Per-subject structural MRI and preprocessed MEG files
    smri_files = [SMRI_FILE.format(s) for s in subjects]
    preproc_files = [PREPROC_FILE.format(s) for s in subjects]

    # Setup parallel processing
    # client = Client(n_workers=8, threads_per_worker=1)

    # Run coregistration
    source_recon.run_src_batch(
        config,
        src_dir=COREG_DIR,
        subjects=subjects,
        preproc_files=preproc_files,
        smri_files=smri_files,
        extra_funcs=[fix_headshape_points],
        # dask_client=True,
    )
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,12 +2,16 @@ | |
""" | ||
|
||
# Authors: Chetan Gohil <[email protected]> | ||
|
||
from glob import glob | ||
from dask.distributed import Client | ||
|
||
from osl import utils | ||
from osl.source_recon import find_template_subject, run_src_batch, setup_fsl | ||
|
||
SRC_DIR = "/well/woolrich/projects/camcan/winter23/src" | ||
# Directories | ||
SRC_DIR = "/well/woolrich/projects/camcan/summer23/src" | ||
FSL_DIR = "/well/woolrich/projects/software/fsl" | ||
|
||
if __name__ == "__main__": | ||
|
@@ -30,8 +34,8 @@ | |
template: {template} | ||
n_embeddings: 15 | ||
standardize: True | ||
n_init: 3 | ||
n_iter: 3000 | ||
n_init: 5 | ||
n_iter: 5000 | ||
max_flips: 20 | ||
""" | ||
|
||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
"""Data preparation: time-delay embedding and principal component analysis.
"""

# Authors: Chetan Gohil <[email protected]>

from glob import glob

from osl_dynamics.data import Data

# Sign-flipped parcel time courses produced by source reconstruction
files = sorted(glob("/well/woolrich/projects/camcan/summer23/src/*/sflip_parc-raw.fif"))
data = Data(files, picks="misc", reject_by_annotation="omit", n_jobs=16)

# 15-lag time-delay embedding, PCA to 100 components, then standardisation
data.prepare(
    {
        "tde_pca": {"n_embeddings": 15, "n_pca_components": 100},
        "standardize": {},
    }
)

# Persist the prepared data and remove the temporary working directory
data.save("/well/woolrich/projects/camcan/summer23/prepared")
data.delete_dir()
Oops, something went wrong.