From a09e53ccc68a2078955088771edc20d947a60609 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sat, 23 Apr 2022 10:20:00 -0500
Subject: [PATCH 01/32] Fix format

---
 workflow_miniscope/pipeline.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py
index ba3ab71..916879f 100644
--- a/workflow_miniscope/pipeline.py
+++ b/workflow_miniscope/pipeline.py
@@ -1,4 +1,5 @@
 import datajoint as dj
+
 from element_lab import lab
 from element_animal import subject
 from element_session import session_with_datetime as session

From 8561af8167128c887106a87634949608d1442993 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sat, 23 Apr 2022 10:54:08 -0500
Subject: [PATCH 02/32] Add requirement

---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 602b81e..a792b3f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,4 +4,5 @@ element-animal==0.1.0b0
 element-session==0.1.0b0
 element-miniscope @ git+https://github.com/datajoint/element-miniscope.git
 element-interface @ git+https://github.com/datajoint/element-interface.git
-djarchive-client @ git+https://github.com/datajoint/djarchive-client.git
\ No newline at end of file
+djarchive-client @ git+https://github.com/datajoint/djarchive-client.git
+jupytext==1.13.7
\ No newline at end of file

From 6285d87a77722cb5c29db740499a04411ffc7064 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sat, 23 Apr 2022 19:17:35 -0500
Subject: [PATCH 03/32] Remove `tests/user_data`

---
 tests/user_data/sessions.csv | 5 -----
 tests/user_data/subjects.csv | 4 ----
 2 files changed, 9 deletions(-)
 delete mode 100644 tests/user_data/sessions.csv
 delete mode 100644 tests/user_data/subjects.csv

diff --git a/tests/user_data/sessions.csv b/tests/user_data/sessions.csv
deleted file mode 100644
index f7f9291..0000000
--- a/tests/user_data/sessions.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-subject,session_dir
-subject1,F:/U24/workflow_imaging_data/subject1/20200609_170519
-subject1,F:/U24/workflow_imaging_data/subject1/20200609_171646
-subject2,F:/U24/workflow_imaging_data/subject2/20200420_1843959
-subject3,F:/U24/workflow_imaging_data/subject3/210107_run00_orientation_8dir
diff --git a/tests/user_data/subjects.csv b/tests/user_data/subjects.csv
deleted file mode 100644
index 538b942..0000000
--- a/tests/user_data/subjects.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-subject,sex,subject_birth_date,subject_description
-subject1,F,2020-01-01 00:00:01,91760
-subject2,M,2020-01-01 00:00:01,90853
-subject3,F,2020-01-01 00:00:01,sbx-JC015

From 9184e65350be339ac26b4fce1d0b397cc8897dff Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sat, 23 Apr 2022 20:06:06 -0500
Subject: [PATCH 04/32] Remove previous notebook versions.

---
 notebooks/00-parameters.py  |   92 ---
 notebooks/01-ingest.ipynb   |  484 ------
 notebooks/01-ingest.py      |  163 ----
 notebooks/02-explore.ipynb  | 1401 -----------------------------------
 notebooks/02-explore.py     |   55 --
 notebooks/03-automate.ipynb |  168 -----
 notebooks/03-automate.py    |   64 --
 notebooks/04-drop.py        |   40 -
 8 files changed, 2467 deletions(-)
 delete mode 100644 notebooks/00-parameters.py
 delete mode 100644 notebooks/01-ingest.ipynb
 delete mode 100644 notebooks/01-ingest.py
 delete mode 100644 notebooks/02-explore.ipynb
 delete mode 100644 notebooks/02-explore.py
 delete mode 100644 notebooks/03-automate.ipynb
 delete mode 100644 notebooks/03-automate.py
 delete mode 100644 notebooks/04-drop.py

diff --git a/notebooks/00-parameters.py b/notebooks/00-parameters.py
deleted file mode 100644
index 479e154..0000000
--- a/notebooks/00-parameters.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py
-#     text_representation:
-#       extension: .py
-#       format_name: light
-#       format_version: '1.5'
-#       jupytext_version: 1.11.1
-#   kernelspec:
-#     display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-#     metadata:
-#       interpreter:
-#         hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-#     name: python3
-# ---
-
-# # Insert an entry into `imaging.ProcessingParamSet`
-#
-# + The entry will comprise the parameters used for processing with the analysis package.
-#
-# + If the same parameters are used to analyze multiple datasets, the parameters only need to be inserted once.
-#
-# + This step is in a separate Jupyter Notebook because the parameters would otherwise clutter the next main notebook (`01-ingest.ipynb`).

-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-import numpy as np
-from workflow_miniscope.pipeline import *
-
-# ## Define the `MiniscopeAnalysis` parameters
-
-params = dict(pars_envs = ['memory_size_to_use', 12, 'memory_size_per_patch', 0.6, 'patch_dims', [64, 64]],
-              include_residual = False,
-              gSig = 3,
-              gSiz = 15,
-              ssub = 1,
-              with_dendrites = True,
-              updateA_search_method = 'dilate',
-              updateA_bSiz = 5,
-              updateA_dist = None,
-              spatial_constraints = ['connected', True, 'circular', False],
-              spatial_algorithm = 'hals_thresh',
-              Fs = 30,
-              tsub = 5,
-              deconv_flag = True,
-              deconv_options = ['type', 'ar1', 'method', 'constrained', 'smin', -5, 'optimize_pars', True, 'optimize_b', True, 'max_tau', 100],
-              nk = 3,
-              detrend_method = 'spline',
-              bg_model = 'ring',
-              nb = 1,
-              ring_radius = 23,
-              num_neighbors = [],
-              show_merge = False,
-              merge_thr = 0.65,
-              method_dist = 'max',
-              dmin = 5,
-              dmin_only = 2,
-              merge_thr_spatial = [0.8, 0.4, -float('inf')],
-              K = [],
-              min_corr = 0.9,
-              min_pnr = 15,
-              min_pixel = None,
-              bd = 0,
-              frame_range = [],
-              save_initialization = False,
-              use_parallel = True,
-              show_init = False,
-              choose_params = False,
-              center_psf = True,
-              min_corr_res = 0.7,
-              min_pnr_res = 8,
-              seed_method_res = 'auto',
-              update_sn = True,
-              with_manual_intervention = False)
-
-# ## Insert the `MiniscopeAnalysis` parameters
-#
-# + `insert_new_params` is a utility method of the `imaging.ProcessingParamSet` table that verifies the parameter set does not already exist in the table before inserting it.
- -imaging.ProcessingParamSet.insert_new_params( - processing_method='mcgill_miniscope_analysis', - paramset_idx=0, - paramset_desc='Calcium imaging analysis with Miniscope Analysis using default parameters', - params=params) - -# ## Proceed to the `01ingest.ipynb` Jupyter Notebook -# -# + This notebook describes the steps to ingest the imaging metadata and processed data. diff --git a/notebooks/01-ingest.ipynb b/notebooks/01-ingest.ipynb deleted file mode 100644 index 87a862a..0000000 --- a/notebooks/01-ingest.ipynb +++ /dev/null @@ -1,484 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Ingestion routine\n", - "\n", - "+ The following script outlines the steps to ingest UCLA Miniscope data (acquired metadata and processed data) into the DataJoint `workflow-miniscope`.\n", - "\n", - "+ To ingest with a completely automated workflow, see `03automate.ipynb`." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# Change into the parent directory to find the `dj_local_conf.json` file. \n", - "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n", - "import os\n", - "os.chdir('..')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from workflow_miniscope.pipeline import *" - ] - }, - { - "source": [ - "## Schema diagrams\n", - "\n", - "+ The following outputs are the diagrams of the schemas comprising this workflow.\n", - "\n", - "+ Please refer back to these diagrams to visualize the relationships of different tables." - ], - "cell_type": "markdown", - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ], - "image/svg+xml": "\n\n\n\n\nLab\n\n\nLab\n\n\n\n\n\nLocation\n\n\nLocation\n\n\n\n\n\nLab->Location\n\n\n\n\nlab.LabMembership\n\n\nlab.LabMembership\n\n\n\n\n\nLab->lab.LabMembership\n\n\n\n\nUser\n\n\nUser\n\n\n\n\n\nUser->lab.LabMembership\n\n\n\n\nlab.ProjectUser\n\n\nlab.ProjectUser\n\n\n\n\n\nUser->lab.ProjectUser\n\n\n\n\nEquipment\n\n\nEquipment\n\n\n\n\n\nProtocol\n\n\nProtocol\n\n\n\n\n\nlab.UserRole\n\n\nlab.UserRole\n\n\n\n\n\nlab.UserRole->lab.LabMembership\n\n\n\n\nProject\n\n\nProject\n\n\n\n\n\nProject->lab.ProjectUser\n\n\n\n\nlab.ProtocolType\n\n\nlab.ProtocolType\n\n\n\n\n\nlab.ProtocolType->Protocol\n\n\n\n\nSource\n\n\nSource\n\n\n\n\n" - }, - "metadata": {}, - "execution_count": 4 - } - ], - "source": [ - "dj.Diagram(lab)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ], - "image/svg+xml": 
"\n\n\n\n\n`neuro_subject`.`subject__line`\n\n`neuro_subject`.`subject__line`\n\n\n\n`neuro_subject`.`#allele__source`\n\n`neuro_subject`.`#allele__source`\n\n\n\n`neuro_subject`.`subject__lab`\n\n`neuro_subject`.`subject__lab`\n\n\n\n`neuro_subject`.`subject__user`\n\n`neuro_subject`.`subject__user`\n\n\n\n`neuro_subject`.`subject__protocol`\n\n`neuro_subject`.`subject__protocol`\n\n\n\n`neuro_subject`.`subject__strain`\n\n`neuro_subject`.`subject__strain`\n\n\n\n`neuro_subject`.`#line__allele`\n\n`neuro_subject`.`#line__allele`\n\n\n\n`neuro_subject`.`subject__source`\n\n`neuro_subject`.`subject__source`\n\n\n\nsubject.SubjectCullMethod\n\n\nsubject.SubjectCullMethod\n\n\n\n\n\nsubject.Line\n\n\nsubject.Line\n\n\n\n\n\nsubject.Line->`neuro_subject`.`subject__line`\n\n\n\n\nsubject.Line->`neuro_subject`.`#line__allele`\n\n\n\n\nsubject.Strain\n\n\nsubject.Strain\n\n\n\n\n\nsubject.Strain->`neuro_subject`.`subject__strain`\n\n\n\n\nsubject.SubjectDeath\n\n\nsubject.SubjectDeath\n\n\n\n\n\nsubject.Zygosity\n\n\nsubject.Zygosity\n\n\n\n\n\nsubject.Allele\n\n\nsubject.Allele\n\n\n\n\n\nsubject.Allele->`neuro_subject`.`#allele__source`\n\n\n\n\nsubject.Allele->`neuro_subject`.`#line__allele`\n\n\n\n\nsubject.Allele->subject.Zygosity\n\n\n\n\nSubject\n\n\nSubject\n\n\n\n\n\nSubject->`neuro_subject`.`subject__line`\n\n\n\n\nSubject->`neuro_subject`.`subject__lab`\n\n\n\n\nSubject->`neuro_subject`.`subject__user`\n\n\n\n\nSubject->`neuro_subject`.`subject__protocol`\n\n\n\n\nSubject->`neuro_subject`.`subject__strain`\n\n\n\n\nSubject->`neuro_subject`.`subject__source`\n\n\n\n\nSubject->subject.SubjectCullMethod\n\n\n\n\nSubject->subject.SubjectDeath\n\n\n\n\nSubject->subject.Zygosity\n\n\n\n" - }, - "metadata": {}, - "execution_count": 5 - } - ], - "source": [ - "dj.Diagram(subject)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ], - "image/svg+xml": "\n\n\n\n\nSession\n\n\nSession\n\n\n\n\n\nsession.SessionDirectory\n\n\nsession.SessionDirectory\n\n\n\n\n\nSession->session.SessionDirectory\n\n\n\n\nsession.ProjectSession\n\n\nsession.ProjectSession\n\n\n\n\n\nSession->session.ProjectSession\n\n\n\n" - }, - "metadata": {}, - "execution_count": 6 - } - ], - "source": [ - "dj.Diagram(session)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ], - "image/svg+xml": "\n\n\n\n\n`neuro_scan`.`_scan_info__field`\n\n`neuro_scan`.`_scan_info__field`\n\n\n\n`neuro_scan`.`_scan_info__scan_file`\n\n`neuro_scan`.`_scan_info__scan_file`\n\n\n\nscan.ScanLocation\n\n\nscan.ScanLocation\n\n\n\n\n\nscan.AcquisitionSoftware\n\n\nscan.AcquisitionSoftware\n\n\n\n\n\nscan.Scan\n\n\nscan.Scan\n\n\n\n\n\nscan.AcquisitionSoftware->scan.Scan\n\n\n\n\nscan.Channel\n\n\nscan.Channel\n\n\n\n\n\nscan.ScanInfo\n\n\nscan.ScanInfo\n\n\n\n\n\nscan.ScanInfo->`neuro_scan`.`_scan_info__field`\n\n\n\n\nscan.ScanInfo->`neuro_scan`.`_scan_info__scan_file`\n\n\n\n\nscan.Scan->scan.ScanLocation\n\n\n\n\nscan.Scan->scan.ScanInfo\n\n\n\n" - }, - "metadata": {}, - "execution_count": 7 - } - ], - "source": [ - "dj.Diagram(scan)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ], - "image/svg+xml": 
"\n\n\n\n\n`neuro_imaging`.`_motion_correction__non_rigid_motion_correction`\n\n`neuro_imaging`.`_motion_correction__non_rigid_motion_correction`\n\n\n\n`neuro_imaging`.`_motion_correction__block`\n\n`neuro_imaging`.`_motion_correction__block`\n\n\n\n`neuro_imaging`.`_motion_correction__non_rigid_motion_correction`->`neuro_imaging`.`_motion_correction__block`\n\n\n\n\n`neuro_imaging`.`__mask_classification__mask_type`\n\n`neuro_imaging`.`__mask_classification__mask_type`\n\n\n\n`neuro_imaging`.`_motion_correction__summary`\n\n`neuro_imaging`.`_motion_correction__summary`\n\n\n\n`neuro_imaging`.`__fluorescence__trace`\n\n`neuro_imaging`.`__fluorescence__trace`\n\n\n\n`neuro_imaging`.`__activity__trace`\n\n`neuro_imaging`.`__activity__trace`\n\n\n\n`neuro_imaging`.`__fluorescence__trace`->`neuro_imaging`.`__activity__trace`\n\n\n\n\n`neuro_imaging`.`__segmentation__mask`\n\n`neuro_imaging`.`__segmentation__mask`\n\n\n\n`neuro_imaging`.`__segmentation__mask`->`neuro_imaging`.`__mask_classification__mask_type`\n\n\n\n\n`neuro_imaging`.`__segmentation__mask`->`neuro_imaging`.`__fluorescence__trace`\n\n\n\n\n`neuro_imaging`.`_motion_correction__rigid_motion_correction`\n\n`neuro_imaging`.`_motion_correction__rigid_motion_correction`\n\n\n\nimaging.MaskClassificationMethod\n\n\nimaging.MaskClassificationMethod\n\n\n\n\n\nimaging.MaskClassification\n\n\nimaging.MaskClassification\n\n\n\n\n\nimaging.MaskClassificationMethod->imaging.MaskClassification\n\n\n\n\nimaging.Processing\n\n\nimaging.Processing\n\n\n\n\n\nimaging.MotionCorrection\n\n\nimaging.MotionCorrection\n\n\n\n\n\nimaging.Processing->imaging.MotionCorrection\n\n\n\n\nimaging.Curation\n\n\nimaging.Curation\n\n\n\n\n\nimaging.Processing->imaging.Curation\n\n\n\n\nimaging.Activity\n\n\nimaging.Activity\n\n\n\n\n\nimaging.Activity->`neuro_imaging`.`__activity__trace`\n\n\n\n\nimaging.ProcessingTask\n\n\nimaging.ProcessingTask\n\n\n\n\n\nimaging.ProcessingTask->imaging.Processing\n\n\n\n\nimaging.MaskType\n\n\nimaging.MaskType\n\n\n\n\n\nimaging.MaskType->`neuro_imaging`.`__mask_classification__mask_type`\n\n\n\n\nimaging.ProcessingParamSet\n\n\nimaging.ProcessingParamSet\n\n\n\n\n\nimaging.ProcessingParamSet->imaging.ProcessingTask\n\n\n\n\nimaging.ProcessingMethod\n\n\nimaging.ProcessingMethod\n\n\n\n\n\nimaging.ProcessingMethod->imaging.ProcessingParamSet\n\n\n\n\nimaging.MotionCorrection->`neuro_imaging`.`_motion_correction__non_rigid_motion_correction`\n\n\n\n\nimaging.MotionCorrection->`neuro_imaging`.`_motion_correction__summary`\n\n\n\n\nimaging.MotionCorrection->`neuro_imaging`.`_motion_correction__rigid_motion_correction`\n\n\n\n\nimaging.Segmentation\n\n\nimaging.Segmentation\n\n\n\n\n\nimaging.MotionCorrection->imaging.Segmentation\n\n\n\n\nimaging.Segmentation->`neuro_imaging`.`__segmentation__mask`\n\n\n\n\nimaging.Segmentation->imaging.MaskClassification\n\n\n\n\nimaging.Fluorescence\n\n\nimaging.Fluorescence\n\n\n\n\n\nimaging.Segmentation->imaging.Fluorescence\n\n\n\n\nimaging.CellCompartment\n\n\nimaging.CellCompartment\n\n\n\n\n\nimaging.ActivityExtractionMethod\n\n\nimaging.ActivityExtractionMethod\n\n\n\n\n\nimaging.ActivityExtractionMethod->imaging.Activity\n\n\n\n\nimaging.MaskClassification->`neuro_imaging`.`__mask_classification__mask_type`\n\n\n\n\nimaging.Curation->imaging.Segmentation\n\n\n\n\nimaging.Fluorescence->`neuro_imaging`.`__fluorescence__trace`\n\n\n\n\nimaging.Fluorescence->imaging.Activity\n\n\n\n" - }, - "metadata": {}, - "execution_count": 8 - } - ], - "source": [ - "dj.Diagram(imaging)" - ] - }, - 
{
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Insert an entry into `subject.Subject`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "subject.Subject.insert1(dict(subject='subject1', \n",
-    "                             sex='F', \n",
-    "                             subject_birth_date='2019-01-01 00:00:01', \n",
-    "                             subject_description='no description'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Insert an entry into `lab.Equipment`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "Equipment.insert1(dict(scanner='Miniscope-DAQ-V3'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Insert an entry into `session.Session`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "session.Session.insert1(dict(subject='subject1', \n",
-    "                             session_datetime='2021-01-01 00:00:01'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Insert an entry into `session.SessionDirectory`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "session.SessionDirectory.insert1(dict(subject='subject1', \n",
-    "                                      session_datetime='2021-01-01 00:00:01', \n",
-    "                                      session_dir='/subject1/session0'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Insert an entry into `scan.Scan`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scan.Scan.insert1(dict(subject='subject1', \n",
-    "                       session_datetime='2021-01-01 00:00:01', \n",
-    "                       scan_id=0, \n",
-    "                       scanner='Miniscope-DAQ-V3', \n",
-    "                       acq_software='Miniscope-DAQ-V3',\n",
-    "                       scan_notes=''))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `scan.ScanInfo`\n",
-    "\n",
-    "+ This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).\n",
-    "+ `populate` automatically calls `make` for every key for which the auto-populated table is missing data.\n",
-    "+ `populate_settings` passes arguments to the `populate` method.\n",
-    "+ `display_progress=True` displays a progress bar."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "populate_settings = {'display_progress': True}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scan.ScanInfo.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "collapsed": false
-   },
-   "source": [
-    "## Insert an entry into `imaging.ProcessingTask`\n",
-    "\n",
-    "+ This entry will trigger ingestion of the processed results (i.e. motion correction, segmentation, and traces).\n",
-    "\n",
-    "+ The `paramset_idx` is the parameter set stored in `imaging.ProcessingParamSet` that is used for the image processing.\n",
-    "\n",
-    "+ The `processing_output_dir` attribute contains the output directory of the processed results (relative to the imaging root data directory)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false,
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "imaging.ProcessingTask.insert1(dict(subject='subject1', \n",
-    "                                    session_datetime='2021-01-01 00:00:01', \n",
-    "                                    scan_id=0,\n",
-    "                                    paramset_idx=0,\n",
-    "                                    processing_output_dir='/subject1/session0/miniscope_analysis',\n",
-    "                                    task_mode='load'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.Processing`\n",
-    "\n",
-    "+ For the `task_mode=load` specified above in `imaging.ProcessingTask`, this step ensures that the output directory contains the valid processed outputs.\n",
-    "\n",
-    "+ In the future, this step will provide the option to `trigger` the analysis within this workflow (if `task_mode=trigger`)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.Processing.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.MotionCorrection`\n",
-    "\n",
-    "+ This table contains the rigid or non-rigid motion correction data, including the shifts and summary images.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.MotionCorrection.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "collapsed": false
-   },
-   "source": [
-    "## Insert an entry into `imaging.Curation`\n",
-    "\n",
-    "+ The next step in the pipeline is the curation of segmentation results. If manual curation was performed, an entry needs to be inserted into the `imaging.Curation` table, specifying the directory of the curated results in `curation_output_dir`. If we would like to use the processed outcome directly, an entry is also needed in `imaging.Curation`. The method `create1_from_processing_task` helps with this insertion: it copies the `processing_output_dir` in `imaging.ProcessingTask` to the `curation_output_dir` field of `imaging.Curation`, with a new `curation_id`.\n",
-    "\n",
-    "+ In this example, we create/insert one `imaging.Curation` for each `imaging.ProcessingTask`, specifying the same output directory."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.Curation.insert1(dict(subject='subject1', \n",
-    "                              session_datetime='2021-01-01 00:00:01', \n",
-    "                              scan_id=0,\n",
-    "                              paramset_idx=0,\n",
-    "                              curation_id=0,\n",
-    "                              curation_time='2021-01-01 00:00:01', \n",
-    "                              curation_output_dir='/subject1/session0/miniscope_analysis',\n",
-    "                              manual_curation=False,\n",
-    "                              curation_note=''))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.Segmentation`\n",
-    "\n",
-    "+ This table contains the mask coordinates, weights, and centers."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.Segmentation.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.MaskClassification`\n",
-    "\n",
-    "+ This table is currently not implemented."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.MaskClassification.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.Fluorescence`\n",
-    "\n",
-    "+ This table contains the fluorescence traces prior to filtering and spike extraction."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.Fluorescence.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Populate `imaging.Activity`\n",
-    "+ This table contains the inferred neural activity from the fluorescence traces."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imaging.Activity.populate(**populate_settings)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Proceed to the `02-explore.ipynb` Jupyter Notebook\n",
-    "\n",
-    "+ This notebook describes the steps to query, fetch, and visualize the imaging data."
-   ]
-  }
- ],
- "metadata": {
-  "jupytext": {
-   "formats": "ipynb,py"
-  },
-  "kernelspec": {
-   "name": "python3",
-   "display_name": "Python 3.7.10 64-bit ('workflow-miniscope': conda)",
-   "metadata": {
-    "interpreter": {
-     "hash": "fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c"
-    }
-   }
-  },
- "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
\ No newline at end of file
diff --git a/notebooks/01-ingest.py b/notebooks/01-ingest.py
deleted file mode 100644
index 75c1f4b..0000000
--- a/notebooks/01-ingest.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py
-#     text_representation:
-#       extension: .py
-#       format_name: light
-#       format_version: '1.5'
-#       jupytext_version: 1.11.1
-#   kernelspec:
-#     display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-#     metadata:
-#       interpreter:
-#         hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-#     name: python3
-# ---
-
-# # Ingestion routine
-#
-# + The following script outlines the steps to ingest UCLA Miniscope data (acquired metadata and processed data) into the DataJoint `workflow-miniscope`.
-#
-# + To ingest with a completely automated workflow, see `03-automate.ipynb`.
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-import numpy as np
-from workflow_miniscope.pipeline import *
-
-# ## Schema diagrams
-#
-# + The following outputs are the diagrams of the schemas comprising this workflow.
-#
-# + Please refer back to these diagrams to visualize the relationships of different tables.
-
-dj.Diagram(lab)
-
-dj.Diagram(subject)
-
-dj.Diagram(session)
-
-dj.Diagram(scan)
-
-dj.Diagram(imaging)
-
-# ## Insert an entry into `subject.Subject`
-
-subject.Subject.insert1(dict(subject='subject1', 
-                             sex='F', 
-                             subject_birth_date='2019-01-01 00:00:01', 
-                             subject_description='no description'))
-
-# ## Insert an entry into `lab.Equipment`
-
-Equipment.insert1(dict(scanner='Miniscope-DAQ-V3'))
-
-# ## Insert an entry into `session.Session`
-
-session.Session.insert1(dict(subject='subject1', 
-                             session_datetime='2021-01-01 00:00:01'))
-
-# ## Insert an entry into `session.SessionDirectory`
-
-session.SessionDirectory.insert1(dict(subject='subject1', 
-                                      session_datetime='2021-01-01 00:00:01', 
-                                      session_dir='/subject1/session0'))
-
-# ## Insert an entry into `scan.Scan`
-
-scan.Scan.insert1(dict(subject='subject1', 
-                       session_datetime='2021-01-01 00:00:01', 
-                       scan_id=0, 
-                       scanner='Miniscope-DAQ-V3', 
-                       acq_software='Miniscope-DAQ-V3',
-                       scan_notes=''))
-
-# ## Populate `scan.ScanInfo`
-#
-# + This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).
-# + `populate` automatically calls `make` for every key for which the auto-populated table is missing data.
-# + `populate_settings` passes arguments to the `populate` method.
-# + `display_progress=True` displays a progress bar.
-
-populate_settings = {'display_progress': True}
-
-scan.ScanInfo.populate(**populate_settings)
-
-# ## Insert an entry into `imaging.ProcessingTask`
-#
-# + This entry will trigger ingestion of the processed results (i.e. motion correction, segmentation, and traces).
-#
-# + The `paramset_idx` is the parameter set stored in `imaging.ProcessingParamSet` that is used for the image processing.
-#
-# + The `processing_output_dir` attribute contains the output directory of the processed results (relative to the imaging root data directory).
-
-# + pycharm={"name": "#%%\n"}
-imaging.ProcessingTask.insert1(dict(subject='subject1', 
-                                    session_datetime='2021-01-01 00:00:01', 
-                                    scan_id=0,
-                                    paramset_idx=0,
-                                    processing_output_dir='/subject1/session0/miniscope_analysis',
-                                    task_mode='load'))
-# -
-
-# ## Populate `imaging.Processing`
-#
-# + For the `task_mode=load` specified above in `imaging.ProcessingTask`, this step ensures that the output directory contains the valid processed outputs.
-#
-# + In the future, this step will provide the option to `trigger` the analysis within this workflow (if `task_mode=trigger`).
-
-imaging.Processing.populate(**populate_settings)
-
-# ## Populate `imaging.MotionCorrection`
-#
-# + This table contains the rigid or non-rigid motion correction data, including the shifts and summary images.
-#
-
-imaging.MotionCorrection.populate(**populate_settings)
-
-# ## Insert an entry into `imaging.Curation`
-#
-# + The next step in the pipeline is the curation of segmentation results. If manual curation was performed, an entry needs to be inserted into the `imaging.Curation` table, specifying the directory of the curated results in `curation_output_dir`. If we would like to use the processed outcome directly, an entry is also needed in `imaging.Curation`. The method `create1_from_processing_task` helps with this insertion: it copies the `processing_output_dir` in `imaging.ProcessingTask` to the `curation_output_dir` field of `imaging.Curation`, with a new `curation_id`.
-#
-# + In this example, we create/insert one `imaging.Curation` for each `imaging.ProcessingTask`, specifying the same output directory.
-
-imaging.Curation.insert1(dict(subject='subject1', 
-                              session_datetime='2021-01-01 00:00:01', 
-                              scan_id=0,
-                              paramset_idx=0,
-                              curation_id=0,
-                              curation_time='2021-01-01 00:00:01', 
-                              curation_output_dir='/subject1/session0/miniscope_analysis',
-                              manual_curation=False,
-                              curation_note=''))
-
-# ## Populate `imaging.Segmentation`
-#
-# + This table contains the mask coordinates, weights, and centers.
-
-imaging.Segmentation.populate(**populate_settings)
-
-# ## Populate `imaging.MaskClassification`
-#
-# + This table is currently not implemented.
-
-imaging.MaskClassification.populate(**populate_settings)
-
-# ## Populate `imaging.Fluorescence`
-#
-# + This table contains the fluorescence traces prior to filtering and spike extraction.
-
-imaging.Fluorescence.populate(**populate_settings)
-
-# ## Populate `imaging.Activity`
-# + This table contains the inferred neural activity from the fluorescence traces.
-
-imaging.Activity.populate(**populate_settings)
-
-# ## Proceed to the `02-explore.ipynb` Jupyter Notebook
-#
-# + This notebook describes the steps to query, fetch, and visualize the imaging data.
diff --git a/notebooks/02-explore.ipynb b/notebooks/02-explore.ipynb
deleted file mode 100644
index 1a14a31..0000000
--- a/notebooks/02-explore.ipynb
+++ /dev/null
@@ -1,1401 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# [WIP] DataJoint U24 Workflow Imaging\n",
-    "This notebook will describe the steps for interacting with the data ingested into `workflow-miniscope`. \n",
-    "\n",
-    "Prior to using this notebook, please refer to the [README](https://github.com/datajoint/workflow-imaging) for the topics listed below. \n",
-    " + Installation instructions \n",
-    " + Directory structure and file naming convention \n",
-    " + Running the workflow "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Change into the parent directory to find the `dj_local_conf.json` file. 
\n", - "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n", - "import os\n", - "os.chdir('..')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from workflow_imaging.pipeline import *" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Workflow architecture" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "Source\n", - "\n", - "\n", - "Source\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "lab.LabMembership\n", - "\n", - "\n", - "lab.LabMembership\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "User\n", - "\n", - "\n", - "User\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "User->lab.LabMembership\n", - "\n", - "\n", - "\n", - "\n", - "lab.ProjectUser\n", - "\n", - "\n", - "lab.ProjectUser\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "User->lab.ProjectUser\n", - "\n", - "\n", - "\n", - "\n", - "Protocol\n", - "\n", - "\n", - "Protocol\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Location\n", - "\n", - "\n", - "Location\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Lab\n", - "\n", - "\n", - "Lab\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Lab->lab.LabMembership\n", - "\n", - "\n", - "\n", - "\n", - "Lab->Location\n", - "\n", - "\n", - "\n", - "\n", - "lab.Project\n", - "\n", - "\n", - "lab.Project\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "lab.Project->lab.ProjectUser\n", - "\n", - "\n", - "\n", - "\n", - "lab.ProtocolType\n", - "\n", - "\n", - "lab.ProtocolType\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "lab.ProtocolType->Protocol\n", - "\n", - "\n", - "\n", - "\n", - "lab.UserRole\n", - "\n", - "\n", - "lab.UserRole\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "lab.UserRole->lab.LabMembership\n", - "\n", - "\n", - "\n", - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dj.Diagram(lab)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Line\n", - "\n", - "\n", - "subject.Line\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Line.Allele\n", - "\n", - "\n", - "subject.Line.Allele\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Line->subject.Line.Allele\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.Line\n", - "\n", - "\n", - "subject.Subject.Line\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Line->subject.Subject.Line\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair\n", - "\n", - "\n", - "subject.BreedingPair\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Line->subject.BreedingPair\n", - "\n", - "\n", - "\n", - "\n", - "subject.SubjectLitter\n", - "\n", - "\n", - "subject.SubjectLitter\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.SubjectCullMethod\n", - "\n", - "\n", - "subject.SubjectCullMethod\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele.Source\n", - "\n", - "\n", - "subject.Allele.Source\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.Lab\n", - "\n", - "\n", - "subject.Subject.Lab\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Litter\n", - "\n", - "\n", - "subject.Litter\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Litter->subject.SubjectLitter\n", - "\n", - 
"\n", - "\n", - "\n", - "subject.Weaning\n", - "\n", - "\n", - "subject.Weaning\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Litter->subject.Weaning\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.Protocol\n", - "\n", - "\n", - "subject.Subject.Protocol\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.Strain\n", - "\n", - "\n", - "subject.Subject.Strain\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.User\n", - "\n", - "\n", - "subject.Subject.User\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair.Father\n", - "\n", - "\n", - "subject.BreedingPair.Father\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.GenotypeTest\n", - "\n", - "\n", - "subject.GenotypeTest\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Zygosity\n", - "\n", - "\n", - "subject.Zygosity\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.SubjectDeath\n", - "\n", - "\n", - "subject.SubjectDeath\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject.Source\n", - "\n", - "\n", - "subject.Subject.Source\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele.Sequence\n", - "\n", - "\n", - "subject.Allele.Sequence\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Strain\n", - "\n", - "\n", - "subject.Strain\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Strain->subject.Subject.Strain\n", - "\n", - "\n", - "\n", - "\n", - "subject.Cage\n", - "\n", - "\n", - "subject.Cage\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.SubjectCaging\n", - "\n", - "\n", - "subject.SubjectCaging\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Cage->subject.SubjectCaging\n", - "\n", - "\n", - "\n", - "\n", - "subject.Sequence\n", - "\n", - "\n", - "subject.Sequence\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Sequence->subject.GenotypeTest\n", - "\n", - "\n", - "\n", - "\n", - "subject.Sequence->subject.Allele.Sequence\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele\n", - "\n", - "\n", - "subject.Allele\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele->subject.Allele.Source\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele->subject.Line.Allele\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele->subject.Zygosity\n", - "\n", - "\n", - "\n", - "\n", - "subject.Allele->subject.Allele.Sequence\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject\n", - "\n", - "\n", - "subject.Subject\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.SubjectLitter\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.SubjectCullMethod\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.Lab\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.Protocol\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.Strain\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.User\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.BreedingPair.Father\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.GenotypeTest\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.Line\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Zygosity\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.SubjectDeath\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.Subject.Source\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair.Mother\n", - "\n", - "\n", - "subject.BreedingPair.Mother\n", - "\n", - "\n", - "\n", - "\n", - "\n", - 
"subject.Subject->subject.BreedingPair.Mother\n", - "\n", - "\n", - "\n", - "\n", - "subject.Subject->subject.SubjectCaging\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair->subject.Litter\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair->subject.BreedingPair.Father\n", - "\n", - "\n", - "\n", - "\n", - "subject.BreedingPair->subject.BreedingPair.Mother\n", - "\n", - "\n", - "\n", - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dj.Diagram(subject)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.Scan\n", - "\n", - "\n", - "scan.Scan\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanLocation\n", - "\n", - "\n", - "scan.ScanLocation\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.Scan->scan.ScanLocation\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanInfo\n", - "\n", - "\n", - "scan.ScanInfo\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.Scan->scan.ScanInfo\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanInfo.ScanFile\n", - "\n", - "\n", - "scan.ScanInfo.ScanFile\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanInfo->scan.ScanInfo.ScanFile\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanInfo.Field\n", - "\n", - "\n", - "scan.ScanInfo.Field\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "scan.ScanInfo->scan.ScanInfo.Field\n", - "\n", - "\n", - "\n", - "\n", - "scan.Channel\n", - "\n", - "\n", - "scan.Channel\n", - "\n", - "\n", - "\n", - "\n", - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dj.Diagram(scan)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation\n", - "\n", - "\n", - "imaging.Segmentation\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation.Mask\n", - "\n", - "\n", - "imaging.Segmentation.Mask\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation->imaging.Segmentation.Mask\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskClassification\n", - "\n", - "\n", - "imaging.MaskClassification\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation->imaging.MaskClassification\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Fluorescence\n", - "\n", - "\n", - "imaging.Fluorescence\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation->imaging.Fluorescence\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskType\n", - "\n", - "\n", - "imaging.MaskType\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskClassification.MaskType\n", - "\n", - "\n", - "imaging.MaskClassification.MaskType\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskType->imaging.MaskClassification.MaskType\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingMethod\n", - "\n", - "\n", - "imaging.ProcessingMethod\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingParamSet\n", - "\n", - "\n", - "imaging.ProcessingParamSet\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingMethod->imaging.ProcessingParamSet\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskClassificationMethod\n", - "\n", - "\n", - "imaging.MaskClassificationMethod\n", - "\n", - "\n", - "\n", - "\n", - "\n", - 
"imaging.MaskClassificationMethod->imaging.MaskClassification\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection.RigidMotionCorrection\n", - "\n", - "\n", - "imaging.MotionCorrection.RigidMotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingTask\n", - "\n", - "\n", - "imaging.ProcessingTask\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingParamSet->imaging.ProcessingTask\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Processing\n", - "\n", - "\n", - "imaging.Processing\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Processing.ProcessingOutputFile\n", - "\n", - "\n", - "imaging.Processing.ProcessingOutputFile\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Processing->imaging.Processing.ProcessingOutputFile\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection\n", - "\n", - "\n", - "imaging.MotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Processing->imaging.MotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "imaging.CellCompartment\n", - "\n", - "\n", - "imaging.CellCompartment\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ProcessingTask->imaging.Processing\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection.NonRigidMotionCorrection\n", - "\n", - "\n", - "imaging.MotionCorrection.NonRigidMotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection.Block\n", - "\n", - "\n", - "imaging.MotionCorrection.Block\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection.NonRigidMotionCorrection->imaging.MotionCorrection.Block\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Fluorescence.Trace\n", - "\n", - "\n", - "imaging.Fluorescence.Trace\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation.Mask->imaging.Fluorescence.Trace\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Segmentation.Mask->imaging.MaskClassification.MaskType\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Activity.Trace\n", - "\n", - "\n", - "imaging.Activity.Trace\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Fluorescence.Trace->imaging.Activity.Trace\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MaskClassification->imaging.MaskClassification.MaskType\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ActivityExtractionMethod\n", - "\n", - "\n", - "imaging.ActivityExtractionMethod\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Activity\n", - "\n", - "\n", - "imaging.Activity\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.ActivityExtractionMethod->imaging.Activity\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection.Summary\n", - "\n", - "\n", - "imaging.MotionCorrection.Summary\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Fluorescence->imaging.Fluorescence.Trace\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Fluorescence->imaging.Activity\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection->imaging.Segmentation\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection->imaging.MotionCorrection.RigidMotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection->imaging.MotionCorrection.NonRigidMotionCorrection\n", - "\n", - "\n", - "\n", - "\n", - "imaging.MotionCorrection->imaging.MotionCorrection.Summary\n", - "\n", - "\n", - "\n", - "\n", - "imaging.Activity->imaging.Activity.Trace\n", - "\n", - "\n", - "\n", - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - 
"dj.Diagram(imaging)" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "subject.Subject()" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "Session()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "scan.ScanInfo()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "scan.ScanInfo.Field()" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "imaging.ProcessingParamSet()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "imaging.ProcessingTask()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "jupytext": { - "formats": "ipynb,py" - }, - "kernelspec": { - "display_name": "Python 3.7.9 64-bit ('workflow-imaging': conda)", - "metadata": { - "interpreter": { - "hash": "134d995680d44ce2483a761d95a16e9ce77f34191f18929365aa0ab3279667a1" - } - }, - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9-final" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/notebooks/02-explore.py b/notebooks/02-explore.py deleted file mode 100644 index c80f6f1..0000000 --- a/notebooks/02-explore.py +++ /dev/null @@ -1,55 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.11.1 -# kernelspec: -# display_name: 'Python 3.7.9 64-bit (''workflow-imaging'': conda)' -# metadata: -# interpreter: -# hash: 134d995680d44ce2483a761d95a16e9ce77f34191f18929365aa0ab3279667a1 -# name: python3 -# --- - -# # [WIP] DataJoint U24 Workflow Imaging -# This notebook will describe the steps for interacting with the data ingested into `workflow-miniscope`. -# -# Prior to using this notebook, please refer to the [README](https://github.com/datajoint/workflow-imaging) for the topics listed below. -# + Installation instructions -# + Directory structure and file naming convention -# + Running the workflow - -# Change into the parent directory to find the `dj_local_conf.json` file. -# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database. 
-import os -os.chdir('..') - -from workflow_imaging.pipeline import * - -# ## Workflow architecture - -dj.Diagram(lab) - -dj.Diagram(subject) - -dj.Diagram(scan) - -dj.Diagram(imaging) - -subject.Subject() - -Session() - -scan.ScanInfo() - -scan.ScanInfo.Field() - -imaging.ProcessingParamSet() - -imaging.ProcessingTask() - - diff --git a/notebooks/03-automate.ipynb b/notebooks/03-automate.ipynb deleted file mode 100644 index 91a5ebf..0000000 --- a/notebooks/03-automate.ipynb +++ /dev/null @@ -1,168 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# [WIP] Automated workflow\n", - "### Method for inserting entries\n", - "\n", - "Modify `user_data/subjects.csv` and `user_data/sessions.csv`, and run the following commands\n", - "\n", - "or with the `ingest` method and accompanying `csv` files." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Change into the parent directory to find the `dj_local_conf.json` file. \n", - "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n", - "import os\n", - "os.chdir('..')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from workflow_miniscope.pipeline import *\n", - "from workflow_miniscope.ingest import ingest_subjects, ingest_sessions" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n---- Insert 1 entry(s) into subject.Subject ----\n\n---- Successfully completed ingest_subjects ----\n" - ] - } - ], - "source": [ - "ingest_subjects()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "output_type": "error", - "ename": "FileNotFoundError", - "evalue": "Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: /Users/kabilar/Documents/Data/U24/imaging_sample_data/pingping_miniscope/session0", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mingest_sessions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/Documents/GitHub/workflow-miniscope/workflow_miniscope/ingest.py\u001b[0m in \u001b[0;36mingest_sessions\u001b[0;34m(session_csv_path)\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 42\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mFileNotFoundError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: {sess_dir}'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 43\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0macq_software\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'Miniscope-DAQ-V3'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - 
"\u001b[0;31mFileNotFoundError\u001b[0m: Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: /Users/kabilar/Documents/Data/U24/imaging_sample_data/pingping_miniscope/session0" - ] - } - ], - "source": [ - "ingest_sessions()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pathlib\n", - "from workflow_miniscope.paths import get_imaging_root_data_dir\n", - "\n", - "root_dir = pathlib.Path(get_imaging_root_data_dir())\n", - "\n", - "for scan_key in (scan.Scan & scan.ScanInfo - imaging.ProcessingTask).fetch('KEY'):\n", - " scan_file = root_dir / (scan.ScanInfo.ScanFile & scan_key).fetch('file_path')[0]\n", - " recording_dir = scan_file.parent\n", - "\n", - " miniscope_analysis_dir = recording_dir / 'miniscope_analysis'\n", - " if miniscope_analysis_dir.exists():\n", - " imaging.ProcessingTask.insert1({**scan_key,\n", - " 'paramset_idx': 0,\n", - " 'processing_output_dir': miniscope_analysis_dir.as_posix()})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "+ To this end, we make use of a convenient function `imaging.Curation().create1_from_processing_task()`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for key in (imaging.ProcessingTask - imaging.Curation).fetch('KEY'):\n", - " imaging.Curation().create1_from_processing_task(key)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Method for populating tables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from workflow_miniscope.populate import populate\n", - "populate(display_progress=False)" - ] - } - ], - "metadata": { - "jupytext": { - "formats": "ipynb,py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.10" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3.7.10 64-bit ('workflow-miniscope': conda)", - "metadata": { - "interpreter": { - "hash": "fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c" - } - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/notebooks/03-automate.py b/notebooks/03-automate.py deleted file mode 100644 index adb1f09..0000000 --- a/notebooks/03-automate.py +++ /dev/null @@ -1,64 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.11.1 -# kernelspec: -# display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)' -# metadata: -# interpreter: -# hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c -# name: python3 -# --- - -# # [WIP] Automated workflow -# ### Method for inserting entries -# -# Modify `user_data/subjects.csv` and `user_data/sessions.csv`, and run the following commands -# -# or with the `ingest` method and accompanying `csv` files. - -# Change into the parent directory to find the `dj_local_conf.json` file. -# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database. 
-import os
-os.chdir('..')
-
-from workflow_miniscope.pipeline import *
-from workflow_miniscope.ingest import ingest_subjects, ingest_sessions
-
-ingest_subjects()
-
-ingest_sessions()
-
-# +
-import pathlib
-from workflow_miniscope.paths import get_imaging_root_data_dir
-
-root_dir = pathlib.Path(get_imaging_root_data_dir())
-
-for scan_key in (scan.Scan & scan.ScanInfo - imaging.ProcessingTask).fetch('KEY'):
-    scan_file = root_dir / (scan.ScanInfo.ScanFile & scan_key).fetch('file_path')[0]
-    recording_dir = scan_file.parent
-
-    miniscope_analysis_dir = recording_dir / 'miniscope_analysis'
-    if miniscope_analysis_dir.exists():
-        imaging.ProcessingTask.insert1({**scan_key,
-                                        'paramset_idx': 0,
-                                        'processing_output_dir': miniscope_analysis_dir.as_posix()})
-# -
-
-
-
-# + To this end, we make use of a convenient function `imaging.Curation().create1_from_processing_task()`
-
-for key in (imaging.ProcessingTask - imaging.Curation).fetch('KEY'):
-    imaging.Curation().create1_from_processing_task(key)
-
-# ### Method for populating tables
-
-from workflow_miniscope.populate import populate
-populate(display_progress=False)
diff --git a/notebooks/04-drop.py b/notebooks/04-drop.py
deleted file mode 100644
index 0e9b3c1..0000000
--- a/notebooks/04-drop.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:light
-#     text_representation:
-#       extension: .py
-#       format_name: light
-#       format_version: '1.5'
-#       jupytext_version: 1.11.1
-#   kernelspec:
-#     display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-#     metadata:
-#       interpreter:
-#         hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-#     name: python3
-# ---
-
-# # Drop schemas
-#
-# + This notebook is NOT required.
-# + Schemas are not typically dropped in a production workflow with real data in it.
-# + At the developmental phase, it might be required for the table redesign.
-# + When dropping all schemas is needed, the following is the dependency order.
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os -os.chdir('..') - -from workflow_miniscope.pipeline import * - -# + -# imaging.schema.drop() -# scan.schema.drop() -# session.schema.drop() -# subject.schema.drop() -# lab.schema.drop() -# - - - From 13108f70101535c537565251170e6eac00bef698 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 20:15:16 -0500 Subject: [PATCH 05/32] Add jupytext paired script --- notebooks/06-drop-optional.ipynb | 60 ++++++++++++++------------- notebooks/scripts/06-drop-optional.py | 35 ++++++++++++++++ 2 files changed, 66 insertions(+), 29 deletions(-) create mode 100644 notebooks/scripts/06-drop-optional.py diff --git a/notebooks/06-drop-optional.ipynb b/notebooks/06-drop-optional.ipynb index e2d0bb5..c0cb54c 100644 --- a/notebooks/06-drop-optional.ipynb +++ b/notebooks/06-drop-optional.ipynb @@ -1,43 +1,22 @@ { - "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - }, - "orig_nbformat": 2, - "kernelspec": { - "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c", - "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)" - } - }, - "nbformat": 4, - "nbformat_minor": 2, "cells": [ { + "cell_type": "markdown", + "metadata": {}, "source": [ "# Drop schemas\n", "\n", "+ Schemas are not typically dropped in a production workflow with real data in it. \n", "+ At the developmental phase, it might be required for the table redesign.\n", "+ When dropping all schemas is needed, the following is the dependency order." - ], - "cell_type": "markdown", - "metadata": {} + ] }, { + "cell_type": "markdown", + "metadata": {}, "source": [ "Change into the parent directory to find the `dj_local_conf.json` file. " - ], - "cell_type": "markdown", - "metadata": {} + ] }, { "cell_type": "code", @@ -77,5 +56,28 @@ "outputs": [], "source": [] } - ] -} \ No newline at end of file + ], + "metadata": { + "jupytext": { + "formats": "ipynb,scripts//py" + }, + "kernelspec": { + "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)", + "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/scripts/06-drop-optional.py b/notebooks/scripts/06-drop-optional.py new file mode 100644 index 0000000..28ae301 --- /dev/null +++ b/notebooks/scripts/06-drop-optional.py @@ -0,0 +1,35 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,scripts//py +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.13.7 +# kernelspec: +# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)' +# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c +# --- + +# # Drop schemas +# +# + Schemas are not typically dropped in a production workflow with real data in it. +# + At the developmental phase, it might be required for the table redesign. +# + When dropping all schemas is needed, the following is the dependency order. 
+ +# Change into the parent directory to find the `dj_local_conf.json` file. + +import os +os.chdir('..') + +from workflow_miniscope.pipeline import * + +# + +# miniscope.schema.drop() +# session.schema.drop() +# subject.schema.drop() +# lab.schema.drop() +# - + + From c5df6b0c4c9651688e4a37e6d3ec508a3f855180 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 20:20:20 -0500 Subject: [PATCH 06/32] Update Compose file --- docker/docker-compose-test.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/docker-compose-test.yaml b/docker/docker-compose-test.yaml index 8f291b1..bb2bb5b 100644 --- a/docker/docker-compose-test.yaml +++ b/docker/docker-compose-test.yaml @@ -17,6 +17,7 @@ services: build: context: ../../ dockerfile: ./workflow-miniscope/docker/Dockerfile.test + env_file: .env image: workflow-miniscope-test:0.1.0a2 container_name: workflow-miniscope-test environment: @@ -30,7 +31,7 @@ services: - -c - | echo "------ INTEGRATION TESTS ------" - pytest -sv --cov-report term-missing --cov=workflow_miniscope -p no:warnings + pytest -sv --cov-report term-missing --cov=workflow_miniscope -p no:warnings tests/ tail -f /dev/null volumes: - ${TEST_DATA_DIR}:/main/test_data From 0b99792366cff6ed2204baa1520e8b38d7618ddb Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 21:18:17 -0500 Subject: [PATCH 07/32] Update data download notebook --- notebooks/00-data-download-optional.ipynb | 167 ++++++++++++++++++ .../scripts/00-data-download-optional.py | 72 ++++++++ 2 files changed, 239 insertions(+) create mode 100644 notebooks/00-data-download-optional.ipynb create mode 100644 notebooks/scripts/00-data-download-optional.py diff --git a/notebooks/00-data-download-optional.ipynb b/notebooks/00-data-download-optional.ipynb new file mode 100644 index 0000000..3c0f38c --- /dev/null +++ b/notebooks/00-data-download-optional.ipynb @@ -0,0 +1,167 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Download example dataset\n", + "\n", + "+ This workflow will need miniscope calcium imaging data collected from the UCLA Miniscope and processed with CaImAn. We provide an example dataset to be downloaded to run through the workflow. 
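+# Note: an optional sketch, not part of the original workflow. `schema.drop()`
+# asks for interactive confirmation before dropping each schema; if you are
+# intentionally dropping everything, DataJoint's safemode can be disabled first:
+#
+# import datajoint as dj
+# dj.config['safemode'] = False   # drop() and delete() then skip the prompt
+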
This notebook walks you through the process of downloading the dataset.\n",
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "jupytext": { + "formats": "ipynb,scripts//py", + "main_language": "python" + }, + "kernelspec": { + "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)", + "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scripts/00-data-download-optional.py b/notebooks/scripts/00-data-download-optional.py new file mode 100644 index 0000000..23781da --- /dev/null +++ b/notebooks/scripts/00-data-download-optional.py @@ -0,0 +1,72 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,scripts//py +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.13.7 +# kernelspec: +# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)' +# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c +# --- + +# # Download example dataset +# +# + This workflow will need miniscope calcium imaging data collected from the UCLA Miniscope and processed with CaImAn. We provide an example dataset to be downloaded to run through the workflow. This notebook walks you through the process to download the dataset. +# +# ## Install `djarchive-client` +# +# + The example dataset is hosted on `djarchive`, an AWS storage. +# +# + We provide a client package, [djarchive-client](https://github.com/datajoint/djarchive-client), to download the data which can be installed with pip: + +pip install git+https://github.com/datajoint/djarchive-client.git + +# ## Download example datasets using `djarchive-client` + +import djarchive_client +client = djarchive_client.client() + +# Browse the datasets that are available on `djarchive`: + +list(client.datasets()) + +# Browse the different versions of each dataset: + +list(client.revisions()) + +# To download the dataset, let's prepare a directory, for example in `/tmp`: + +import os +os.mkdir('/tmp/example_data') + +# Run download for a given dataset and revision: + +client.download('workflow-miniscope-test-set', target_directory='/tmp/example_data', revision='v1') + +# ## Directory structure +# +# + After downloading, the directory will be organized as follows: +# +# ``` +# /tmp/example_data/ +# - subject1/ +# - session1/ +# - 0.avi +# - metaData.json +# - timeStamps.csv +# - caiman/ +# - subject1_session1.hdf5 +# ``` +# +# + subject 1 data is recorded with the UCLA Miniscope and Miniscope-DAQ-V4 acquisition software, and processed with CaImAn. +# +# + We will use the dataset for subject 1 as an example for the rest of the notebooks. If you use your own dataset for the workflow, change the path accordingly. +# +# ## Next step +# +# + In the next notebook ([01-configure](01-configure.ipynb)) we will set up the configuration file for the workflow. 
+ +# From 8645c5e4981cce53228e824b2369aba88222a1a3 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 21:22:30 -0500 Subject: [PATCH 08/32] Update attribute name and import statement --- workflow_miniscope/pipeline.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py index 916879f..9e42d53 100644 --- a/workflow_miniscope/pipeline.py +++ b/workflow_miniscope/pipeline.py @@ -9,7 +9,8 @@ from element_animal.subject import Subject from element_session.session_with_datetime import Session -from .paths import get_miniscope_root_data_dir +from .paths import get_miniscope_root_data_dir, get_session_directory, \ + get_processed_root_data_dir if 'custom' not in dj.config: @@ -33,7 +34,7 @@ @lab.schema class Equipment(dj.Manual): definition = """ - scanner: varchar(32) + acquisition_hardware: varchar(32) """ From 0fe7b3c497cce4a55b274b02ebe4ecd4d30d885e Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 21:43:50 -0500 Subject: [PATCH 09/32] Update workflow structure notebook --- .../02-workflow-structure-optional.ipynb | 366 ++++++++++++++++++ .../scripts/02-workflow-structure-optional.py | 151 ++++++++ 2 files changed, 517 insertions(+) create mode 100644 notebooks/02-workflow-structure-optional.ipynb create mode 100644 notebooks/scripts/02-workflow-structure-optional.py diff --git a/notebooks/02-workflow-structure-optional.ipynb b/notebooks/02-workflow-structure-optional.ipynb new file mode 100644 index 0000000..0c5b74d --- /dev/null +++ b/notebooks/02-workflow-structure-optional.ipynb @@ -0,0 +1,366 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to the workflow structure\n", + "\n", + "This notebook gives a brief overview of the workflow structure and introduces some useful DataJoint tools to facilitate the exploration.\n", + "\n", + "+ DataJoint needs to be pre-configured before running this notebook, if you haven't set up the configuration, refer to notebook [01-configure](01-configure.ipynb).\n", + "\n", + "+ If you are familiar with DataJoint and the workflow structure, proceed directly to the next notebook [03-process](03-process.ipynb) to run the workflow.\n", + "\n", + "+ For a more thorough introduction of DataJoint functions, please visit our general tutorial site - [DataJoint CodeBook](https://codebook.datajoint.io).\n", + "\n", + "To load the local configuration, we will change the directory to the package root." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "os.chdir('..')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Schemas and tables\n", + "\n", + "+ The current workflow is composed of multiple database schemas, each of them corresponds to a module within `workflow_miniscope.pipeline`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import datajoint as dj\n", + "from workflow_miniscope.pipeline import lab, subject, session, miniscope" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "+ Each module contains a schema object that enables interaction with the schema in the database." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "Each module imported above corresponds to one schema inside the database. 
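+# Before moving on, an optional sanity check (a sketch, not part of the
+# original notebook): list the downloaded files to confirm the layout above.
+# It assumes the `/tmp/example_data` target directory used in this notebook.
+
+import pathlib
+for path in sorted(pathlib.Path('/tmp/example_data').rglob('*')):
+    print(path)
+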
For example, `miniscope` corresponds to the `neuro_miniscope` schema in the database."
\n", + "+ One-to-many primary\n", + " + Visually represented with a thin solid line.\n", + " + Inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well.\n", + "+ Secondary dependency\n", + " + Visually represented with a dashed line.\n", + " + The child table inherits the primary key fields from parent table as its own secondary attribute." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "`dj.Diagram()`: plot the diagram of the tables and dependencies. It could be used to plot tables in a schema or selected tables." + }, + "outputs": [], + "source": [ + "# plot diagram of tables in multiple schemas\n", + "dj.Diagram(subject) + dj.Diagram(session) + dj.Diagram(miniscope)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot diagram of selected tables and schemas\n", + "dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + dj.Diagram(miniscope)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "lines_to_next_cell": 0, + "title": "`heading`:" + }, + "source": [ + "+ `describe()`: show table definition with foreign key references." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.Processing.describe();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "+ `heading`: show attribute definitions regardless of foreign key references" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "`heading`: show table attributes regardless of foreign key references." + }, + "outputs": [], + "source": [ + "miniscope.Processing.heading" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "title": "ephys" + }, + "source": [ + "# DataJoint Elements installed in `workflow-miniscope`\n", + "\n", + "+ [`lab`](https://github.com/datajoint/element-lab): lab management related information, such as Lab, User, Project, Protocol, Source." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.Diagram(lab)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "+ [`subject`](https://github.com/datajoint/element-animal): general animal information, such as User, Genetic background." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.Diagram(subject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "[subject](https://github.com/datajoint/element-animal): contains the basic information of subject, including Strain, Line, Subject, Zygosity, and SubjectDeath information." + }, + "outputs": [], + "source": [ + "subject.Subject.describe();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "+ [`session`](https://github.com/datajoint/element-session): General information of experimental sessions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.Diagram(session)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "[session](https://github.com/datajoint/element-session): experimental session information" + }, + "outputs": [], + "source": [ + "session.Session.describe();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "+ [`miniscope`](https://github.com/datajoint/element-miniscope): miniscope raw recording and processed data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "[probe and ephys](https://github.com/datajoint/element-array-ephys): Neuropixel based probe and ephys tables" + }, + "outputs": [], + "source": [ + "dj.Diagram(miniscope)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary and next step\n", + "\n", + "+ This notebook introduced the overall structures of the schemas and tables in the workflow and relevant tools to explore the schema structure and table definitions.\n", + "\n", + "+ In the next notebook [03-process](03-process.ipynb), we will introduce the detailed steps to run through the workflow." + ] + } + ], + "metadata": { + "jupytext": { + "encoding": "# -*- coding: utf-8 -*-", + "formats": "ipynb,scripts//py" + }, + "kernelspec": { + "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)", + "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scripts/02-workflow-structure-optional.py b/notebooks/scripts/02-workflow-structure-optional.py new file mode 100644 index 0000000..1ff2976 --- /dev/null +++ b/notebooks/scripts/02-workflow-structure-optional.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +# --- +# jupyter: +# jupytext: +# formats: ipynb,scripts//py +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.13.7 +# kernelspec: +# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)' +# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c +# --- + +# # Introduction to the workflow structure +# +# This notebook gives a brief overview of the workflow structure and introduces some useful DataJoint tools to facilitate the exploration. +# +# + DataJoint needs to be pre-configured before running this notebook, if you haven't set up the configuration, refer to notebook [01-configure](01-configure.ipynb). +# +# + If you are familiar with DataJoint and the workflow structure, proceed directly to the next notebook [03-process](03-process.ipynb) to run the workflow. +# +# + For a more thorough introduction of DataJoint functions, please visit our general tutorial site - [DataJoint CodeBook](https://codebook.datajoint.io). +# +# To load the local configuration, we will change the directory to the package root. 
+ +import os +os.chdir('..') + +# ## Schemas and tables +# +# + The current workflow is composed of multiple database schemas, each of them corresponds to a module within `workflow_miniscope.pipeline` + +import datajoint as dj +from workflow_miniscope.pipeline import lab, subject, session, miniscope + +# + Each module contains a schema object that enables interaction with the schema in the database. + +# + Each module imported above corresponds to one schema inside the database. For example, `ephys` corresponds to `neuro_ephys` schema in the database. +miniscope.schema + +# + The table classes in the module corresponds to a table in the schema in the database. + +# + Each datajoint table class inside the module corresponds to a table inside the schema. For example, the class `ephys.EphysRecording` correponds to the table `_ephys_recording` in the schema `neuro_ephys` in the database. +# preview columns and contents in a table +miniscope.Processing() + +# + The first time importing the modules, empty schemas and tables will be created in the database. [markdown] +# # + By importing the modules for the first time, the schemas and tables will be created inside the database. +# +# # + Once created, importing modules will not create schemas and tables again, but the existing schemas/tables can be accessed and manipulated by the modules. +# + The schemas and tables will not be re-created when importing modules if they have existed. [markdown] +# ## DataJoint tools to explore schemas and tables +# +# # + `dj.list_schemas()`: list all schemas a user has access to in the current database +# + `dj.list_schemas()`: list all schemas a user could access. +dj.list_schemas() + +# + `dj.Diagram()`: plot tables and dependencies in a schema. + +# + `dj.Diagram()`: plot tables and dependencies +# plot diagram for all tables in a schema +dj.Diagram(miniscope) +# - + +# **Table tiers**: +# +# + Manual table +# + Visually represented with a green box. +# + Manually inserted table +# + Expect new entries daily, e.g. Subject, Recording. +# + Lookup table +# + Visually represented with a gray box. +# + Pre-inserted table +# + Commonly used for general facts or parameters. e.g. Strain, ProcessingParamSet. +# + Imported table +# + Visually represented with a blue oval. +# + Auto-processing table +# + Processing depends on the importing of external files. e.g. `Processing` requires output files from CaImAn. +# + Computed table +# + Visually represented with a red circle. +# + Auto-processing table +# + Processing does not depend on files external to the database. +# + Part table +# + Visually represented with plain text. +# + As an appendix to the master table, all the part entries of a given master entry represent a intact set of the master entry. e.g. `Mask` of a `Segmentation`. +# +# **Dependencies**: +# +# + One-to-one primary +# + Visually represented with a thick solid line. +# + Share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key. +# + One-to-many primary +# + Visually represented with a thin solid line. +# + Inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well. +# + Secondary dependency +# + Visually represented with a dashed line. +# + The child table inherits the primary key fields from parent table as its own secondary attribute. + +# + `dj.Diagram()`: plot the diagram of the tables and dependencies. It could be used to plot tables in a schema or selected tables. 
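+# (`import datajoint` reads `dj_local_conf.json` from the current working
+# directory, which is why we move to the package root before importing.)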
+# plot diagram of tables in multiple schemas +dj.Diagram(subject) + dj.Diagram(session) + dj.Diagram(miniscope) +# - + +# plot diagram of selected tables and schemas +dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + dj.Diagram(miniscope) + +# + `heading`: [markdown] +# # + `describe()`: show table definition with foreign key references. +# - +miniscope.Processing.describe(); + +# + `heading`: show attribute definitions regardless of foreign key references + +# + `heading`: show table attributes regardless of foreign key references. +miniscope.Processing.heading + +# + ephys [markdown] +# # DataJoint Elements installed in `workflow-miniscope` +# +# # + [`lab`](https://github.com/datajoint/element-lab): lab management related information, such as Lab, User, Project, Protocol, Source. +# - + +dj.Diagram(lab) + +# + [`subject`](https://github.com/datajoint/element-animal): general animal information, such as User, Genetic background. + +dj.Diagram(subject) + +# + [subject](https://github.com/datajoint/element-animal): contains the basic information of subject, including Strain, Line, Subject, Zygosity, and SubjectDeath information. +subject.Subject.describe(); + +# + [`session`](https://github.com/datajoint/element-session): General information of experimental sessions. + +dj.Diagram(session) + +# + [session](https://github.com/datajoint/element-session): experimental session information +session.Session.describe(); + +# + [`miniscope`](https://github.com/datajoint/element-miniscope): miniscope raw recording and processed data + +# + [probe and ephys](https://github.com/datajoint/element-array-ephys): Neuropixel based probe and ephys tables +dj.Diagram(miniscope) +# - + +# ## Summary and next step +# +# + This notebook introduced the overall structures of the schemas and tables in the workflow and relevant tools to explore the schema structure and table definitions. +# +# + In the next notebook [03-process](03-process.ipynb), we will introduce the detailed steps to run through the workflow. 
From e5e5170d4162f4452052608bc9325a888273ae04 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 21:48:02 -0500 Subject: [PATCH 10/32] Update path handling --- notebooks/02-workflow-structure-optional.ipynb | 2 +- notebooks/06-drop-optional.ipynb | 2 +- notebooks/scripts/02-workflow-structure-optional.py | 2 +- notebooks/scripts/06-drop-optional.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/notebooks/02-workflow-structure-optional.ipynb b/notebooks/02-workflow-structure-optional.ipynb index 0c5b74d..b93d50c 100644 --- a/notebooks/02-workflow-structure-optional.ipynb +++ b/notebooks/02-workflow-structure-optional.ipynb @@ -24,7 +24,7 @@ "outputs": [], "source": [ "import os\n", - "os.chdir('..')" + "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')" ] }, { diff --git a/notebooks/06-drop-optional.ipynb b/notebooks/06-drop-optional.ipynb index c0cb54c..e955455 100644 --- a/notebooks/06-drop-optional.ipynb +++ b/notebooks/06-drop-optional.ipynb @@ -25,7 +25,7 @@ "outputs": [], "source": [ "import os\n", - "os.chdir('..')" + "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')" ] }, { diff --git a/notebooks/scripts/02-workflow-structure-optional.py b/notebooks/scripts/02-workflow-structure-optional.py index 1ff2976..b6d0b18 100644 --- a/notebooks/scripts/02-workflow-structure-optional.py +++ b/notebooks/scripts/02-workflow-structure-optional.py @@ -26,7 +26,7 @@ # To load the local configuration, we will change the directory to the package root. import os -os.chdir('..') +if os.path.basename(os.getcwd())=='notebooks': os.chdir('..') # ## Schemas and tables # diff --git a/notebooks/scripts/06-drop-optional.py b/notebooks/scripts/06-drop-optional.py index 28ae301..6e9e4ba 100644 --- a/notebooks/scripts/06-drop-optional.py +++ b/notebooks/scripts/06-drop-optional.py @@ -21,7 +21,7 @@ # Change into the parent directory to find the `dj_local_conf.json` file. import os -os.chdir('..') +if os.path.basename(os.getcwd())=='notebooks': os.chdir('..') from workflow_miniscope.pipeline import * From 781e75dbe6f858a80883df78bce88dcfbafe5ef7 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sat, 23 Apr 2022 23:30:46 -0500 Subject: [PATCH 11/32] Update database configuration notebook --- notebooks/01-configure.ipynb | 221 ++++++++++++++++++++++++++++++ notebooks/scripts/01-configure.py | 94 +++++++++++++ 2 files changed, 315 insertions(+) create mode 100644 notebooks/01-configure.ipynb create mode 100644 notebooks/scripts/01-configure.py diff --git a/notebooks/01-configure.ipynb b/notebooks/01-configure.ipynb new file mode 100644 index 0000000..65c7f52 --- /dev/null +++ b/notebooks/01-configure.ipynb @@ -0,0 +1,221 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Configure DataJoint connection to the database\n", + "\n", + "+ To run `workflow-miniscope`, we need to properly set up the DataJoint configuration. The configuration will be saved in a file called `dj_local_conf.json` on each machine and this notebook walks you through the process.\n", + "\n", + "+ The configuration only needs to be set up once. If you have gone through the configuration before, directly go to [02-workflow-structure](02-workflow-structure-optional.ipynb).\n", + "\n", + "## Set up configuration in root directory of this package\n", + "\n", + "+ As a convention, we set the configuration up in the root directory of the `workflow-miniscope` package and always start importing DataJoint and pipeline modules from there." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import datajoint as dj" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configure database host address and credentials\n", + "\n", + "Now let's set up the host, user and password in the `dj.config` global variable" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "dj.config['database.host'] = '{YOUR_HOST}'\n", + "dj.config['database.user'] = '{YOUR_USERNAME}'\n", + "dj.config['database.password'] = getpass.getpass() # enter the password securily" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should be able to connect to the database at this stage." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.conn()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configure the `custom` field in `dj.config` for element-miniscope\n", + "\n", + "+ The major component of the current workflow is the [DataJoint element-miniscope](https://github.com/datajoint/element-miniscope). `element-miniscope` requires configurations in the field `custom` in `dj.config`:\n", + "\n", + "### Database prefix\n", + "\n", + "+ Giving a prefix to schema could help on the configuration of privilege settings. For example, if we set prefix `neuro_`, every schema created with the current workflow will start with `neuro_`, e.g. `neuro_lab`, `neuro_subject`, `neuro_session`, and `neuro_miniscope`.\n", + "\n", + "+ The prefix could be configurated as follows in `dj.config`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.config['custom'] = {'database.prefix': 'neuro_'}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Root directories for miniscope calcium imaging raw data and processed results\n", + "\n", + "+ `miniscope_root_data_dir` field indicates the root directory for the miniscope raw data from the Miniscope-DAQ acquisition software (e.g. `*.avi`) or the processed results from CaImAn (e.g. `*.hdf5`). The root path typically do not contain information of subjects or sessions, all data from subjects/sessions should be subdirectories in the root path.\n", + "\n", + "+ In the database, every path for the raw miniscope data is relative to this root path. The benefit is that the absolute path could be configured for each machine, and when data transfer happens, we just need to change the root directory in the config file.\n", + "\n", + "+ The workflow supports multiple root directories. If there are multiple possible root directories, specify the `miniscope_root_data_dir` as a list.\n", + "\n", + "+ The root path(s) are specific to each machine, as the name of drive mount could be different for different operating systems or machines.\n", + "\n", + "+ In the context of the workflow, all the paths saved into the database or saved in the config file need to be in the POSIX standards (Unix/Linux), with `/`. The path conversion for machines of any operating system is taken care of inside the elements." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If using our example dataset, downloaded with this notebook [00-data-download](00-data-download-optional.ipynb), the root directory will be:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# If there is only one root path:\n", + "dj.config['custom']['miniscope_root_data_dir'] = '/tmp/example_data'\n", + "# If there are multiple possible root paths:\n", + "dj.config['custom']['miniscope_root_data_dir'] = ['/tmp/example_data']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.config" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Save the configuration as a json file\n", + "\n", + "With the proper configurations, we could save this as a file, either as a local json file, or a global file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.config.save_local()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ls" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Local configuration file is saved as `dj_local_conf.json` in the root directory of this package `workflow-miniscope`. Next time if you change your directory to `workflow-miniscope` before importing DataJoint and the pipeline packages, the configurations will get properly loaded.\n", + "\n", + "If saved globally, there will be a hidden configuration file saved in your root directory. The configuration will be loaded no matter where the directory is." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# dj.config.save_global()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Step\n", + "\n", + "After the configuration, we will be able to run through the workflow with the [02-workflow-structure](02-workflow-structure-optional.ipynb) notebook." + ] + } + ], + "metadata": { + "jupytext": { + "formats": "ipynb,scripts//py" + }, + "kernelspec": { + "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)", + "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scripts/01-configure.py b/notebooks/scripts/01-configure.py new file mode 100644 index 0000000..fd88344 --- /dev/null +++ b/notebooks/scripts/01-configure.py @@ -0,0 +1,94 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,scripts//py +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.13.7 +# kernelspec: +# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)' +# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c +# --- + +# # Configure DataJoint connection to the database +# +# + To run `workflow-miniscope`, we need to properly set up the DataJoint configuration. 
The configuration will be saved in a file called `dj_local_conf.json` on each machine, and this notebook walks you through the process.
+ +# If using our example dataset, downloaded with this notebook [00-data-download](00-data-download-optional.ipynb), the root directory will be: + +# If there is only one root path: +dj.config['custom']['miniscope_root_data_dir'] = '/tmp/example_data' +# If there are multiple possible root paths: +dj.config['custom']['miniscope_root_data_dir'] = ['/tmp/example_data'] + +dj.config + +# ## Save the configuration as a json file +# +# With the proper configurations, we could save this as a file, either as a local json file, or a global file. + +dj.config.save_local() + +# ls + +# Local configuration file is saved as `dj_local_conf.json` in the root directory of this package `workflow-miniscope`. Next time if you change your directory to `workflow-miniscope` before importing DataJoint and the pipeline packages, the configurations will get properly loaded. +# +# If saved globally, there will be a hidden configuration file saved in your root directory. The configuration will be loaded no matter where the directory is. + +# + +# dj.config.save_global() +# - + +# ## Next Step +# +# After the configuration, we will be able to run through the workflow with the [02-workflow-structure](02-workflow-structure-optional.ipynb) notebook. From c9ffda9cf821641219263b93efae06f449cbb43a Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 14:10:42 -0500 Subject: [PATCH 12/32] Update automated ingestion script --- workflow_miniscope/ingest.py | 91 ++++++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/workflow_miniscope/ingest.py b/workflow_miniscope/ingest.py index 7f6f887..a30f994 100644 --- a/workflow_miniscope/ingest.py +++ b/workflow_miniscope/ingest.py @@ -1,13 +1,14 @@ import pathlib import csv from datetime import datetime +import json -from .pipeline import subject, imaging, scan, session, Equipment -from .paths import get_imaging_root_data_dir - +from .pipeline import subject, session, Equipment, miniscope +from .paths import get_miniscope_root_data_dir +from element_interface.utils import find_full_path, recursive_search def ingest_subjects(subject_csv_path='./user_data/subjects.csv'): - # -------------- Insert new "Subject" -------------- + print('\n-------------- Insert new "Subject" --------------') with open(subject_csv_path, newline= '') as f: input_subjects = list(csv.DictReader(f, delimiter=',')) @@ -18,53 +19,71 @@ def ingest_subjects(subject_csv_path='./user_data/subjects.csv'): def ingest_sessions(session_csv_path='./user_data/sessions.csv'): - root_data_dir = get_imaging_root_data_dir() - # ---------- Insert new "Session" and "Scan" --------- + print('\n---- Insert new `Session` and `Recording` ----') with open(session_csv_path, newline='') as f: input_sessions = list(csv.DictReader(f, delimiter=',')) - # Folder structure: root / subject / session / .avi (raw) - session_list, session_dir_list, scan_list, scanner_list = [], [], [], [] - - for sess in input_sessions: - sess_dir = pathlib.Path(sess['session_dir']) - - # Search for Miniscope-DAQ-V3 files (in that order) - for scan_pattern, scan_type, glob_func in zip(['ms*.avi'], - ['Miniscope-DAQ-V3'], - [sess_dir.glob]): - scan_filepaths = [fp.as_posix() for fp in glob_func(scan_pattern)] - if len(scan_filepaths): - acq_software = scan_type + session_list, session_dir_list, recording_list, hardware_list = [], [], [], [] + + for single_session in input_sessions: + acquisition_software = single_session['acquisition_software'] + if acquisition_software not in ['Miniscope-DAQ-V3', 
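+# To make the relative-path convention concrete, here is a short sketch using
+# the example-dataset layout from [00-data-download](00-data-download-optional.ipynb):
+
+import pathlib
+root_dir = pathlib.Path('/tmp/example_data')  # machine-specific root directory
+relative_dir = 'subject1/session1'            # POSIX-style path as stored in the database
+print(root_dir / relative_dir)                # absolute path resolved on this machine
+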
'Miniscope-DAQ-V4']: + raise NotImplementedError(f'Not implemented for acquisition software of ' + f'type {acquisition_software}.') + + # Folder structure: root / subject / session / .avi (raw) + session_dir = pathlib.Path(single_session['session_dir']) + session_path = find_full_path(get_miniscope_root_data_dir(), + session_dir) + recording_filepaths = [file_path.as_posix() for file_path + in session_path.glob('*.avi')] + if not recording_filepaths: + raise FileNotFoundError(f'No .avi files found in ' + f'{session_path}') + + # Read Miniscope DAQ *.json file + for metadata_filepath in session_path.glob('*.json'): + try: + recording_time = datetime.fromtimestamp( + metadata_filepath.stat().st_ctime) + with open(metadata_filepath) as json_file: + recording_metadata = json.load(json_file) + acquisition_hardware = recursive_search('deviceType', + recording_metadata) break - else: - raise FileNotFoundError(f'Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: {sess_dir}') + except OSError: + print(f'Could not find `deviceType` in Miniscope-DAQ json: ' + f'{metadata_filepath}') + continue - if acq_software == 'Miniscope-DAQ-V3': - daq_v3_fp = pathlib.Path(scan_filepaths[0]) - recording_time = datetime.fromtimestamp(daq_v3_fp.stat().st_ctime) - scanner = 'Miniscope-DAQ-V3' - else: - raise NotImplementedError(f'Processing scan from acquisition software of type {acq_software} is not yet implemented') - - session_key = {'subject': sess['subject'], 'session_datetime': recording_time} + session_key = dict(subject=single_session['subject'], + session_datetime=recording_time) if session_key not in session.Session(): - scanner_list.append({'scanner': scanner}) + hardware_list.append(dict(acquisition_hardware=acquisition_hardware)) + session_list.append(session_key) - scan_list.append({**session_key, 'scan_id': 0, 'scanner': scanner, 'acq_software': acq_software}) - session_dir_list.append({**session_key, 'session_dir': sess_dir.relative_to(root_data_dir).as_posix()}) + session_dir_list.append(dict(**session_key, + session_dir= + session_dir.relative_to(get_miniscope_root_data_dir()).as_posix())) + + recording_list.append(dict(**session_key, + recording_id=0, # Assumes one recording per session + acquisition_hardware=acquisition_hardware, + acquisition_software=acquisition_software, + recording_directory= + session_dir.relative_to(get_miniscope_root_data_dir()).as_posix())) - print(f'\n---- Insert {len(set(val for dic in scanner_list for val in dic.values()))} entry(s) into experiment.Equipment ----') - Equipment.insert(scanner_list, skip_duplicates=True) + print(f'\n---- Insert {len(set(val for dic in hardware_list for val in dic.values()))} entry(s) into lab.Equipment ----') + Equipment.insert(hardware_list, skip_duplicates=True) print(f'\n---- Insert {len(session_list)} entry(s) into session.Session ----') session.Session.insert(session_list) session.SessionDirectory.insert(session_dir_list) - print(f'\n---- Insert {len(scan_list)} entry(s) into scan.Scan ----') - scan.Scan.insert(scan_list) + print(f'\n---- Insert {len(recording_list)} entry(s) into miniscope.Recording ----') + miniscope.Recording.insert(recording_list) print('\n---- Successfully completed ingest_sessions ----') From acee799732290b5d14751ac74cd0008dba7db773 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 14:12:06 -0500 Subject: [PATCH 13/32] Update package version --- workflow_miniscope/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/workflow_miniscope/version.py b/workflow_miniscope/version.py index 523b6b7..fb30593 100644 --- a/workflow_miniscope/version.py +++ b/workflow_miniscope/version.py @@ -1,2 +1,2 @@ """Package metadata""" -__version__ = '0.1.0a2' \ No newline at end of file +__version__ = '0.1.0' \ No newline at end of file From 849cd94be4d7c73e6e40e6ea7674f838a41909b9 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 14:12:59 -0500 Subject: [PATCH 14/32] Add acquisition software to sessions csv --- user_data/sessions.csv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/user_data/sessions.csv b/user_data/sessions.csv index b51bb15..e7af20c 100644 --- a/user_data/sessions.csv +++ b/user_data/sessions.csv @@ -1,2 +1,2 @@ -subject,session_dir -subject1,subject1/session1 \ No newline at end of file +subject,session_dir,acquisition_software +subject1,subject1/session1,Miniscope-DAQ-V4 \ No newline at end of file From d05965f72bc41d4ad8d190788d805a96e3401918 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 14:27:10 -0500 Subject: [PATCH 15/32] Pin version number --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a792b3f..f8722c7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ datajoint>=0.13.0 element-lab==0.1.0b0 element-animal==0.1.0b0 element-session==0.1.0b0 -element-miniscope @ git+https://github.com/datajoint/element-miniscope.git +element-miniscope==0.1.0 element-interface @ git+https://github.com/datajoint/element-interface.git djarchive-client @ git+https://github.com/datajoint/djarchive-client.git jupytext==1.13.7 \ No newline at end of file From ae40356675052026ab7aa13d5b7afd5b3fe1398f Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 16:41:41 -0500 Subject: [PATCH 16/32] Add CaImAn installation --- docker/Dockerfile.dev | 1 + docker/Dockerfile.test | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index 754c196..9ed5fbd 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -10,6 +10,7 @@ RUN git clone --branch master https://github.com/datajoint-company/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba RUN /bin/bash -c 'mamba env update --n base --file environment.yml' + # Suite2p requires np.__version__ == 1.21 RUN pip install numpy==1.21 RUN pip install . diff --git a/docker/Dockerfile.test b/docker/Dockerfile.test index 5d1eb33..47f98a1 100644 --- a/docker/Dockerfile.test +++ b/docker/Dockerfile.test @@ -5,6 +5,12 @@ USER anaconda:anaconda COPY ./workflow-miniscope/docker/apt_requirements.txt /tmp/ RUN /entrypoint.sh echo "Installed dependencies." 
+# Install CaImAn
# Install Caiman -RUN git clone --branch master https://github.com/datajoint-company/CaImAn +RUN git clone --branch master https://github.com/flatironinstitute/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba RUN /bin/bash -c 'mamba env update --n base --file environment.yml' From 3a538494f93362677beba7409bd722d91fdab77c Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 24 Apr 2022 23:10:22 -0500 Subject: [PATCH 20/32] Update Dockerfiles --- docker/Dockerfile.dev | 5 +---- docker/Dockerfile.test | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index b760481..e1336fe 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -9,10 +9,7 @@ RUN /entrypoint.sh echo "Installed dependencies." RUN git clone --branch master https://github.com/flatironinstitute/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba -RUN /bin/bash -c 'mamba env update --n base --file environment.yml' - -# Suite2p requires np.__version__ == 1.21 -RUN pip install numpy==1.21 +RUN /bin/bash -c 'mamba env update --n base --file environment.yml' RUN pip install . WORKDIR /main diff --git a/docker/Dockerfile.test b/docker/Dockerfile.test index b6733c9..0582b43 100644 --- a/docker/Dockerfile.test +++ b/docker/Dockerfile.test @@ -10,6 +10,7 @@ RUN git clone --branch master https://github.com/flatironinstitute/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba RUN /bin/bash -c 'mamba env update --n base --file environment.yml' +RUN pip install . WORKDIR /main From 74ed693ab9a6cf85fae8c2fb41da30af8822de02 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Mon, 25 Apr 2022 15:58:28 -0500 Subject: [PATCH 21/32] Revert import --- workflow_miniscope/pipeline.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py index 9e42d53..efad076 100644 --- a/workflow_miniscope/pipeline.py +++ b/workflow_miniscope/pipeline.py @@ -9,9 +9,7 @@ from element_animal.subject import Subject from element_session.session_with_datetime import Session -from .paths import get_miniscope_root_data_dir, get_session_directory, \ - get_processed_root_data_dir - +from .paths import get_miniscope_root_data_dir, get_session_directory if 'custom' not in dj.config: dj.config['custom'] = {} From b8f0c481574197e866554dfb60ea06af5ad5790c Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Tue, 26 Apr 2022 04:21:41 +0000 Subject: [PATCH 22/32] Update Docker files --- docker/Dockerfile.dev | 7 ++++--- docker/Dockerfile.test | 9 ++++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index e1336fe..17cef9f 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -1,16 +1,17 @@ -FROM datajoint/djbase:py3.8-debian-fcd8909 +FROM datajoint/djbase:py3.9-debian-8eb1715 USER anaconda:anaconda COPY ./workflow-miniscope/docker/apt_requirements.txt /tmp/ RUN /entrypoint.sh echo "Installed dependencies." -# Install Caiman -RUN git clone --branch master https://github.com/flatironinstitute/CaImAn +# Install CaImAn +RUN git clone --branch master https://github.com/kabilar/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba RUN /bin/bash -c 'mamba env update --n base --file environment.yml' RUN pip install . 
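+# caimanmanager sets up CaImAn's caiman_data directory (demo files and trained model files) expected at runtime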
+RUN python caimanmanager.py install --inplace WORKDIR /main diff --git a/docker/Dockerfile.test b/docker/Dockerfile.test index 0582b43..6d04352 100644 --- a/docker/Dockerfile.test +++ b/docker/Dockerfile.test @@ -1,16 +1,17 @@ -FROM datajoint/djbase:py3.9-debian-fcd8909 +FROM datajoint/djbase:py3.9-debian-8eb1715 USER anaconda:anaconda COPY ./workflow-miniscope/docker/apt_requirements.txt /tmp/ RUN /entrypoint.sh echo "Installed dependencies." -# Install Caiman -RUN git clone --branch master https://github.com/flatironinstitute/CaImAn +# Install CaImAn +RUN git clone --branch master https://github.com/kabilar/CaImAn WORKDIR /main/CaImAn RUN conda install -n base -c conda-forge -y mamba RUN /bin/bash -c 'mamba env update --n base --file environment.yml' RUN pip install . +RUN python caimanmanager.py install --inplace WORKDIR /main @@ -52,4 +53,6 @@ RUN rm -f /main/workflow-miniscope/dj_local_conf.json RUN pip install /main/workflow-miniscope RUN pip install -r /main/workflow-miniscope/requirements_test.txt +RUN pip uninstall datajoint +RUN pip install git+ WORKDIR /main/workflow-miniscope From db78117f739d2bf28e01bfcb846f095ec23819b6 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Tue, 26 Apr 2022 04:22:33 +0000 Subject: [PATCH 23/32] Update paths --- workflow_miniscope/paths.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/workflow_miniscope/paths.py b/workflow_miniscope/paths.py index 2fb680a..acdb7e8 100644 --- a/workflow_miniscope/paths.py +++ b/workflow_miniscope/paths.py @@ -8,6 +8,4 @@ def get_miniscope_root_data_dir(): def get_session_directory(session_key: dict) -> str: from .pipeline import session session_dir = (session.SessionDirectory & session_key).fetch1('session_dir') - return session_dir - - + return session_dir \ No newline at end of file From b286063ed42d48c2f5fa5f48d837d184376f699d Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Tue, 26 Apr 2022 15:24:30 +0000 Subject: [PATCH 24/32] Add AnatomicalLocation table --- workflow_miniscope/pipeline.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py index efad076..df86867 100644 --- a/workflow_miniscope/pipeline.py +++ b/workflow_miniscope/pipeline.py @@ -27,7 +27,7 @@ session.activate(db_prefix + 'session', linking_module=__name__) -# Declare table `Equipment` for use in element_miniscope ------------------------------- +# Declare table `Equipment` and `AnatomicalLocation` for use in element_miniscope ------ @lab.schema class Equipment(dj.Manual): @@ -35,6 +35,13 @@ class Equipment(dj.Manual): acquisition_hardware: varchar(32) """ +@lab.schema +class AnatomicalLocation(dj.Manual): + definition = """ + recording_location_id : varchar(16) + ---- + anatomical_description: varchar(256) + """ # Activate `miniscope` schema ---------------------------------------------------------- From dcdf9e914b2425876637f06a0bf834aff96e74b3 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Tue, 26 Apr 2022 15:56:18 +0000 Subject: [PATCH 25/32] Add diagram --- README.md | 2 + images/attached_miniscope_element.svg | 1639 +++++++++++++++++++++++++ 2 files changed, 1641 insertions(+) create mode 100644 images/attached_miniscope_element.svg diff --git a/README.md b/README.md index 61c9054..773e819 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,8 @@ DataJoint Elements ([element-lab](https://github.com/datajoint/element-lab), [element-miniscope](https://github.com/datajoint/element-miniscope)) assembled together to form a 
fully functional workflow. +![element miniscope diagram](images/attached_miniscope_element.svg) + ## Installation instructions + The installation instructions can be found at the diff --git a/images/attached_miniscope_element.svg b/images/attached_miniscope_element.svg new file mode 100644 index 0000000..17737a9 --- /dev/null +++ b/images/attached_miniscope_element.svg @@ -0,0 +1,1639 @@ + + + + + + image/svg+xml + + + + + + + + + + + %3 + + + + 90 + + 90 + + + + Fluorescence.Trace + + + + Fluorescence.Trace + + + + + + 90->Fluorescence.Trace + + + + + 91 + + 91 + + + + Segmentation.Mask + + + + Segmentation.Mask + + + + + + 91->Segmentation.Mask + + + + + 92 + + 92 + + + + MotionCorrection + + + + MotionCorrection + + + + + + 92->MotionCorrection + + + + + Subject + + + + Subject + + + + + + Session + + + + Session + + + + + + Subject->Session + + + + + Recording + + + + Recording + + + + + + Session->Recording + + + + + RecordingLocation + + + + RecordingLocation + + + + + + Recording->RecordingLocation + + + + + RecordingInfo + + + + RecordingInfo + + + + + + Recording->RecordingInfo + + + + + ProcessingTask + + + + ProcessingTask + + + + + + Processing + + + + Processing + + + + + + ProcessingTask->Processing + + + + + Curation + + + + Curation + + + + + + RecordingInfo.File + + + + RecordingInfo.File + + + + + + RecordingInfo->ProcessingTask + + + + + RecordingInfo->RecordingInfo.File + + + + + MotionCorrection.Summary + + + + MotionCorrection.Summary + + + + + + MotionCorrection.RigidMotionCorrection + + + + MotionCorrection.RigidMotionCorrection + + + + + + MotionCorrection.NonRigidMotionCorrection + + + + MotionCorrection.NonRigidMotionCorrection + + + + + + MotionCorrection.Block + + + + MotionCorrection.Block + + + + + + MotionCorrection.NonRigidMotionCorrection->MotionCorrection.Block + + + + + MotionCorrection->MotionCorrection.Summary + + + + + MotionCorrection->MotionCorrection.RigidMotionCorrection + + + + + MotionCorrection->MotionCorrection.NonRigidMotionCorrection + + + + + Segmentation + + + + Segmentation + + + + + + MotionCorrection->Segmentation + + + + + MaskClassification.MaskType + + + + MaskClassification.MaskType + + + + + + Segmentation.Mask->MaskClassification.MaskType + + + + + Segmentation.Mask->Fluorescence.Trace + + + + + Segmentation->Segmentation.Mask + + + + + MaskClassification + + + + MaskClassification + + + + + + Segmentation->MaskClassification + + + + + Fluorescence + + + + Fluorescence + + + + + + Segmentation->Fluorescence + + + + + Processing->Curation + + + + + Processing->MotionCorrection + + + + + MaskClassification->MaskClassification.MaskType + + + + + Activity.Trace + + + + Activity.Trace + + + + + + Fluorescence.Trace->Activity.Trace + + + + + Fluorescence->Fluorescence.Trace + + + + + Activity + + + + Activity + + + + + + Fluorescence->Activity + + + + + Activity->Activity.Trace + + + + + ProcessingParamSet + + + + ProcessingParamSet + + + + + + ProcessingParamSet->ProcessingTask + + + + + ProcessingMethod + + + + ProcessingMethod + + + + + + ProcessingMethod->ProcessingParamSet + + + + + MaskType + + + + MaskType + + + + + + MaskType->MaskClassification.MaskType + + + + + MaskClassificationMethod + + + + MaskClassificationMethod + + + + + + MaskClassificationMethod->MaskClassification + + + + + Channel + + + + Channel + + + + + + Channel->90 + + + + + Channel->91 + + + + + Channel->92 + + + + + ActivityExtractionMethod + + + + ActivityExtractionMethod + + + + + + ActivityExtractionMethod->Activity + + + + + AcquisitionSoftware + + + + 
AcquisitionSoftware + + + + + + AcquisitionSoftware->Recording + + + + + Equipment + + + + Equipment + + + + + + Equipment->Recording + + + + + AnatomicalLocation + + + + AnatomicalLocation + + + + + + AnatomicalLocation->RecordingLocation + + + + + + MiniscopeElement + Upstreampipeline + From ff6348cb2564e5e8f35b51838ac0225fb146b50c Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Tue, 26 Apr 2022 16:53:07 +0000 Subject: [PATCH 26/32] Update 03-process notebook --- notebooks/03-process.ipynb | 568 ++++++++++++++++++++++++++++++++ notebooks/scripts/03-process.py | 257 +++++++++++++++ 2 files changed, 825 insertions(+) create mode 100644 notebooks/03-process.ipynb create mode 100644 notebooks/scripts/03-process.py diff --git a/notebooks/03-process.ipynb b/notebooks/03-process.ipynb new file mode 100644 index 0000000..410f2a2 --- /dev/null +++ b/notebooks/03-process.ipynb @@ -0,0 +1,568 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Interactively run miniscope workflow\n", + "\n", + "+ This notebook walks you through the steps in detail to run the `workflow-miniscope`. \n", + "\n", + "+ The workflow requires the data acquired from the UCLA Miniscope and Miniscope-DAQ software and processing with CaImAn.\n", + "\n", + "+ If you haven't configured the paths, refer to [01-configure](01-configure.ipynb).\n", + "\n", + "+ To overview the schema structures, refer to [02-workflow-structure](02-workflow-structure.ipynb).\n", + "\n", + "+ If you need a more automatic approach to run the workflow, refer to [04-automate](04-automate-optional.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's change the directory to the package root directory to load the local configuration (`dj_local_conf.json`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## `Pipeline.py`\n", + "\n", + "+ This script `activates` the DataJoint `Elements` and declares other required tables." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from workflow_miniscope.pipeline import *\n", + "from element_interface.utils import find_full_path" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Schema diagrams\n", + "\n", + "+ The following outputs are the diagrams of the schemas comprising this workflow.\n", + "\n", + "+ Please refer back to these diagrams to visualize the relationships of different tables." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + \\\n", + " dj.Diagram(AnatomicalLocation) + dj.Diagram(Equipment) + dj.Diagram(miniscope) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert an entry into `subject.Subject`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.Subject.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.Subject.insert1(dict(subject='subject1', \n", + " sex='F', \n", + " subject_birth_date='2020-01-01', \n", + " subject_description='UCLA Miniscope acquisition'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert an entry into `lab.Equipment`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Equipment.insert1(dict(acquisition_hardware='UCLA Miniscope'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert an entry into `session.Session`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.Session.describe();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.Session.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session_key = dict(subject='subject1', \n", + " session_datetime='2021-01-01 00:00:01')\n", + "\n", + "session.Session.insert1(session_key)\n", + "\n", + "session.Session()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert an entry into `session.SessionDirectory`\n", + "\n", + "+ The `session_dir` is the relative path to the `miniscope_root_data_dir` for the given session, in POSIX format with `/`.\n", + "\n", + "+ Instead of a relative path, `session_dir` could be an absolute path but it is not recommended as the absolute path would have to match the `miniscope_root_data_dir` in `dj_local_conf.json`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.SessionDirectory.describe();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.SessionDirectory.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.SessionDirectory.insert1(dict(**session_key, \n", + " session_dir='subject1/session1'))\n", + "\n", + "session.SessionDirectory()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert an entry into `miniscope.Recording`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.Recording.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "recording_key = dict(**session_key,\n", + " recording_id=0)\n", + "\n", + "miniscope.Recording.insert1(dict(**recording_key, \n", + " acquisition_hardware='UCLA Miniscope', \n", + " acquisition_software='Miniscope-DAQ-V4',\n", + " recording_directory='subject1/session1',\n", + " recording_notes='No notes for this session.'))\n", + "miniscope.Recording()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Populate `miniscope.RecordingInfo`\n", + "\n", + "+ This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).\n", + "+ `populate` automatically calls `make` for every key for which the auto-populated table is missing data.\n", + "+ `populate_settings` passes arguments to the `populate` method.\n", + "+ `display_progress=True` reports the progress bar" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.RecordingInfo.describe();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.RecordingInfo.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "populate_settings = {'display_progress': True}\n", + "miniscope.RecordingInfo.populate(**populate_settings)\n", + "miniscope.RecordingInfo()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert a new entry into `miniscope.ProcessingParamSet` for CaImAn\n", + "\n", + "+ Define and insert the parameters that will be used for the CaImAn processing.\n", + "\n", + "+ This step is not needed if you are using an existing ProcessingParamSet.\n", + "\n", + "### Define CaImAn parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "avi_files = (miniscope.Recording * miniscope.RecordingInfo * miniscope.RecordingInfo.File & recording_key).fetch('recording_file_path')\n", + "avi_files = [find_full_path(get_miniscope_root_data_dir(), \n", + " avi_file).as_posix() for avi_file in avi_files]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sampling_rate = (miniscope.Recording * miniscope.RecordingInfo & recording_key).fetch1('fps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "params = dict(pw_rigid=False, # perform piecewise-rigid motion correction\n", + " max_shifts= (5, 5), # maximum allowed rigid shift,\n", + " 
gSig_filt=(3, 3), # size of high pass spatial filtering, \n",
+    "                      # used in 1p data\n",
+    "              strides=(48, 48), # start a new patch for pw-rigid motion correction every x pixels\n",
+    "              overlaps=(24, 24), # overlap between patches (size of patch strides+overlaps)\n",
+    "              max_deviation_rigid=3, # maximum deviation allowed for patch with respect to rigid shifts\n",
+    "              border_nan='copy', # replicate values along the boundaries\n",
+    "              fnames=avi_files,\n",
+    "              fr=sampling_rate, # imaging rate (Hz)\n",
+    "              decay_time=0.4, # length of typical transient (in seconds)\n",
+    "              method_init='corr_pnr', \n",
+    "              gSig=(6, 6), # expected half size of neurons\n",
+    "              gSiz=(15, 15), # half size for neuron bounding box\n",
+    "              rf=48, # half size of each patch\n",
+    "              stride=8, # amount of overlap between patches\n",
+    "              p=0, # order of AR indicator dynamics\n",
+    "              nb=0,\n",
+    "              ssub=1, # spatial downsampling factor\n",
+    "              min_SNR=1.5, # minimum SNR for accepting new components\n",
+    "              min_pnr=10,\n",
+    "              min_corr=0.8,\n",
+    "              bas_nonneg=False,\n",
+    "              center_psf=True,\n",
+    "              rval_thr=0.85, # correlation threshold for new component \n",
+    "                             # inclusion\n",
+    "              only_init=True,\n",
+    "              merge_thr=0.65, # merging threshold\n",
+    "              K=None # initial number of components\n",
+    "             )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Insert CaImAn parameters\n",
+    "\n",
+    "+ A method of the class `ProcessingParamSet` called `insert_new_params` is a helper function that inserts the CaImAn parameters and ensures the inserted parameter set is not duplicated."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.ProcessingParamSet.insert_new_params(\n",
+    "    processing_method='caiman', \n",
+    "    paramset_id=0, \n",
+    "    paramset_desc='Calcium imaging analysis with CaImAn using default parameters',\n",
+    "    params=params)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Insert new ProcessingTask to trigger analysis and ingestion of motion correction and segmentation results\n",
+    "\n",
+    "+ Motion correction and segmentation are performed for each recording in CaImAn.\n",
+    "\n",
+    "+ If `task_mode=trigger`, this entry will trigger running analysis (i.e. motion correction, segmentation, and traces) within the `miniscope.Processing` table.\n",
+    "\n",
+    "+ If `task_mode=load`, this step ensures that the output directory contains valid processed outputs.\n",
+    "\n",
+    "+ The `paramset_id` is the parameter set stored in `miniscope.ProcessingParamSet` that is used for the imaging processing.\n",
+    " \n",
+    "+ The `processing_output_dir` stores the directory of the processing results (relative to the miniscope root data directory)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.ProcessingTask.insert1(dict(**recording_key,\n",
+    "                                      paramset_id=0,\n",
+    "                                      processing_output_dir='subject1/session1/caiman',\n",
+    "                                      task_mode='trigger'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Populate `miniscope.Processing`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.Processing.populate(**populate_settings)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Insert new Curation following the ProcessingTask\n",
+    "\n",
+    "+ The next step in the pipeline is the curation of motion correction and segmentation results.\n",
+    "\n",
+    "+ If manual curation was performed, an entry needs to be inserted manually into the table `miniscope.Curation`, specifying the directory of the curated results in `curation_output_dir`. \n",
+    "\n",
+    "+ If we would like to use the processed output directly, an entry is also needed in `miniscope.Curation`. A method `create1_from_processing_task` was provided to help this insertion. It copies the `processing_output_dir` in `miniscope.ProcessingTask` to the field `curation_output_dir` in the table `miniscope.Curation` with a new `curation_id`.\n",
+    "\n",
+    "    + In this example, we create/insert one `miniscope.Curation` for each `miniscope.ProcessingTask`, specifying the same output directory.\n",
+    "\n",
+    "    + To do this, we could also make use of the convenience method `miniscope.Curation().create1_from_processing_task()`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.Curation.insert1(dict(**recording_key,\n",
+    "                                paramset_id=0,\n",
+    "                                curation_id=0,\n",
+    "                                curation_time='2022-04-30 12:22:15', \n",
+    "                                curation_output_dir='subject1/session1/caiman',\n",
+    "                                manual_curation=False,\n",
+    "                                curation_note=''))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Populate `miniscope.MotionCorrection`\n",
+    "\n",
+    "+ This table contains the rigid or non-rigid motion correction data including the shifts and summary images.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.MotionCorrection.populate(**populate_settings)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Populate `miniscope.Segmentation`\n",
+    "\n",
+    "+ This table contains the mask coordinates, weights, and centers.\n",
+    "+ This table also inserts the data into `MaskClassification`, which is the classification of the segmented masks and the confidence of classification."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "miniscope.Segmentation.populate(**populate_settings)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Add another set of results from a new round of curation\n",
+    "\n",
+    "If you performed curation on existing processed results (i.e. motion correction or segmentation), then:\n",
+    " \n",
+    "+ Add an entry into `miniscope.Curation` with the directory of the curated results and a new `curation_id`.\n",
+    "\n",
+    "+ Populate the `miniscope.MotionCorrection` and `miniscope.Segmentation` tables again."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Populate `miniscope.Fluorescence`\n", + "\n", + "+ This table contains the fluorescence traces prior to filtering and spike extraction." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.Fluorescence.populate(**populate_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Populate `miniscope.Activity`\n", + "+ This table contains the inferred neural activity from the fluorescence traces." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.Activity.populate(**populate_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "+ Proceed to the [05-explore](05-explore.ipynb) to learn how to query, fetch, and visualize the imaging data." + ] + } + ], + "metadata": { + "interpreter": { + "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe" + }, + "jupytext": { + "formats": "ipynb,scripts//py" + }, + "kernelspec": { + "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/scripts/03-process.py b/notebooks/scripts/03-process.py new file mode 100644 index 0000000..abc6aed --- /dev/null +++ b/notebooks/scripts/03-process.py @@ -0,0 +1,257 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,scripts//py +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.13.7 +# kernelspec: +# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)' +# language: python +# name: python3 +# --- + +# # Interactively run miniscope workflow +# +# + This notebook walks you through the steps in detail to run the `workflow-miniscope`. +# +# + The workflow requires the data acquired from the UCLA Miniscope and Miniscope-DAQ software and processing with CaImAn. +# +# + If you haven't configured the paths, refer to [01-configure](01-configure.ipynb). +# +# + To overview the schema structures, refer to [02-workflow-structure](02-workflow-structure.ipynb). +# +# + If you need a more automatic approach to run the workflow, refer to [04-automate](04-automate-optional.ipynb). + +# Let's change the directory to the package root directory to load the local configuration (`dj_local_conf.json`). + +import os +if os.path.basename(os.getcwd())=='notebooks': os.chdir('..') +import numpy as np + +# ## `Pipeline.py` +# +# + This script `activates` the DataJoint `Elements` and declares other required tables. + +from workflow_miniscope.pipeline import * +from element_interface.utils import find_full_path + +# ## Schema diagrams +# +# + The following outputs are the diagrams of the schemas comprising this workflow. +# +# + Please refer back to these diagrams to visualize the relationships of different tables. 
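+#
+# + `dj.Diagram` objects can be added together to draw several schemas in a single figure, as the next cell does.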
+ +dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + \ + dj.Diagram(AnatomicalLocation) + dj.Diagram(Equipment) + dj.Diagram(miniscope) + +# ## Insert an entry into `subject.Subject` + +subject.Subject.heading + +subject.Subject.insert1(dict(subject='subject1', + sex='F', + subject_birth_date='2020-01-01', + subject_description='UCLA Miniscope acquisition')) + +# ## Insert an entry into `lab.Equipment` + +Equipment.insert1(dict(acquisition_hardware='UCLA Miniscope')) + +# ## Insert an entry into `session.Session` + +session.Session.describe(); + +session.Session.heading + +# + +session_key = dict(subject='subject1', + session_datetime='2021-01-01 00:00:01') + +session.Session.insert1(session_key) + +session.Session() +# - + +# ## Insert an entry into `session.SessionDirectory` +# +# + The `session_dir` is the relative path to the `miniscope_root_data_dir` for the given session, in POSIX format with `/`. +# +# + Instead of a relative path, `session_dir` could be an absolute path but it is not recommended as the absolute path would have to match the `miniscope_root_data_dir` in `dj_local_conf.json`. + +session.SessionDirectory.describe(); + +session.SessionDirectory.heading + +# + +session.SessionDirectory.insert1(dict(**session_key, + session_dir='subject1/session1')) + +session.SessionDirectory() +# - + +# ## Insert an entry into `miniscope.Recording` + +miniscope.Recording.heading + +# + +recording_key = dict(**session_key, + recording_id=0) + +miniscope.Recording.insert1(dict(**recording_key, + acquisition_hardware='UCLA Miniscope', + acquisition_software='Miniscope-DAQ-V4', + recording_directory='subject1/session1', + recording_notes='No notes for this session.')) +miniscope.Recording() +# - + +# ## Populate `miniscope.RecordingInfo` +# +# + This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.). +# + `populate` automatically calls `make` for every key for which the auto-populated table is missing data. +# + `populate_settings` passes arguments to the `populate` method. +# + `display_progress=True` reports the progress bar + +miniscope.RecordingInfo.describe(); + +miniscope.RecordingInfo.heading + +populate_settings = {'display_progress': True} +miniscope.RecordingInfo.populate(**populate_settings) +miniscope.RecordingInfo() + +# ## Insert a new entry into `miniscope.ProcessingParamSet` for CaImAn +# +# + Define and insert the parameters that will be used for the CaImAn processing. +# +# + This step is not needed if you are using an existing ProcessingParamSet. 
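+#
+# + To see which parameter sets already exist, preview the table first; on a fresh database the query below simply returns an empty table.
+
+miniscope.ProcessingParamSet()
+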
+#
+# ### Define CaImAn parameters

+avi_files = (miniscope.Recording * miniscope.RecordingInfo * miniscope.RecordingInfo.File & recording_key).fetch('recording_file_path')
+avi_files = [find_full_path(get_miniscope_root_data_dir(), 
+                            avi_file).as_posix() for avi_file in avi_files]
+
+sampling_rate = (miniscope.Recording * miniscope.RecordingInfo & recording_key).fetch1('fps')
+
+params = dict(pw_rigid=False, # perform piecewise-rigid motion correction
+              max_shifts= (5, 5), # maximum allowed rigid shift,
+              gSig_filt=(3, 3), # size of high pass spatial filtering, 
+                      # used in 1p data
+              strides=(48, 48), # start a new patch for pw-rigid motion correction every x pixels
+              overlaps=(24, 24), # overlap between patches (size of patch strides+overlaps)
+              max_deviation_rigid=3, # maximum deviation allowed for patch with respect to rigid shifts
+              border_nan='copy', # replicate values along the boundaries
+              fnames=avi_files,
+              fr=sampling_rate, # imaging rate (Hz)
+              decay_time=0.4, # length of typical transient (in seconds)
+              method_init='corr_pnr', 
+              gSig=(6, 6), # expected half size of neurons
+              gSiz=(15, 15), # half size for neuron bounding box
+              rf=48, # half size of each patch
+              stride=8, # amount of overlap between patches
+              p=0, # order of AR indicator dynamics
+              nb=0,
+              ssub=1, # spatial downsampling factor
+              min_SNR=1.5, # minimum SNR for accepting new components
+              min_pnr=10,
+              min_corr=0.8,
+              bas_nonneg=False,
+              center_psf=True,
+              rval_thr=0.85, # correlation threshold for new component 
+                             # inclusion
+              only_init=True,
+              merge_thr=0.65, # merging threshold
+              K=None # initial number of components
+             )

+# ### Insert CaImAn parameters
+#
+# + A method of the class `ProcessingParamSet` called `insert_new_params` is a helper function that inserts the CaImAn parameters and ensures the inserted parameter set is not duplicated.

+miniscope.ProcessingParamSet.insert_new_params(
+    processing_method='caiman', 
+    paramset_id=0, 
+    paramset_desc='Calcium imaging analysis with CaImAn using default parameters',
+    params=params)

+# ## Insert new ProcessingTask to trigger analysis and ingestion of motion correction and segmentation results
+#
+# + Motion correction and segmentation are performed for each recording in CaImAn.
+#
+# + If `task_mode=trigger`, this entry will trigger running analysis (i.e. motion correction, segmentation, and traces) within the `miniscope.Processing` table.
+#
+# + If `task_mode=load`, this step ensures that the output directory contains valid processed outputs.
+#
+# + The `paramset_id` is the parameter set stored in `miniscope.ProcessingParamSet` that is used for the imaging processing.
+#
+# + The `processing_output_dir` stores the directory of the processing results (relative to the miniscope root data directory).

+miniscope.ProcessingTask.insert1(dict(**recording_key,
+                                      paramset_id=0,
+                                      processing_output_dir='subject1/session1/caiman',
+                                      task_mode='trigger'))

+# ## Populate `miniscope.Processing`

+miniscope.Processing.populate(**populate_settings)

+# ## Insert new Curation following the ProcessingTask
+#
+# + The next step in the pipeline is the curation of motion correction and segmentation results.
+#
+# + If manual curation was performed, an entry needs to be inserted manually into the table `miniscope.Curation`, specifying the directory of the curated results in `curation_output_dir`. 
+#
+# + If we would like to use the processed output directly, an entry is also needed in `miniscope.Curation`. 
A method `create1_from_processing_task` was provided to help this insertion. It copies the `processing_output_dir` in `miniscope.ProcessingTask` to the field `curation_output_dir` in the table `miniscope.Curation` with a new `curation_id`.
+#
+#   + In this example, we create/insert one `miniscope.Curation` for each `miniscope.ProcessingTask`, specifying the same output directory.
+#
+#   + To do this, we could also make use of the convenience method `miniscope.Curation().create1_from_processing_task()`.

+miniscope.Curation.insert1(dict(**recording_key,
+                                paramset_id=0,
+                                curation_id=0,
+                                curation_time='2022-04-30 12:22:15', 
+                                curation_output_dir='subject1/session1/caiman',
+                                manual_curation=False,
+                                curation_note=''))

+# ## Populate `miniscope.MotionCorrection`
+#
+# + This table contains the rigid or non-rigid motion correction data including the shifts and summary images.
+#

+miniscope.MotionCorrection.populate(**populate_settings)

+# ## Populate `miniscope.Segmentation`
+#
+# + This table contains the mask coordinates, weights, and centers.
+# + This table also inserts the data into `MaskClassification`, which is the classification of the segmented masks and the confidence of classification.

+miniscope.Segmentation.populate(**populate_settings)

+# ## Add another set of results from a new round of curation
+#
+# If you performed curation on existing processed results (i.e. motion correction or segmentation), then:
+#
+# + Add an entry into `miniscope.Curation` with the directory of the curated results and a new `curation_id`.
+#
+# + Populate the `miniscope.MotionCorrection` and `miniscope.Segmentation` tables again.

+# ## Populate `miniscope.Fluorescence`
+#
+# + This table contains the fluorescence traces prior to filtering and spike extraction.

+miniscope.Fluorescence.populate(**populate_settings)

+# ## Populate `miniscope.Activity`
+# + This table contains the inferred neural activity from the fluorescence traces.

+miniscope.Activity.populate(**populate_settings)

+# ## Next steps
+#
+# + Proceed to the [05-explore](05-explore.ipynb) to learn how to query, fetch, and visualize the imaging data.
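+
+# ## Appendix: a shortcut for the curation step
+#
+# + A minimal sketch of the `create1_from_processing_task` helper mentioned in the curation section above: it copies the `processing_output_dir` of a `miniscope.ProcessingTask` into a new `miniscope.Curation` entry, so the manual `insert1` is not needed. The keyword arguments below are assumptions modeled on the equivalent helper in the calcium imaging element, so check the helper's docstring before relying on them; the call is commented out because the `insert1` above already created `curation_id=0`.
+
+# miniscope.Curation().create1_from_processing_task(
+#     dict(**recording_key, paramset_id=0),
+#     is_curated=False,
+#     curation_note='')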
From 2461b2890eef5d0bfac33818977fd64b42dc5c36 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan 
Date: Tue, 26 Apr 2022 22:58:04 +0000
Subject: [PATCH 27/32] Update parameter list

---
 notebooks/03-process.ipynb      | 60 +++++++++++++++++++--------------
 notebooks/scripts/03-process.py | 60 +++++++++++++++++++--------------
 2 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/notebooks/03-process.ipynb b/notebooks/03-process.ipynb
index 410f2a2..7a59bcb 100644
--- a/notebooks/03-process.ipynb
+++ b/notebooks/03-process.ipynb
@@ -312,35 +312,43 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "params = dict(pw_rigid=False, # perform piecewise-rigid motion correction\n",
-    "              max_shifts= (5, 5), # maximum allowed rigid shift,\n",
-    "              gSig_filt=(3, 3), # size of high pass spatial filtering, \n",
-    "                      # used in 1p data\n",
-    "              strides=(48, 48), # start a new patch for pw-rigid motion correction every x pixels\n",
-    "              overlaps=(24, 24), # overlap between patches (size of patch strides+overlaps)\n",
-    "              max_deviation_rigid=3, # maximum deviation allowed for patch with respect to rigid shifts\n",
-    "              border_nan='copy', # replicate values along the boundaries\n",
-    "              fnames=avi_files,\n",
-    "              fr=sampling_rate, # imaging rate (Hz)\n",
-    "              decay_time=0.4, # length of typical transient (in seconds)\n",
-    "              method_init='corr_pnr', \n",
-    "              gSig=(6, 6), # expected half size of neurons\n",
-    "              gSiz=(15, 15), # half size for neuron bounding box\n",
-    "              rf=48, # half size of each patch\n",
-    "              stride=8, # amount of overlap between patches\n",
-    "              p=0, # order of AR indicator dynamics\n",
+    "params = dict(fnames=avi_files,\n",
+    "              fr=sampling_rate,\n",
+    "              decay_time=0.4,\n",
+    "              pw_rigid=False,\n",
+    "              max_shifts= (5, 5),\n",
+    "              gSig_filt=(3, 3),\n",
+    "              strides=(48, 48),\n",
+    "              overlaps=(24, 24),\n",
+    "              max_deviation_rigid=3,\n",
+    "              border_nan='copy',\n",
+    "              method_init='corr_pnr',\n",
+    "              K=None,\n",
+    "              gSig=(3, 3),\n",
+    "              gSiz=(13, 13),\n",
+    "              merge_thr=0.7,\n",
+    "              p=1,\n",
+    "              tsub=2,\n",
+    "              ssub=1,\n",
+    "              rf=40,\n",
+    "              stride=20,\n",
+    "              only_init=True,\n",
     "              nb=0,\n",
-    "              ssub=1, # spatial downsampling factor\n",
-    "              min_SNR=1.5, # minimum SNR for accepting new components\n",
-    "              min_pnr=10,\n",
+    "              nb_patch=0,\n",
+    "              method_deconvolution='oasis',\n",
+    "              low_rank_background=None,\n",
+    "              update_background_components=True,\n",
     "              min_corr=0.8,\n",
-    "              bas_nonneg=False,\n",
+    "              min_pnr=10,\n",
+    "              normalize_init=False,\n",
     "              center_psf=True,\n",
-    "              rval_thr=0.85, # correlation threshold for new component \n",
-    "                             # inclusion\n",
-    "              only_init=True,\n",
-    "              merge_thr=0.65, # merging threshold\n",
-    "              K=None # initial number of components\n",
+    "              ssub_B=2,\n",
+    "              ring_size_factor=1.4,\n",
+    "              del_duplicates=True,\n",
+    "              border_pix=0,\n",
+    "              min_SNR=3,\n",
+    "              rval_thr=0.85,\n",
+    "              use_cnn=False,\n",
     "             )"
    ]
   },
diff --git a/notebooks/scripts/03-process.py b/notebooks/scripts/03-process.py
index abc6aed..a49251e 100644
--- a/notebooks/scripts/03-process.py
+++ b/notebooks/scripts/03-process.py
@@ -137,35 +137,43 @@
 
 sampling_rate = (miniscope.Recording * miniscope.RecordingInfo & recording_key).fetch1('fps')
 
-params = dict(pw_rigid=False, # perform piecewise-rigid motion correction
-              max_shifts= (5, 5), # maximum allowed rigid shift,
-              gSig_filt=(3, 3), # size of high pass spatial filtering,
-              # used in 1p data
-              strides=(48, 48), # start a new patch for pw-rigid motion correction every x pixels
-              overlaps=(24, 24), # overlap between patches (size of patch strides+overlaps)
-              
max_deviation_rigid=3, # maximum deviation allowed for patch with respect to rigid shifts - border_nan='copy', # replicate values along the boundaries - fnames=avi_files, - fr=sampling_rate, # imaging rate (Hz) - decay_time=0.4, # length of typical transient (in seconds) - method_init='corr_pnr', - gSig=(6, 6), # expected half size of neurons - gSiz=(15, 15), # half size for neuron bounding box - rf=48, # half size of each patch - stride=8, # amount of overlap between patches - p=0, # order of AR indicator dynamics +params = dict(fnames=avi_files, + fr=sampling_rate, + decay_time=0.4, + pw_rigid=False, + max_shifts= (5, 5), + gSig_filt=(3, 3), + strides=(48, 48), + overlaps=(24, 24), + max_deviation_rigid=3, + border_nan='copy', + method_init='corr_pnr', + K=None, + gSig=(3, 3), + gSiz=(13, 13), + merge_thr=0.7, + p=1, + tsub=2, + ssub=1, + rf=40, + stride=20, + only_init=True, nb=0, - ssub=1, # spatial downsampling factor - min_SNR=1.5, # minimum SNR for accepting new components - min_pnr=10, + nb_patch=0, + method_deconvolution='oasis', + low_rank_background=None, + update_background_components=True, min_corr=0.8, - bas_nonneg=False, + min_pnr=10, + normalize_init=False, center_psf=True, - rval_thr=0.85, # correlation threshold for new component - # inclusion - only_init=True, - merge_thr=0.65, # merging threshold - K=None # initial number of components + ssub_B=2, + ring_size_factor=1.4, + del_duplicates=True, + border_pix=0, + min_SNR=3, + rval_thr=0.85, + use_cnn=False, ) # ### Insert CaImAn parameters From c7014114bd6627e3732e3b76eef50e2762b0d2fb Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Wed, 27 Apr 2022 15:27:38 +0000 Subject: [PATCH 28/32] Fix for multiple root data directories --- workflow_miniscope/ingest.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/workflow_miniscope/ingest.py b/workflow_miniscope/ingest.py index 33c4f7e..500fa5f 100644 --- a/workflow_miniscope/ingest.py +++ b/workflow_miniscope/ingest.py @@ -65,15 +65,13 @@ def ingest_sessions(session_csv_path='./user_data/sessions.csv'): session_list.append(session_key) session_dir_list.append(dict(**session_key, - session_dir= - session_dir.relative_to(get_miniscope_root_data_dir()).as_posix())) + session_dir=session_dir.as_posix())) recording_list.append(dict(**session_key, recording_id=0, # Assumes one recording per session acquisition_hardware=acquisition_hardware, acquisition_software=acquisition_software, - recording_directory= - session_dir.relative_to(get_miniscope_root_data_dir()).as_posix())) + recording_directory=session_dir.as_posix())) print(f'\n---- Insert {len(set(val for dic in hardware_list for val in dic.values()))} entry(s) into lab.Equipment ----') Equipment.insert(hardware_list, skip_duplicates=True) From 6af3cfff32cfa507564ef916c78beaf04f673011 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Wed, 27 Apr 2022 17:16:17 +0000 Subject: [PATCH 29/32] Remove file list and sampling rate from params --- notebooks/03-process.ipynb | 24 +----------------------- notebooks/scripts/03-process.py | 10 +--------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/notebooks/03-process.ipynb b/notebooks/03-process.ipynb index 7a59bcb..8b797d9 100644 --- a/notebooks/03-process.ipynb +++ b/notebooks/03-process.ipynb @@ -292,29 +292,7 @@ "metadata": {}, "outputs": [], "source": [ - "avi_files = (miniscope.Recording * miniscope.RecordingInfo * miniscope.RecordingInfo.File & recording_key).fetch('recording_file_path')\n", - "avi_files = 
[find_full_path(get_miniscope_root_data_dir(), \n", - " avi_file).as_posix() for avi_file in avi_files]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sampling_rate = (miniscope.Recording * miniscope.RecordingInfo & recording_key).fetch1('fps')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params = dict(fnames=avi_files,\n", - " fr=sampling_rate,\n", - " decay_time=0.4,\n", + "params = dict(decay_time=0.4,\n", " pw_rigid=False,\n", " max_shifts= (5, 5),\n", " gSig_filt=(3, 3),\n", diff --git a/notebooks/scripts/03-process.py b/notebooks/scripts/03-process.py index a49251e..dced950 100644 --- a/notebooks/scripts/03-process.py +++ b/notebooks/scripts/03-process.py @@ -131,15 +131,7 @@ # # ### Define CaImAn parameters -avi_files = (miniscope.Recording * miniscope.RecordingInfo * miniscope.RecordingInfo.File & recording_key).fetch('recording_file_path') -avi_files = [find_full_path(get_miniscope_root_data_dir(), - avi_file).as_posix() for avi_file in avi_files] - -sampling_rate = (miniscope.Recording * miniscope.RecordingInfo & recording_key).fetch1('fps') - -params = dict(fnames=avi_files, - fr=sampling_rate, - decay_time=0.4, +params = dict(decay_time=0.4, pw_rigid=False, max_shifts= (5, 5), gSig_filt=(3, 3), From 899414b431cdb3fd56294a44e83368d574087f31 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Wed, 27 Apr 2022 23:07:29 +0000 Subject: [PATCH 30/32] Update equipment table to be generalizable --- workflow_miniscope/pipeline.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py index df86867..1b974db 100644 --- a/workflow_miniscope/pipeline.py +++ b/workflow_miniscope/pipeline.py @@ -32,14 +32,17 @@ @lab.schema class Equipment(dj.Manual): definition = """ - acquisition_hardware: varchar(32) + equipment: varchar(32) + --- + modality: varchar(256) + description: varchar(256) """ @lab.schema class AnatomicalLocation(dj.Manual): definition = """ recording_location_id : varchar(16) - ---- + --- anatomical_description: varchar(256) """ From 23b177d72aa45204bb432e57919fde790adc1aff Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Wed, 27 Apr 2022 23:58:37 +0000 Subject: [PATCH 31/32] Update diagram --- images/attached_miniscope_element.svg | 949 +++++++++++++------------- 1 file changed, 466 insertions(+), 483 deletions(-) diff --git a/images/attached_miniscope_element.svg b/images/attached_miniscope_element.svg index 17737a9..3385e7c 100644 --- a/images/attached_miniscope_element.svg +++ b/images/attached_miniscope_element.svg @@ -8,15 +8,15 @@ xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" - width="1060pt" + width="1125pt" height="719pt" - viewBox="0.00 0.00 1060.00 719.00" + viewBox="0.00 0.00 1124.81 719.00" version="1.1" - id="svg1359" - sodipodi:docname="output.svg" + id="svg1688" + sodipodi:docname="attached_miniscope_element.svg" inkscape:version="1.0.1 (c497b03c, 2020-09-10)"> + id="metadata1694"> @@ -27,20 +27,7 @@ - - - + id="defs1692" /> + inkscape:current-layer="svg1688" + inkscape:document-rotation="0" /> %3 + id="title1162">%3 - + points="-4,4 -4,-715 1120.81,-715 1120.81,4 -4,4" + id="polygon1164" /> + 90 + id="title1166">30 + id="ellipse1168" /> 90 + id="text1170">30 - + Fluorescence.Trace + id="title1173">MotionCorrection + 
id="a_node16"> - + xlink:title="→ Curation ------------------------------ → Channel.proj(motion_correct_channel="channel") " + id="a1179"> + Fluorescence.Trace + font-size="12.00" + fill="#00007f" + fill-opacity="0.627451" + id="text1177">MotionCorrection - + 90->Fluorescence.Trace + id="title1183">30->MotionCorrection + d="M514.75,-203.9C500.83,-199.17 407.49,-167.45 352.95,-148.91" + id="path1185" /> - + 91 + id="title1188">31 + id="ellipse1190" /> 91 + id="text1192">31 Segmentation.Mask + id="title1195">Segmentation.Mask + id="a1201"> + points="1097.5,-144 993.5,-144 993.5,-125 1097.5,-125 1097.5,-144" + id="polygon1197" /> Segmentation.Mask + id="text1199">Segmentation.Mask - + 91->Segmentation.Mask + id="title1205">31->Segmentation.Mask + d="M1045.5,-203.3C1045.5,-196.32 1045.5,-160.64 1045.5,-144.03" + id="path1207" /> - + 92 + id="title1210">32 + id="ellipse1212" /> 92 + id="text1214">32 - + MotionCorrection + id="title1217">Fluorescence.Trace + id="a_node22"> - + xlink:title="→ Fluorescence → Segmentation.Mask → Channel.proj(fluorescence_channel="channel") ------------------------------ fluorescence          neuropil_fluorescence=null " + id="a1223"> + MotionCorrection + font-size="10.00" + id="text1221">Fluorescence.Trace - + 92->MotionCorrection + id="title1227">32->Fluorescence.Trace + d="M1103.95,-203.5C1107.35,-195.82 1128.27,-145.51 1106.5,-117 1091.41,-97.24 1028.63,-83.66 982.33,-76.12" + id="path1229" /> Subject + id="title1232">Subject + id="a1238"> + points="467,-711 410,-711 410,-676 467,-676 467,-711" + id="polygon1234" /> Subject + id="text1236">Subject @@ -316,28 +303,28 @@ id="node5" class="node"> Session + id="title1242">Session + id="a1248"> + points="468.5,-640 408.5,-640 408.5,-605 468.5,-605 468.5,-640" + id="polygon1244" /> Session + id="text1246">Session @@ -346,42 +333,42 @@ id="edge4" class="edge"> Subject->Session + id="title1252">Subject->Session + d="M438.5,-675.8C438.5,-664.95 438.5,-650.87 438.5,-640.05" + id="path1254" /> Recording + id="title1257">Recording + id="a1263"> + points="574.5,-569 502.5,-569 502.5,-534 574.5,-534 574.5,-569" + id="polygon1259" /> Recording + id="text1261">Recording @@ -390,41 +377,41 @@ id="edge5" class="edge"> Session->Recording + id="title1267">Session->Recording + d="M462.45,-604.97C478.17,-594.13 498.65,-580 514.4,-569.13" + id="path1269" /> RecordingLocation + id="title1272">RecordingLocation + id="a1278"> + points="472.5,-498 354.5,-498 354.5,-463 472.5,-463 472.5,-498" + id="polygon1274" /> RecordingLocation + id="text1276">RecordingLocation @@ -433,45 +420,45 @@ id="edge6" class="edge"> Recording->RecordingLocation + id="title1282">Recording->RecordingLocation + d="M508.56,-533.97C488.91,-523.13 463.31,-509 443.63,-498.13" + id="path1284" /> RecordingInfo + id="title1287">RecordingInfo + id="a1293"> + id="ellipse1289" /> RecordingInfo + id="text1291">RecordingInfo @@ -480,41 +467,41 @@ id="edge7" class="edge"> Recording->RecordingInfo + id="title1297">Recording->RecordingInfo + d="M540.92,-533.8C542.49,-522.95 544.53,-508.87 546.1,-498.05" + id="path1299" /> ProcessingTask + id="title1302">ProcessingTask + id="a1308"> + points="722,-427 621,-427 621,-392 722,-392 722,-427" + id="polygon1304" /> ProcessingTask + id="text1306">ProcessingTask @@ -523,31 +510,31 @@ id="node19" class="node"> Processing + id="title1312">Processing + id="a1318"> + id="ellipse1314" /> Processing + id="text1316">Processing @@ -556,123 +543,184 @@ id="edge8" class="edge"> ProcessingTask->Processing + id="title1322">ProcessingTask->Processing 
+ d="M671.5,-391.89C671.5,-380.69 671.5,-366.17 671.5,-356.07" + id="path1324" /> Curation + id="title1327">Curation + id="a1333"> + points="702.5,-294 640.5,-294 640.5,-259 702.5,-259 702.5,-294" + id="polygon1329" /> Curation + id="text1331">Curation + + + Curation->MotionCorrection + + + + + Segmentation + + + + Segmentation + + + + + + Curation->Segmentation + + RecordingInfo.File + id="title1357">RecordingInfo.File + id="a1363"> + points="596.5,-419 500.5,-419 500.5,-400 596.5,-400 596.5,-419" + id="polygon1359" /> RecordingInfo.File + id="text1361">RecordingInfo.File RecordingInfo->ProcessingTask + id="title1367">RecordingInfo->ProcessingTask + d="M574.59,-464.87C594.33,-453.79 621.46,-438.57 642,-427.05" + id="path1369" /> RecordingInfo->RecordingInfo.File + id="title1372">RecordingInfo->RecordingInfo.File + d="M548.5,-462.8C548.5,-449.08 548.5,-430.19 548.5,-419.27" + id="path1374" /> MotionCorrection.Summary + id="title1377">MotionCorrection.Summary + id="a1383"> + points="572.5,-77.5 436.5,-77.5 436.5,-58.5 572.5,-58.5 572.5,-77.5" + id="polygon1379" /> MotionCorrection.Summary + id="text1381">MotionCorrection.Summary @@ -681,24 +729,24 @@ id="node13" class="node"> MotionCorrection.RigidMotionCorrection + id="title1387">MotionCorrection.RigidMotionCorrection + id="a1393"> + points="191,-77.5 0,-77.5 0,-58.5 191,-58.5 191,-77.5" + id="polygon1389" /> MotionCorrection.RigidMotionCorrection + id="text1391">MotionCorrection.RigidMotionCorrection @@ -707,24 +755,24 @@ id="node14" class="node"> MotionCorrection.NonRigidMotionCorrection + id="title1397">MotionCorrection.NonRigidMotionCorrection + id="a1403"> + points="418,-77.5 209,-77.5 209,-58.5 418,-58.5 418,-77.5" + id="polygon1399" /> MotionCorrection.NonRigidMotionCorrection + id="text1401">MotionCorrection.NonRigidMotionCorrection @@ -733,358 +781,297 @@ id="node15" class="node"> MotionCorrection.Block + id="title1407">MotionCorrection.Block + id="a1413"> + points="373,-19 254,-19 254,0 373,0 373,-19" + id="polygon1409" /> MotionCorrection.Block + id="text1411">MotionCorrection.Block MotionCorrection.NonRigidMotionCorrection->MotionCorrection.Block + id="title1417">MotionCorrection.NonRigidMotionCorrection->MotionCorrection.Block + d="M313.5,-58.17C313.5,-47.5 313.5,-29.65 313.5,-19.08" + id="path1419" /> MotionCorrection->MotionCorrection.Summary + id="title1422">MotionCorrection->MotionCorrection.Summary + d="M353.08,-120.14C390.99,-107.33 447.07,-88.4 479.27,-77.52" + id="path1424" /> MotionCorrection->MotionCorrection.RigidMotionCorrection + id="title1427">MotionCorrection->MotionCorrection.RigidMotionCorrection + d="M270.95,-120.91C227.71,-108.12 161.9,-88.64 124.38,-77.54" + id="path1429" /> - MotionCorrection->MotionCorrection.NonRigidMotionCorrection - - - - - Segmentation - - - - Segmentation - - - - - MotionCorrection->Segmentation + id="title1432">MotionCorrection->MotionCorrection.NonRigidMotionCorrection + d="M313.5,-116.89C313.5,-104.35 313.5,-87.64 313.5,-77.64" + id="path1434" /> MaskClassification.MaskType + id="title1437">MaskClassification.MaskType + id="a1443"> + points="738.5,-77.5 590.5,-77.5 590.5,-58.5 738.5,-58.5 738.5,-77.5" + id="polygon1439" /> MaskClassification.MaskType + id="text1441">MaskClassification.MaskType Segmentation.Mask->MaskClassification.MaskType + id="title1447">Segmentation.Mask->MaskClassification.MaskType + d="M1016.5,-124.92C1006.43,-122.14 995.02,-119.2 984.5,-117 900.51,-99.4 802.82,-85.72 737.69,-77.54" + id="path1449" /> Segmentation.Mask->Fluorescence.Trace + 
id="title1452">Segmentation.Mask->Fluorescence.Trace + d="M1029.99,-124.8C1007.92,-112.42 967.7,-89.86 945.76,-77.56" + id="path1454" /> Segmentation->Segmentation.Mask + id="title1457">Segmentation->Segmentation.Mask + d="M961.56,-197.12C979.08,-184.25 1015.12,-157.8 1033.76,-144.12" + id="path1459" /> MaskClassification + id="title1462">MaskClassification + id="a1468"> + id="ellipse1464" /> MaskClassification + id="text1466">MaskClassification Segmentation->MaskClassification + id="title1472">Segmentation->MaskClassification + d="M938.66,-201.44C904.89,-193.37 810.8,-170.88 732.5,-152 713.13,-147.33 690.67,-141.87 677.03,-138.55" + id="path1474" /> Fluorescence + id="title1477">Fluorescence + id="a1483"> + id="ellipse1479" /> Fluorescence + id="text1481">Fluorescence Segmentation->Fluorescence + id="title1487">Segmentation->Fluorescence + d="M947.94,-192.8C944.05,-180.03 937.87,-159.73 934.01,-147.03" + id="path1489" /> - - Processing->Curation - - - Processing->MotionCorrection + id="title1492">Processing->Curation + d="M671.5,-329.87C671.5,-319.8 671.5,-305.37 671.5,-294.2" + id="path1494" /> MaskClassification->MaskClassification.MaskType + id="title1497">MaskClassification->MaskClassification.MaskType + d="M664.5,-121.37C664.5,-108.67 664.5,-89.04 664.5,-77.77" + id="path1499" /> Activity.Trace + id="title1502">Activity.Trace + id="a1508"> + points="872,-19 795,-19 795,0 872,0 872,-19" + id="polygon1504" /> Activity.Trace + id="text1506">Activity.Trace @@ -1093,59 +1080,59 @@ id="edge24" class="edge"> Fluorescence.Trace->Activity.Trace + id="title1512">Fluorescence.Trace->Activity.Trace + d="M915.77,-58.42C897.54,-47.8 866.67,-29.82 848.37,-19.16" + id="path1514" /> Fluorescence->Fluorescence.Trace + id="title1517">Fluorescence->Fluorescence.Trace + d="M930.5,-121.37C930.5,-108.67 930.5,-89.04 930.5,-77.77" + id="path1519" /> Activity + id="title1522">Activity + id="a1528"> + id="ellipse1524" /> Activity + id="text1526">Activity @@ -1154,54 +1141,54 @@ id="edge26" class="edge"> Fluorescence->Activity + id="title1532">Fluorescence->Activity + d="M920.12,-126.6C901.55,-114.25 862.83,-88.5 844.1,-76.05" + id="path1534" /> Activity->Activity.Trace + id="title1537">Activity->Activity.Trace + d="M833.5,-54.74C833.5,-43.97 833.5,-28.56 833.5,-19.06" + id="path1539" /> ProcessingParamSet + id="title1542">ProcessingParamSet + id="a1548"> + points="733,-498 624,-498 624,-463 733,-463 733,-498" + id="polygon1544" /> ProcessingParamSet + id="text1546">ProcessingParamSet @@ -1210,40 +1197,40 @@ id="edge28" class="edge"> ProcessingParamSet->ProcessingTask + id="title1552">ProcessingParamSet->ProcessingTask + d="M676.81,-462.8C675.7,-451.95 674.28,-437.87 673.18,-427.05" + id="path1554" /> ProcessingMethod + id="title1557">ProcessingMethod + id="a1563"> + points="727.5,-569 629.5,-569 629.5,-534 727.5,-534 727.5,-569" + id="polygon1559" /> ProcessingMethod + id="text1561">ProcessingMethod @@ -1252,41 +1239,41 @@ id="edge29" class="edge"> ProcessingMethod->ProcessingParamSet + id="title1567">ProcessingMethod->ProcessingParamSet + d="M678.5,-533.8C678.5,-522.95 678.5,-508.87 678.5,-498.05" + id="path1569" /> MaskType + id="title1572">MaskType + id="a1578"> + points="587.5,-152 525.5,-152 525.5,-117 587.5,-117 587.5,-152" + id="polygon1574" /> MaskType + id="text1576">MaskType @@ -1295,41 +1282,41 @@ id="edge30" class="edge"> MaskType->MaskClassification.MaskType + id="title1582">MaskType->MaskClassification.MaskType + d="M584.31,-116.89C605.31,-104.35 633.28,-87.64 650.03,-77.64" + id="path1584" /> 
From 268949f0a00db6b7b181d39159f15a56151d2a89 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Fri, 29 Apr 2022 03:28:54 +0000
Subject: [PATCH 32/32] Separate commands into individual cells

---
 notebooks/03-process.ipynb      | 77 +++++++++++++++++++++++++++------
 notebooks/scripts/03-process.py | 10 ++---
 2 files changed, 67 insertions(+), 20 deletions(-)

diff --git a/notebooks/03-process.ipynb b/notebooks/03-process.ipynb
index 8b797d9..807462a 100644
--- a/notebooks/03-process.ipynb
+++ b/notebooks/03-process.ipynb
@@ -50,8 +50,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from workflow_miniscope.pipeline import *\n",
-    "from element_interface.utils import find_full_path"
"from workflow_miniscope.pipeline import *" ] }, { @@ -151,10 +150,24 @@ "outputs": [], "source": [ "session_key = dict(subject='subject1', \n", - " session_datetime='2021-01-01 00:00:01')\n", - "\n", - "session.Session.insert1(session_key)\n", - "\n", + " session_datetime='2021-01-01 00:00:01')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.Session.insert1(session_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "session.Session()" ] }, @@ -194,8 +207,15 @@ "outputs": [], "source": [ "session.SessionDirectory.insert1(dict(**session_key, \n", - " session_dir='subject1/session1'))\n", - "\n", + " session_dir='subject1/session1'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "session.SessionDirectory()" ] }, @@ -222,13 +242,28 @@ "outputs": [], "source": [ "recording_key = dict(**session_key,\n", - " recording_id=0)\n", - "\n", + " recording_id=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "miniscope.Recording.insert1(dict(**recording_key, \n", " acquisition_hardware='UCLA Miniscope', \n", " acquisition_software='Miniscope-DAQ-V4',\n", " recording_directory='subject1/session1',\n", - " recording_notes='No notes for this session.'))\n", + " recording_notes='No notes for this session.'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "miniscope.Recording()" ] }, @@ -268,8 +303,24 @@ "metadata": {}, "outputs": [], "source": [ - "populate_settings = {'display_progress': True}\n", - "miniscope.RecordingInfo.populate(**populate_settings)\n", + "populate_settings = {'display_progress': True}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "miniscope.RecordingInfo.populate(**populate_settings)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "miniscope.RecordingInfo()" ] }, diff --git a/notebooks/scripts/03-process.py b/notebooks/scripts/03-process.py index dced950..3bf245b 100644 --- a/notebooks/scripts/03-process.py +++ b/notebooks/scripts/03-process.py @@ -36,7 +36,6 @@ # + This script `activates` the DataJoint `Elements` and declares other required tables. 
 
 from workflow_miniscope.pipeline import *
-from element_interface.utils import find_full_path
 
 # ## Schema diagrams
 #
@@ -66,14 +65,12 @@
 
 session.Session.heading
 
-# +
 session_key = dict(subject='subject1', 
                    session_datetime='2021-01-01 00:00:01')
 
 session.Session.insert1(session_key)
 
 session.Session()
-# -
 
 # ## Insert an entry into `session.SessionDirectory`
 #
@@ -85,18 +82,15 @@
 
 session.SessionDirectory.heading
 
-# +
 session.SessionDirectory.insert1(dict(**session_key, 
                                       session_dir='subject1/session1'))
 
 session.SessionDirectory()
-# -
 
 # ## Insert an entry into `miniscope.Recording`
 
 miniscope.Recording.heading
 
-# +
 recording_key = dict(**session_key, 
                      recording_id=0)
 
@@ -105,8 +99,8 @@
                   acquisition_software='Miniscope-DAQ-V4',
                   recording_directory='subject1/session1',
                   recording_notes='No notes for this session.'))
+
 miniscope.Recording()
-# -
 
 # ## Populate `miniscope.RecordingInfo`
 #
@@ -120,7 +114,9 @@
 miniscope.RecordingInfo.heading
 
 populate_settings = {'display_progress': True}
+
 miniscope.RecordingInfo.populate(**populate_settings)
+
 miniscope.RecordingInfo()
 
 # ## Insert a new entry into `miniscope.ProcessingParamSet` for CaImAn
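Taken together, the cells split out in this final patch run the following ingestion
sequence; a consolidated sketch for reference, using the example keys from the
notebook and assuming the schemas activated by `workflow_miniscope.pipeline`:

    from workflow_miniscope.pipeline import session, miniscope

    # One Session per subject/datetime pair.
    session_key = dict(subject='subject1',
                       session_datetime='2021-01-01 00:00:01')
    session.Session.insert1(session_key)

    # Session directory, relative to the configured root data directory.
    session.SessionDirectory.insert1(dict(**session_key,
                                          session_dir='subject1/session1'))

    # One Recording per session; recording_id distinguishes multiple recordings.
    recording_key = dict(**session_key, recording_id=0)
    miniscope.Recording.insert1(dict(**recording_key,
                                     acquisition_hardware='UCLA Miniscope',
                                     acquisition_software='Miniscope-DAQ-V4',
                                     recording_directory='subject1/session1',
                                     recording_notes='No notes for this session.'))

    # Automatically extract recording metadata from the raw files.
    miniscope.RecordingInfo.populate(display_progress=True)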