diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ea27a584..97c8c97f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,27 +1,20 @@ { "name": "nfcore", - "image": "nfcore/gitpod:latest", - "remoteUser": "gitpod", + "image": "nfcore/devcontainer:latest", - // Configure tool-specific properties. - "customizations": { - // Configure properties specific to VS Code. - "vscode": { - // Set *default* container specific settings.json values on container create. - "settings": { - "python.defaultInterpreterPath": "/opt/conda/bin/python", - "python.linting.enabled": true, - "python.linting.pylintEnabled": true, - "python.formatting.autopep8Path": "/opt/conda/bin/autopep8", - "python.formatting.yapfPath": "/opt/conda/bin/yapf", - "python.linting.flake8Path": "/opt/conda/bin/flake8", - "python.linting.pycodestylePath": "/opt/conda/bin/pycodestyle", - "python.linting.pydocstylePath": "/opt/conda/bin/pydocstyle", - "python.linting.pylintPath": "/opt/conda/bin/pylint" - }, + "remoteUser": "root", + "privileged": true, - // Add the IDs of extensions you want installed when the container is created. - "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"] - } + "remoteEnv": { + // Workspace path on the host for mounting with docker-outside-of-docker + "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}" + }, + + "onCreateCommand": "./.devcontainer/setup.sh", + + "hostRequirements": { + "cpus": 4, + "memory": "16gb", + "storage": "32gb" } } diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100755 index 00000000..b16c2e73 --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Customise the terminal command prompt +echo "export PROMPT_DIRTRIM=2" >> $HOME/.bashrc +echo "export PS1='\[\e[3;36m\]\w ->\[\e[0m\\] '" >> $HOME/.bashrc +export PROMPT_DIRTRIM=2 +export PS1='\[\e[3;36m\]\w ->\[\e[0m\] ' + +# Update Nextflow +nextflow self-update + +# Update welcome message +echo "Welcome to the nf-core/spatialxe devcontainer!" > /usr/local/etc/vscode-dev-containers/first-run-notice.txt diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index b78de6e6..00000000 --- a/.editorconfig +++ /dev/null @@ -1,24 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true -indent_size = 4 -indent_style = space - -[*.{md,yml,yaml,html,css,scss,js,cff}] -indent_size = 2 - -# These files are edited and tested upstream in nf-core/modules -[/modules/nf-core/**] -charset = unset -end_of_line = unset -insert_final_newline = unset -trim_trailing_whitespace = unset -indent_style = unset -indent_size = unset - -[/assets/email*] -indent_size = unset diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index beb78fd6..6264525d 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# nf-core/spatialxe: Contributing Guidelines +# `nf-core/spatialxe`: Contributing Guidelines Hi there! Many thanks for taking an interest in improving nf-core/spatialxe. @@ -9,6 +9,7 @@ Please use the pre-filled template to save time. However, don't be put off by this template - other more general issues and suggestions are welcome!
Contributions to the code are even more welcome ;) +> [!NOTE] > If you need help using or modifying nf-core/spatialxe then the best place to ask is on the nf-core Slack [#spatialxe](https://nfcore.slack.com/channels/spatialxe) channel ([join our Slack here](https://nf-co.re/join/slack)). ## Contribution workflow @@ -18,13 +19,19 @@ If you'd like to write some code for nf-core/spatialxe, the standard workflow is 1. Check that there isn't already an issue about your idea in the [nf-core/spatialxe issues](https://github.com/nf-core/spatialxe/issues) to avoid duplicating work. If there isn't one already, please create one so that others know you're working on this 2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/spatialxe repository](https://github.com/nf-core/spatialxe) to your GitHub account 3. Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions) -4. Use `nf-core schema build` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). +4. Use `nf-core pipelines schema build` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). 5. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged If you're not used to this workflow with git, you can start with some [docs from GitHub](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests) or even their [excellent `git` resources](https://try.github.io/). ## Tests +You have the option to test your changes locally by running the pipeline. To receive warnings about process selectors and other `debug` information, it is recommended to use the debug profile. Execute all the tests with the following command: + +```bash +nf-test test --profile debug,test,docker --verbose +``` + When you create a pull request with changes, [GitHub Actions](https://github.com/features/actions) will run automatic tests. Typically, pull-requests are only fully reviewed when these tests are passing, though of course we can help out before then. @@ -33,7 +40,7 @@ There are typically two types of tests that run: ### Lint tests `nf-core` has a [set of guidelines](https://nf-co.re/developers/guidelines) which all pipelines must adhere to. -To enforce these and ensure that all pipelines stay in sync, we have developed a helper tool which runs checks on the pipeline code. This is in the [nf-core/tools repository](https://github.com/nf-core/tools) and once installed can be run locally with the `nf-core lint <pipeline-directory>` command. +To enforce these and ensure that all pipelines stay in sync, we have developed a helper tool which runs checks on the pipeline code. This is in the [nf-core/tools repository](https://github.com/nf-core/tools) and once installed can be run locally with the `nf-core pipelines lint <pipeline-directory>` command. If any failures or warnings are encountered, please follow the listed URL for more documentation. @@ -48,9 +55,9 @@ These tests are run both with the latest available version of `Nextflow` and als :warning: Only in the unlikely and regretful event of a release happening with a bug. -- On your own fork, make a new branch `patch` based on `upstream/master`. +- On your own fork, make a new branch `patch` based on `upstream/main` or `upstream/master`. - Fix the bug, and bump version (X.Y.Z+1). 
-- A PR should be made on `master` from patch to directly this particular bug. +- Open a pull-request from `patch` to `main`/`master` with the changes. ## Getting help @@ -58,34 +65,34 @@ For further information/help, please consult the [nf-core/spatialxe documentatio ## Pipeline contribution conventions -To make the nf-core/spatialxe code and processing logic more understandable for new contributors and to ensure quality, we semi-standardise the way the code and other contributions are written. +To make the `nf-core/spatialxe` code and processing logic more understandable for new contributors and to ensure quality, we semi-standardise the way the code and other contributions are written. ### Adding a new step If you wish to contribute a new step, please use the following coding standards: -1. Define the corresponding input channel into your new process from the expected previous process channel +1. Define the corresponding input channel into your new process from the expected previous process channel. 2. Write the process block (see below). 3. Define the output channel if needed (see below). 4. Add any new parameters to `nextflow.config` with a default (see below). -5. Add any new parameters to `nextflow_schema.json` with help text (via the `nf-core schema build` tool). +5. Add any new parameters to `nextflow_schema.json` with help text (via the `nf-core pipelines schema build` tool). 6. Add sanity checks and validation for all relevant parameters. 7. Perform local tests to validate that the new code works as expected. -8. If applicable, add a new test command in `.github/workflow/ci.yml`. +8. If applicable, add a new test in the `tests` directory. 9. Update MultiQC config `assets/multiqc_config.yml` so relevant suffixes, file name clean up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://multiqc.info/) module. 10. Add a description of the output files and if relevant any appropriate images from the MultiQC report to `docs/output.md`. ### Default values -Parameters should be initialised / defined with default values in `nextflow.config` under the `params` scope. +Parameters should be initialised / defined with default values within the `params` scope in `nextflow.config`. -Once there, use `nf-core schema build` to add to `nextflow_schema.json`. +Once there, use `nf-core pipelines schema build` to add to `nextflow_schema.json`. ### Default processes resource requirements -Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. These should generally be specified generic with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels. +Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. These should generally be specified generically with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. 
A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/main/nf_core/pipeline-template/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels. -The process resources can be passed on to the tool dynamically within the process with the `${task.cpu}` and `${task.memory}` variables in the `script:` block. +The process resources can be passed on to the tool dynamically within the process with the `${task.cpus}` and `${task.memory}` variables in the `script:` block. ### Naming schemes @@ -96,7 +103,7 @@ Please use the following naming schemes, to make it easy to understand what is g ### Nextflow version bumping -If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core bump-version --nextflow . [min-nf-version]` +If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core pipelines bump-version --nextflow . [min-nf-version]` ### Images and figures @@ -116,4 +123,3 @@ To get started: Devcontainer specs: - [DevContainer config](.devcontainer/devcontainer.json) -- [Dockerfile](.devcontainer/Dockerfile) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 94e252d8..08eca313 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -9,7 +9,6 @@ body: - [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) - [nf-core/spatialxe pipeline documentation](https://nf-co.re/spatialxe/usage) - - type: textarea id: description attributes: @@ -42,9 +41,9 @@ body: attributes: label: System information description: | - * Nextflow version _(eg. 22.10.1)_ + * Nextflow version _(eg. 23.04.0)_ * Hardware _(eg. HPC, Desktop, Cloud)_ * Executor _(eg. slurm, local, awsbatch)_ - * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter or Charliecloud)_ + * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter, Charliecloud, or Apptainer)_ * OS _(eg. CentOS Linux, macOS, Linux Mint)_ * Version of nf-core/spatialxe _(eg. 1.1, 1.5, 1.8.2)_ diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index dc9e2ce4..9276baef 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,5 +1,5 @@ name: Feature request -description: Suggest an idea for the nf-core/spatialxe pipeline. Please write in the title as the first word *module* (suggestin a new module for spatialxe) or *implement* for general feature request. +description: Suggest an idea for the nf-core/spatialxe pipeline labels: enhancement body: - type: textarea diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index b2b4d9b8..ba3d4540 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,16 +8,18 @@ These are the most common things requested on pull requests (PRs). Remember that PRs should be made against the dev branch, unless you're preparing a pipeline release. 
-Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/spatialxe/tree/master/.github/CONTRIBUTING.md) +Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/spatialxe/tree/main/.github/CONTRIBUTING.md) --> ## PR checklist - [ ] This comment contains a description of changes (with reason). - [ ] If you've fixed a bug or added code that should be tested, add tests! -- [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/spatialxe/tree/master/.github/CONTRIBUTING.md)- [ ] If necessary, also make a PR on the nf-core/spatialxe _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. -- [ ] Make sure your code lints (`nf-core lint`). +- [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/spatialxe/tree/main/.github/CONTRIBUTING.md) +- [ ] If necessary, also make a PR on the nf-core/spatialxe _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. +- [ ] Make sure your code lints (`nf-core pipelines lint`). - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`). +- [ ] Check for unexpected warnings in debug mode (`nextflow run . -profile debug,test,docker --outdir <OUTDIR>`). - [ ] Usage Documentation in `docs/usage.md` is updated. - [ ] Output Documentation in `docs/output.md` is updated. - [ ] `CHANGELOG.md` is updated. diff --git a/.github/actions/get-shards/action.yml b/.github/actions/get-shards/action.yml new file mode 100644 index 00000000..34085279 --- /dev/null +++ b/.github/actions/get-shards/action.yml @@ -0,0 +1,69 @@ +name: "Get number of shards" +description: "Get the number of nf-test shards for the current CI job" +inputs: + max_shards: + description: "Maximum number of shards allowed" + required: true + paths: + description: "Component paths to test" + required: false + tags: + description: "Tags to pass as argument for nf-test --tag parameter" + required: false +outputs: + shard: + description: "Array of shard numbers" + value: ${{ steps.shards.outputs.shard }} + total_shards: + description: "Total number of shards" + value: ${{ steps.shards.outputs.total_shards }} +runs: + using: "composite" + steps: + - name: Install nf-test + uses: nf-core/setup-nf-test@v1 + with: + version: ${{ env.NFT_VER }} + - name: Get number of shards + id: shards + shell: bash + run: | + # Run nf-test with dynamic parameter + nftest_output=$(nf-test test \ + --profile +docker \ + $(if [ -n "${{ inputs.tags }}" ]; then echo "--tag ${{ inputs.tags }}"; fi) \ + --dry-run \ + --ci \ + --changed-since HEAD^) || { + echo "nf-test command failed with exit code $?" + echo "Full output: $nftest_output" + exit 1 + } + echo "nf-test dry-run output: $nftest_output" + + # Default values for shard and total_shards + shard="[]" + total_shards=0 + + # Check if there are related tests + if echo "$nftest_output" | grep -q 'No tests to execute'; then + echo "No related tests found." + else + # Extract the number of related tests + number_of_shards=$(echo "$nftest_output" | sed -n 's|.*Executed \([0-9]*\) tests.*|\1|p') + if [[ -n "$number_of_shards" && "$number_of_shards" -gt 0 ]]; then + shards_to_run=$(( $number_of_shards < ${{ inputs.max_shards }} ? $number_of_shards : ${{ inputs.max_shards }} )) + shard=$(seq 1 "$shards_to_run" | jq -R . | jq -c -s .) + total_shards="$shards_to_run" + else + echo "Unexpected output format. 
Falling back to default values." + fi + fi + + # Write to GitHub Actions outputs + echo "shard=$shard" >> $GITHUB_OUTPUT + echo "total_shards=$total_shards" >> $GITHUB_OUTPUT + + # Debugging output + echo "Final shard array: $shard" + echo "Total number of shards: $total_shards" diff --git a/.github/actions/nf-test/action.yml b/.github/actions/nf-test/action.yml new file mode 100644 index 00000000..3b9724c7 --- /dev/null +++ b/.github/actions/nf-test/action.yml @@ -0,0 +1,111 @@ +name: "nf-test Action" +description: "Runs nf-test with common setup steps" +inputs: + profile: + description: "Profile to use" + required: true + shard: + description: "Shard number for this CI job" + required: true + total_shards: + description: "Total number of test shards (NOT the total number of matrix jobs)" + required: true + paths: + description: "Test paths" + required: true + tags: + description: "Tags to pass as argument for nf-test --tag parameter" + required: false +runs: + using: "composite" + steps: + - name: Setup Nextflow + uses: nf-core/setup-nextflow@v2 + with: + version: "${{ env.NXF_VERSION }}" + + - name: Set up Python + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 + with: + python-version: "3.14" + + - name: Install nf-test + uses: nf-core/setup-nf-test@v1 + with: + version: "${{ env.NFT_VER }}" + install-pdiff: true + + - name: Setup apptainer + if: contains(inputs.profile, 'singularity') + uses: eWaterCycle/setup-apptainer@main + + - name: Set up Singularity + if: contains(inputs.profile, 'singularity') + shell: bash + run: | + mkdir -p $NXF_SINGULARITY_CACHEDIR + mkdir -p $NXF_SINGULARITY_LIBRARYDIR + + - name: Conda setup + if: contains(inputs.profile, 'conda') + uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3 + with: + auto-update-conda: true + conda-solver: libmamba + channels: conda-forge + channel-priority: strict + conda-remove-defaults: true + + - name: Run nf-test + shell: bash + env: + NFT_WORKDIR: ${{ env.NFT_WORKDIR }} + run: | + nf-test test \ + --profile=+${{ inputs.profile }} \ + $(if [ -n "${{ inputs.tags }}" ]; then echo "--tag ${{ inputs.tags }}"; fi) \ + --ci \ + --changed-since HEAD^ \ + --verbose \ + --tap=test.tap \ + --shard ${{ inputs.shard }}/${{ inputs.total_shards }} + + # Save the absolute path of the test.tap file to the output + echo "tap_file_path=$(realpath test.tap)" >> $GITHUB_OUTPUT + + - name: Generate test summary + if: always() + shell: bash + run: | + # Add header if it doesn't exist (using a token file to track this) + if [ ! 
-f ".summary_header" ]; then + echo "# 🚀 nf-test results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Status | Test Name | Profile | Shard |" >> $GITHUB_STEP_SUMMARY + echo "|:------:|-----------|---------|-------|" >> $GITHUB_STEP_SUMMARY + touch .summary_header + fi + + if [ -f test.tap ]; then + while IFS= read -r line; do + if [[ $line =~ ^ok ]]; then + test_name="${line#ok }" + # Remove the test number from the beginning + test_name="${test_name#* }" + echo "| ✅ | ${test_name} | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + elif [[ $line =~ ^not\ ok ]]; then + test_name="${line#not ok }" + # Remove the test number from the beginning + test_name="${test_name#* }" + echo "| ❌ | ${test_name} | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + fi + done < test.tap + else + echo "| ⚠️ | No test results found | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + fi + + - name: Clean up + if: always() + shell: bash + run: | + sudo rm -rf /home/ubuntu/tests/ diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml index 7876e8c9..2f6822e4 100644 --- a/.github/workflows/awsfulltest.yml +++ b/.github/workflows/awsfulltest.yml @@ -1,34 +1,47 @@ name: nf-core AWS full size tests -# This workflow is triggered on published releases. +# This workflow is triggered on PRs opened against the main/master branch. # It can be additionally triggered manually with GitHub actions workflow dispatch button. # It runs the -profile 'test_full' on AWS batch on: + workflow_dispatch: + pull_request_review: + types: [submitted] release: types: [published] - workflow_dispatch: + jobs: - run-tower: + run-platform: name: Run AWS full tests - if: github.repository == 'nf-core/spatialxe' + # run only if the PR is approved by at least 2 reviewers and against the master/main branch or manually triggered + if: github.repository == 'nf-core/spatialxe' && github.event.review.state == 'approved' && (github.event.pull_request.base.ref == 'master' || github.event.pull_request.base.ref == 'main') || github.event_name == 'workflow_dispatch' || github.event_name == 'release' runs-on: ubuntu-latest steps: - - name: Launch workflow via tower - uses: nf-core/tower-action@v3 - # TODO nf-core: You can customise AWS full pipeline tests as required + - name: Set revision variable + id: revision + run: | + echo "revision=${{ (github.event_name == 'workflow_dispatch' || github.event_name == 'release') && github.sha || 'dev' }}" >> "$GITHUB_OUTPUT" + + - name: Launch workflow via Seqera Platform + uses: seqeralabs/action-tower-launch@v2 # Add full size test data (but still relatively small datasets for few samples) # on the `test_full.config` test runs with only one set of parameters with: - workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + workspace_id: ${{ vars.TOWER_WORKSPACE_ID }} access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} - compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} - workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/spatialxe/work-${{ github.sha }} + compute_env: ${{ vars.TOWER_COMPUTE_ENV }} + revision: ${{ steps.revision.outputs.revision }} + workdir: s3://${{ vars.AWS_S3_BUCKET }}/work/spatialxe/work-${{ steps.revision.outputs.revision }} parameters: | { - "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/spatialxe/results-${{ github.sha }}" + "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}", + "outdir": "s3://${{ 
vars.AWS_S3_BUCKET }}/spatialxe/results-${{ steps.revision.outputs.revision }}" } - profiles: test_full,aws_tower - - uses: actions/upload-artifact@v3 + profiles: test_full + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: - name: Tower debug log file - path: tower_action_*.log + name: Seqera Platform debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index 190548f4..566a8d9b 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -5,25 +5,29 @@ name: nf-core AWS test on: workflow_dispatch: jobs: - run-tower: + run-platform: name: Run AWS tests if: github.repository == 'nf-core/spatialxe' runs-on: ubuntu-latest steps: - # Launch workflow using Tower CLI tool action - - name: Launch workflow via tower - uses: nf-core/tower-action@v3 + # Launch workflow using Seqera Platform CLI tool action + - name: Launch workflow via Seqera Platform + uses: seqeralabs/action-tower-launch@v2 with: - workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + workspace_id: ${{ vars.TOWER_WORKSPACE_ID }} access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} - compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} - workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/spatialxe/work-${{ github.sha }} + compute_env: ${{ vars.TOWER_COMPUTE_ENV }} + revision: ${{ github.sha }} + workdir: s3://${{ vars.AWS_S3_BUCKET }}/work/spatialxe/work-${{ github.sha }} parameters: | { - "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/spatialxe/results-test-${{ github.sha }}" + "outdir": "s3://${{ vars.AWS_S3_BUCKET }}/spatialxe/results-test-${{ github.sha }}" } - profiles: test,aws_tower - - uses: actions/upload-artifact@v3 + profiles: test + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: - name: Tower debug log file - path: tower_action_*.log + name: Seqera Platform debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index bc167225..4f79e0f9 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -1,28 +1,30 @@ name: nf-core branch protection -# This workflow is triggered on PRs to master branch on the repository -# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` +# This workflow is triggered on PRs to `main`/`master` branch on the repository +# It fails when someone tries to make a PR against the nf-core `main`/`master` branch instead of `dev` on: pull_request_target: - branches: [master] + branches: + - main + - master jobs: test: runs-on: ubuntu-latest steps: - # PRs to the nf-core repo master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches + # PRs to the nf-core repo main/master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches - name: Check PRs if: github.repository == 'nf-core/spatialxe' run: | - { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/spatialxe ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] + { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/spatialxe ]] && [[ $GITHUB_HEAD_REF == "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] # If the above check failed, post a comment on the PR explaining the failure # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets - name: Post PR comment if: failure() - uses: 
mshick/add-pr-comment@v1 + uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2 with: message: | - ## This PR is against the `master` branch :x: + ## This PR is against the `${{github.event.pull_request.base.ref}}` branch :x: * Do not close this PR * Click _Edit_ and change the `base` to `dev` @@ -32,9 +34,9 @@ jobs: Hi @${{ github.event.pull_request.user.login }}, - It looks like this pull-request is has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `master` branch. - The `master` branch on nf-core repositories should always contain code from the latest release. - Because of this, PRs to `master` are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. + It looks like this pull-request has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) ${{github.event.pull_request.base.ref}} branch. + The ${{github.event.pull_request.base.ref}} branch on nf-core repositories should always contain code from the latest release. + Because of this, PRs to ${{github.event.pull_request.base.ref}} are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. You do not need to close this PR, you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page. Note that even after this, the test will continue to show as failing until you push a new commit. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 2978414b..00000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: nf-core CI -# This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors -on: - push: - branches: - - dev - pull_request: - release: - types: [published] - -env: - NXF_ANSI_LOG: false - -concurrency: - group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" - cancel-in-progress: true - -jobs: - test: - name: Run pipeline with test data - # Only run on push if this is the nf-core dev branch (merged PRs) - if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/spatialxe') }}" - runs-on: ubuntu-latest - strategy: - matrix: - NXF_VER: - - "22.10.1" - - "latest-everything" - steps: - - name: Check out pipeline code - uses: actions/checkout@v3 - - - name: Install Nextflow - uses: nf-core/setup-nextflow@v1 - with: - version: "${{ matrix.NXF_VER }}" - - - name: Run pipeline with test data - # TODO nf-core: You can customise CI pipeline run tests as required - # For example: adding multiple test runs with different parameters - # Remember that you can parallelise this by using strategy.matrix - run: | - nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml new file mode 100644 index 00000000..6adb0fff --- /dev/null +++ b/.github/workflows/clean-up.yml @@ -0,0 +1,24 @@ +name: "Close user-tagged issues and PRs" +on: + schedule: + - cron: "0 0 * * 0" # Once a week + +jobs: + clean-up: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: 
actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10 + with: + stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." + stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." + close-issue-message: "This issue was closed because it has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor and then staled for 20 days with no activity." + days-before-stale: 30 + days-before-close: 20 + days-before-pr-close: -1 + any-of-labels: "awaiting-changes,awaiting-feedback" + exempt-issue-labels: "WIP" + exempt-pr-labels: "WIP" + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/download_pipeline.yml b/.github/workflows/download_pipeline.yml new file mode 100644 index 00000000..6d94bcbf --- /dev/null +++ b/.github/workflows/download_pipeline.yml @@ -0,0 +1,134 @@ +name: Test successful pipeline download with 'nf-core pipelines download' + +# Run the workflow when: +# - dispatched manually +# - when a PR is opened or reopened to main/master branch +# - the head branch of the pull request is updated, i.e. if fixes for a release are pushed last minute to dev. +on: + workflow_dispatch: + inputs: + testbranch: + description: "The specific branch you wish to utilize for the test execution of nf-core pipelines download." + required: true + default: "dev" + pull_request: + branches: + - main + - master + +env: + NXF_ANSI_LOG: false + +jobs: + configure: + runs-on: ubuntu-latest + outputs: + REPO_LOWERCASE: ${{ steps.get_repo_properties.outputs.REPO_LOWERCASE }} + REPOTITLE_LOWERCASE: ${{ steps.get_repo_properties.outputs.REPOTITLE_LOWERCASE }} + REPO_BRANCH: ${{ steps.get_repo_properties.outputs.REPO_BRANCH }} + steps: + - name: Get the repository name and current branch + id: get_repo_properties + run: | + echo "REPO_LOWERCASE=${GITHUB_REPOSITORY,,}" >> "$GITHUB_OUTPUT" + echo "REPOTITLE_LOWERCASE=$(basename ${GITHUB_REPOSITORY,,})" >> "$GITHUB_OUTPUT" + echo "REPO_BRANCH=${{ github.event.inputs.testbranch || 'dev' }}" >> "$GITHUB_OUTPUT" + + download: + runs-on: ubuntu-latest + needs: configure + steps: + - name: Install Nextflow + uses: nf-core/setup-nextflow@v2 + + - name: Disk space cleanup + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 + + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 + with: + python-version: "3.14" + architecture: "x64" + + - name: Setup Apptainer + uses: eWaterCycle/setup-apptainer@4bb22c52d4f63406c49e94c804632975787312b3 # v2.0.0 + with: + apptainer-version: 1.3.4 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install git+https://github.com/nf-core/tools.git + + - name: Make a cache directory for the container images + run: | + mkdir -p ./singularity_container_images + + - name: Download the pipeline + env: + NXF_SINGULARITY_CACHEDIR: ./singularity_container_images + run: | + nf-core pipelines download ${{ needs.configure.outputs.REPO_LOWERCASE }} \ + --revision ${{ needs.configure.outputs.REPO_BRANCH }} \ + --outdir ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }} \ + --compress "none" \ + --container-system 'singularity' \ + --container-library "quay.io" -l "docker.io" -l "community.wave.seqera.io/library/" \ + --container-cache-utilisation 'amend' \ + 
--download-configuration 'yes' + + - name: Inspect download + run: tree ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }} + + - name: Inspect container images + run: tree ./singularity_container_images | tee ./container_initial + + - name: Count the downloaded number of container images + id: count_initial + run: | + image_count=$(ls -1 ./singularity_container_images | wc -l | xargs) + echo "Initial container image count: $image_count" + echo "IMAGE_COUNT_INITIAL=$image_count" >> "$GITHUB_OUTPUT" + + - name: Run the downloaded pipeline (stub) + id: stub_run_pipeline + continue-on-error: true + env: + NXF_SINGULARITY_CACHEDIR: ./singularity_container_images + NXF_SINGULARITY_HOME_MOUNT: true + run: nextflow run ./${{needs.configure.outputs.REPOTITLE_LOWERCASE }}/$( sed 's/\W/_/g' <<< ${{ needs.configure.outputs.REPO_BRANCH }}) -stub -profile test,singularity --outdir ./results + - name: Run the downloaded pipeline (stub run not supported) + id: run_pipeline + if: ${{ steps.stub_run_pipeline.outcome == 'failure' }} + env: + NXF_SINGULARITY_CACHEDIR: ./singularity_container_images + NXF_SINGULARITY_HOME_MOUNT: true + run: nextflow run ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }}/$( sed 's/\W/_/g' <<< ${{ needs.configure.outputs.REPO_BRANCH }}) -profile test,singularity --outdir ./results + + - name: Count the downloaded number of container images + id: count_afterwards + run: | + image_count=$(ls -1 ./singularity_container_images | wc -l | xargs) + echo "Post-pipeline run container image count: $image_count" + echo "IMAGE_COUNT_AFTER=$image_count" >> "$GITHUB_OUTPUT" + + - name: Compare container image counts + id: count_comparison + run: | + if [ "${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }}" -ne "${{ steps.count_afterwards.outputs.IMAGE_COUNT_AFTER }}" ]; then + initial_count=${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }} + final_count=${{ steps.count_afterwards.outputs.IMAGE_COUNT_AFTER }} + difference=$((final_count - initial_count)) + echo "$difference additional container images were downloaded at runtime. The pipeline has no support for offline runs!" + tree ./singularity_container_images > ./container_afterwards + diff ./container_initial ./container_afterwards + exit 1 + else + echo "The pipeline can be downloaded successfully!" 
+ fi + + - name: Upload Nextflow logfile for debugging purposes + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: nextflow_logfile.txt + path: .nextflow.log* + include-hidden-files: true diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix-linting.yml deleted file mode 100644 index af048ee3..00000000 --- a/.github/workflows/fix-linting.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Fix linting from a comment -on: - issue_comment: - types: [created] - -jobs: - deploy: - # Only run if comment is on a PR with the main repo, and if it contains the magic keywords - if: > - contains(github.event.comment.html_url, '/pull/') && - contains(github.event.comment.body, '@nf-core-bot fix linting') && - github.repository == 'nf-core/spatialxe' - runs-on: ubuntu-latest - steps: - # Use the @nf-core-bot token to check out so we can push later - - uses: actions/checkout@v3 - with: - token: ${{ secrets.nf_core_bot_auth_token }} - - # Action runs on the issue comment, so we don't get the PR by default - # Use the gh cli to check out the PR - - name: Checkout Pull Request - run: gh pr checkout ${{ github.event.issue.number }} - env: - GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} - - - uses: actions/setup-node@v3 - - - name: Install Prettier - run: npm install -g prettier @prettier/plugin-php - - # Check that we actually need to fix something - - name: Run 'prettier --check' - id: prettier_status - run: | - if prettier --check ${GITHUB_WORKSPACE}; then - echo "result=pass" >> $GITHUB_OUTPUT - else - echo "result=fail" >> $GITHUB_OUTPUT - fi - - - name: Run 'prettier --write' - if: steps.prettier_status.outputs.result == 'fail' - run: prettier --write ${GITHUB_WORKSPACE} - - - name: Commit & push changes - if: steps.prettier_status.outputs.result == 'fail' - run: | - git config user.email "core@nf-co.re" - git config user.name "nf-core-bot" - git config push.default upstream - git add . 
- git status - git commit -m "[automated] Fix linting with Prettier" - git push diff --git a/.github/workflows/fix_linting.yml b/.github/workflows/fix_linting.yml new file mode 100644 index 00000000..5a1c68cf --- /dev/null +++ b/.github/workflows/fix_linting.yml @@ -0,0 +1,89 @@ +name: Fix linting from a comment +on: + issue_comment: + types: [created] + +jobs: + fix-linting: + # Only run if comment is on a PR with the main repo, and if it contains the magic keywords + if: > + contains(github.event.comment.html_url, '/pull/') && + contains(github.event.comment.body, '@nf-core-bot fix linting') && + github.repository == 'nf-core/spatialxe' + runs-on: ubuntu-latest + steps: + # Use the @nf-core-bot token to check out so we can push later + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + with: + token: ${{ secrets.nf_core_bot_auth_token }} + + # indication that the linting is being fixed + - name: React on comment + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5 + with: + comment-id: ${{ github.event.comment.id }} + reactions: eyes + + # Action runs on the issue comment, so we don't get the PR by default + # Use the gh cli to check out the PR + - name: Checkout Pull Request + run: gh pr checkout ${{ github.event.issue.number }} + env: + GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} + + # Install and run pre-commit + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 + with: + python-version: "3.14" + + - name: Install pre-commit + run: pip install pre-commit + + - name: Run pre-commit + id: pre-commit + run: pre-commit run --all-files + continue-on-error: true + + # indication that the linting has finished + - name: react if linting finished successfully + if: steps.pre-commit.outcome == 'success' + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5 + with: + comment-id: ${{ github.event.comment.id }} + reactions: "+1" + + - name: Commit & push changes + id: commit-and-push + if: steps.pre-commit.outcome == 'failure' + run: | + git config user.email "core@nf-co.re" + git config user.name "nf-core-bot" + git config push.default upstream + git add . + git status + git commit -m "[automated] Fix code linting" + git push + + - name: react if linting errors were fixed + id: react-if-fixed + if: steps.commit-and-push.outcome == 'success' + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5 + with: + comment-id: ${{ github.event.comment.id }} + reactions: hooray + + - name: react if linting errors were not fixed + if: steps.commit-and-push.outcome == 'failure' + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5 + with: + comment-id: ${{ github.event.comment.id }} + reactions: confused + + - name: react if linting errors were not fixed + if: steps.commit-and-push.outcome == 'failure' + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5 + with: + issue-number: ${{ github.event.issue.number }} + body: | + @${{ github.actor }} I tried to fix the linting errors, but it didn't work. Please fix them manually. + See [CI log](https://github.com/nf-core/spatialxe/actions/runs/${{ github.run_id }}) for more details. 
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 858d622e..7a527a34 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -1,97 +1,69 @@ name: nf-core linting # This workflow is triggered on pushes and PRs to the repository. -# It runs the `nf-core lint` and markdown lint tests to ensure +# It runs the `nf-core pipelines lint` and markdown lint tests to ensure # that the code meets the nf-core guidelines. on: - push: - branches: - - dev pull_request: release: types: [published] jobs: - EditorConfig: + pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - - uses: actions/setup-node@v3 - - - name: Install editorconfig-checker - run: npm install -g editorconfig-checker - - - name: Run ECLint check - run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile') - - Prettier: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-node@v3 - - - name: Install Prettier - run: npm install -g prettier - - - name: Run Prettier --check - run: prettier --check ${GITHUB_WORKSPACE} - - PythonBlack: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Check code lints with Black - uses: psf/black@stable - - # If the above check failed, post a comment on the PR explaining the failure - - name: Post PR comment - if: failure() - uses: mshick/add-pr-comment@v1 + - name: Set up Python 3.14 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 with: - message: | - ## Python linting (`black`) is failing - - To keep the code consistent with lots of contributors, we run automated code consistency checks. - To fix this CI test, please run: - - * Install [`black`](https://black.readthedocs.io/en/stable/): `pip install black` - * Fix formatting errors in your pipeline: `black .` + python-version: "3.14" - Once you push these changes the test should pass, and you can hide this comment :+1: + - name: Install pre-commit + run: pip install pre-commit - We highly recommend setting up Black in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! - - Thanks again for your contribution! 
- repo-token: ${{ secrets.GITHUB_TOKEN }} - allow-repeats: false + - name: Run pre-commit + run: pre-commit run --all-files nf-core: runs-on: ubuntu-latest steps: - name: Check out pipeline code - uses: actions/checkout@v3 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - name: Install Nextflow - uses: nf-core/setup-nextflow@v1 + uses: nf-core/setup-nextflow@v2 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 with: - python-version: "3.7" + python-version: "3.14" architecture: "x64" + - name: read .nf-core.yml + uses: pietrobolcato/action-read-yaml@9f13718d61111b69f30ab4ac683e67a56d254e1d # 1.1.0 + id: read_yml + with: + config: ${{ github.workspace }}/.nf-core.yml + - name: Install dependencies run: | python -m pip install --upgrade pip - pip install nf-core + pip install nf-core==${{ steps.read_yml.outputs['nf_core_version'] }} + + - name: Run nf-core pipelines lint + if: ${{ github.base_ref != 'master' }} + env: + GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} + run: nf-core -l lint_log.txt pipelines lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md - - name: Run nf-core lint + - name: Run nf-core pipelines lint --release + if: ${{ github.base_ref == 'master' }} env: GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} - run: nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md + run: nf-core -l lint_log.txt pipelines lint --release --dir ${GITHUB_WORKSPACE} --markdown lint_results.md - name: Save PR number if: ${{ always() }} @@ -99,7 +71,7 @@ jobs: - name: Upload linting log file artifact if: ${{ always() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: linting-logs path: | diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml index 0bbcd30f..e6e9bc26 100644 --- a/.github/workflows/linting_comment.yml +++ b/.github/workflows/linting_comment.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download lint results - uses: dawidd6/action-download-artifact@v2 + uses: dawidd6/action-download-artifact@ac66b43f0e6a346234dd65d4d0c8fbb31cb316e5 # v11 with: workflow: linting.yml workflow_conclusion: completed @@ -21,7 +21,7 @@ jobs: run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT - name: Post PR comment - uses: marocchino/sticky-pull-request-comment@v2 + uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405 # v2 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} number: ${{ steps.pr_number.outputs.pr_number }} diff --git a/.github/workflows/nf-test.yml b/.github/workflows/nf-test.yml new file mode 100644 index 00000000..06d0f546 --- /dev/null +++ b/.github/workflows/nf-test.yml @@ -0,0 +1,147 @@ +name: Run nf-test +on: + pull_request: + paths-ignore: + - "docs/**" + - "**/meta.yml" + - "**/*.md" + - "**/*.png" + - "**/*.svg" + release: + types: [published] + workflow_dispatch: + +# Cancel if a newer run is started +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NFT_VER: "0.9.3" + NFT_WORKDIR: "~" + NXF_ANSI_LOG: false + 
NXF_SINGULARITY_CACHEDIR: ${{ github.workspace }}/.singularity + NXF_SINGULARITY_LIBRARYDIR: ${{ github.workspace }}/.singularity + +jobs: + nf-test-changes: + name: nf-test-changes + runs-on: # use self-hosted runners + - runs-on=${{ github.run_id }}-nf-test-changes + - runner=4cpu-linux-x64 + outputs: + shard: ${{ steps.set-shards.outputs.shard }} + total_shards: ${{ steps.set-shards.outputs.total_shards }} + steps: + - name: Clean Workspace # Purge the workspace in case it's running on a self-hosted runner + run: | + ls -la ./ + rm -rf ./* || true + rm -rf ./.??* || true + ls -la ./ + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + with: + fetch-depth: 0 + + - name: get number of shards + id: set-shards + uses: ./.github/actions/get-shards + env: + NFT_VER: ${{ env.NFT_VER }} + with: + max_shards: 12 + + - name: debug + run: | + echo ${{ steps.set-shards.outputs.shard }} + echo ${{ steps.set-shards.outputs.total_shards }} + + nf-test: + name: "${{ matrix.profile }} | ${{ matrix.NXF_VER }} | ${{ matrix.shard }}/${{ needs.nf-test-changes.outputs.total_shards }}" + needs: [nf-test-changes] + if: ${{ needs.nf-test-changes.outputs.total_shards != '0' }} + runs-on: # use self-hosted runners + - runs-on=${{ github.run_id }}-nf-test + - runner=4cpu-linux-x64 + - disk=large + strategy: + fail-fast: false + matrix: + shard: ${{ fromJson(needs.nf-test-changes.outputs.shard) }} + profile: [conda, docker, singularity] + isMain: + - ${{ github.base_ref == 'master' || github.base_ref == 'main' }} + # Exclude conda and singularity on dev; conda disabled on all branches + exclude: + - isMain: false + profile: "conda" + - isMain: false + profile: "singularity" + - isMain: true + profile: "conda" + NXF_VER: + - "25.04.0" + - "latest-everything" + env: + NXF_ANSI_LOG: false + TOTAL_SHARDS: ${{ needs.nf-test-changes.outputs.total_shards }} + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + with: + fetch-depth: 0 + + - name: Run nf-test + id: run_nf_test + uses: ./.github/actions/nf-test + continue-on-error: ${{ matrix.NXF_VER == 'latest-everything' }} + env: + NFT_WORKDIR: ${{ env.NFT_WORKDIR }} + NXF_VERSION: ${{ matrix.NXF_VER }} + with: + profile: ${{ matrix.profile }} + shard: ${{ matrix.shard }} + total_shards: ${{ env.TOTAL_SHARDS }} + + - name: Report test status + if: ${{ always() }} + run: | + if [[ "${{ steps.run_nf_test.outcome }}" == "failure" ]]; then + echo "::error::Test with ${{ matrix.NXF_VER }} failed" + # Add to workflow summary + echo "## ❌ Test failed: ${{ matrix.profile }} | ${{ matrix.NXF_VER }} | Shard ${{ matrix.shard }}/${{ env.TOTAL_SHARDS }}" >> $GITHUB_STEP_SUMMARY + if [[ "${{ matrix.NXF_VER }}" == "latest-everything" ]]; then + echo "::warning::Test with latest-everything failed but will not cause workflow failure. Please check if the error is expected or if it needs fixing." 
+ fi + if [[ "${{ matrix.NXF_VER }}" != "latest-everything" ]]; then + exit 1 + fi + fi + + confirm-pass: + needs: [nf-test] + if: always() + runs-on: # use self-hosted runners + - runs-on=${{ github.run_id }}-confirm-pass + - runner=2cpu-linux-x64 + steps: + - name: One or more tests failed (excluding latest-everything) + if: ${{ contains(needs.*.result, 'failure') }} + run: exit 1 + + - name: One or more tests cancelled + if: ${{ contains(needs.*.result, 'cancelled') }} + run: exit 1 + + - name: All tests ok + if: ${{ contains(needs.*.result, 'success') }} + run: exit 0 + + - name: debug-print + if: always() + run: | + echo "::group::DEBUG: `needs` Contents" + echo "DEBUG: toJSON(needs) = ${{ toJSON(needs) }}" + echo "DEBUG: toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}" + echo "::endgroup::" diff --git a/.github/workflows/release-announcements.yml b/.github/workflows/release-announcements.yml new file mode 100644 index 00000000..431d3d44 --- /dev/null +++ b/.github/workflows/release-announcements.yml @@ -0,0 +1,46 @@ +name: release-announcements +# Automatic release toot and tweet announcements +on: + release: + types: [published] + workflow_dispatch: + +jobs: + toot: + runs-on: ubuntu-latest + steps: + - name: get topics and convert to hashtags + id: get_topics + run: | + echo "topics=$(curl -s https://nf-co.re/pipelines.json | jq -r '.remote_workflows[] | select(.full_name == "${{ github.repository }}") | .topics[]' | awk '{print "#"$0}' | tr '\n' ' ')" | sed 's/-//g' >> $GITHUB_OUTPUT + + - name: get description + id: get_description + run: | + echo "description=$(curl -s https://nf-co.re/pipelines.json | jq -r '.remote_workflows[] | select(.full_name == "${{ github.repository }}") | .description')" >> $GITHUB_OUTPUT + - uses: rzr/fediverse-action@master + with: + access-token: ${{ secrets.MASTODON_ACCESS_TOKEN }} + host: "mstdn.science" # custom host if not "mastodon.social" (default) + # GitHub event payload + # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#release + message: | + Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! + ${{ steps.get_description.outputs.description }} + Please see the changelog: ${{ github.event.release.html_url }} + + ${{ steps.get_topics.outputs.topics }} #nfcore #openscience #nextflow #bioinformatics + + bsky-post: + runs-on: ubuntu-latest + steps: + - uses: zentered/bluesky-post-action@6461056ea355ea43b977e149f7bf76aaa572e5e8 # v0.3.0 + with: + post: | + Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! + + Please see the changelog: ${{ github.event.release.html_url }} + env: + BSKY_IDENTIFIER: ${{ secrets.BSKY_IDENTIFIER }} + BSKY_PASSWORD: ${{ secrets.BSKY_PASSWORD }} + # diff --git a/.github/workflows/template-version-comment.yml b/.github/workflows/template-version-comment.yml new file mode 100644 index 00000000..c5988af9 --- /dev/null +++ b/.github/workflows/template-version-comment.yml @@ -0,0 +1,46 @@ +name: nf-core template version comment +# This workflow is triggered on PRs to check if the pipeline template version matches the latest nf-core version. +# It posts a comment to the PR, even if it comes from a fork. 
+ +on: pull_request_target + +jobs: + template_version: + runs-on: ubuntu-latest + steps: + - name: Check out pipeline code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Read template version from .nf-core.yml + uses: nichmor/minimal-read-yaml@1f7205277e25e156e1f63815781db80a6d490b8f # v0.0.2 + id: read_yml + with: + config: ${{ github.workspace }}/.nf-core.yml + + - name: Install nf-core + run: | + python -m pip install --upgrade pip + pip install nf-core==${{ steps.read_yml.outputs['nf_core_version'] }} + + - name: Check nf-core outdated + id: nf_core_outdated + run: echo "OUTPUT=$(pip list --outdated | grep nf-core)" >> ${GITHUB_ENV} + + - name: Post nf-core template version comment + uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2 + if: | + contains(env.OUTPUT, 'nf-core') + with: + repo-token: ${{ secrets.NF_CORE_BOT_AUTH_TOKEN }} + allow-repeats: false + message: | + > [!WARNING] + > Newer version of the nf-core template is available. + > + > Your pipeline is using an old version of the nf-core template: ${{ steps.read_yml.outputs['nf_core_version'] }}. + > Please update your pipeline to the latest version. + > + > For more documentation on how to update your pipeline, please see the [nf-core documentation](https://github.com/nf-core/tools?tab=readme-ov-file#sync-a-pipeline-with-the-template) and [Synchronisation documentation](https://nf-co.re/docs/contributing/sync). + # diff --git a/.gitignore b/.gitignore index 5124c9ac..2ef7dde1 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,7 @@ results/ testing/ testing* *.pyc +null/ +.nf-test/ +.nf-test.log +.nf-test-* diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index 85d95ecc..00000000 --- a/.gitpod.yml +++ /dev/null @@ -1,14 +0,0 @@ -image: nfcore/gitpod:latest - -vscode: - extensions: # based on nf-core.nf-core-extensionpack - - codezombiech.gitignore # Language support for .gitignore files - # - cssho.vscode-svgviewer # SVG viewer - - esbenp.prettier-vscode # Markdown/CommonMark linting and style checking for Visual Studio Code - - eamodio.gitlens # Quickly glimpse into whom, why, and when a line or code block was changed - - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files - - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar - - mechatroner.rainbow-csv # Highlight columns in csv files in different colors - # - nextflow.nextflow # Nextflow syntax highlighting - - oderwat.indent-rainbow # Highlight indentation level - - streetsidesoftware.code-spell-checker # Spelling checker for source code diff --git a/.nf-core.yml b/.nf-core.yml index 3805dc81..08a86cf0 100644 --- a/.nf-core.yml +++ b/.nf-core.yml @@ -1 +1,24 @@ +lint: + actions_ci: false + files_exist: + - .github/workflows/awsfulltest.yml + - .github/workflows/awstest.yml + files_unchanged: + - .gitignore + - assets/nf-core-spatialxe_logo_light.png + - docs/images/nf-core-spatialxe_logo_dark.png + - docs/images/nf-core-spatialxe_logo_light.png + - .github/PULL_REQUEST_TEMPLATE.md +nf_core_version: 3.5.2 repository_type: pipeline +template: + author: Sameesh Kher, Dongze He, Florian Heyl + description: A pipeline for spatialomics Xenium In Situ data. + force: false + is_nfcore: true + name: spatialxe + org: nf-core + outdir: . 
+ skip_features: + - igenomes + version: 1.0.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..d06777a8 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,27 @@ +repos: + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + additional_dependencies: + - prettier@3.6.2 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 + hooks: + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + exclude: | + (?x)^( + .*ro-crate-metadata.json$| + modules/nf-core/.*| + subworkflows/nf-core/.*| + .*\.snap$ + )$ + - id: end-of-file-fixer + exclude: | + (?x)^( + .*ro-crate-metadata.json$| + modules/nf-core/.*| + subworkflows/nf-core/.*| + .*\.snap$ + )$ diff --git a/.prettierignore b/.prettierignore index 437d763d..2255e3e3 100644 --- a/.prettierignore +++ b/.prettierignore @@ -10,3 +10,5 @@ testing/ testing* *.pyc bin/ +.nf-test/ +ro-crate-metadata.json diff --git a/.prettierrc.yml b/.prettierrc.yml index c81f9a76..07dbd8bb 100644 --- a/.prettierrc.yml +++ b/.prettierrc.yml @@ -1 +1,6 @@ printWidth: 120 +tabWidth: 4 +overrides: + - files: "*.{md,yml,yaml,html,css,scss,js,cff}" + options: + tabWidth: 2 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..a33b527c --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "markdown.styles": ["public/vscode_markdown.css"] +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 86cab63a..9f0d1258 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## v1.0dev - [date] +## v1.0.0 - [date] Initial release of nf-core/spatialxe, created with the [nf-core](https://nf-co.re/) template. diff --git a/CITATION.cff b/CITATION.cff deleted file mode 100644 index 017666c0..00000000 --- a/CITATION.cff +++ /dev/null @@ -1,56 +0,0 @@ -cff-version: 1.2.0 -message: "If you use `nf-core tools` in your work, please cite the `nf-core` publication" -authors: - - family-names: Ewels - given-names: Philip - - family-names: Peltzer - given-names: Alexander - - family-names: Fillinger - given-names: Sven - - family-names: Patel - given-names: Harshil - - family-names: Alneberg - given-names: Johannes - - family-names: Wilm - given-names: Andreas - - family-names: Garcia - given-names: Maxime Ulysse - - family-names: Di Tommaso - given-names: Paolo - - family-names: Nahnsen - given-names: Sven -title: "The nf-core framework for community-curated bioinformatics pipelines." -version: 2.4.1 -doi: 10.1038/s41587-020-0439-x -date-released: 2022-05-16 -url: https://github.com/nf-core/tools -prefered-citation: - type: article - authors: - - family-names: Ewels - given-names: Philip - - family-names: Peltzer - given-names: Alexander - - family-names: Fillinger - given-names: Sven - - family-names: Patel - given-names: Harshil - - family-names: Alneberg - given-names: Johannes - - family-names: Wilm - given-names: Andreas - - family-names: Garcia - given-names: Maxime Ulysse - - family-names: Di Tommaso - given-names: Paolo - - family-names: Nahnsen - given-names: Sven - doi: 10.1038/s41587-020-0439-x - journal: nature biotechnology - start: 276 - end: 278 - title: "The nf-core framework for community-curated bioinformatics pipelines." 
- issue: 3 - volume: 38 - year: 2020 - url: https://dx.doi.org/10.1038/s41587-020-0439-x diff --git a/CITATIONS.md b/CITATIONS.md index 1460ce4e..542e6e38 100644 --- a/CITATIONS.md +++ b/CITATIONS.md @@ -10,10 +10,9 @@ ## Pipeline tools -- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) - - [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/) - > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924. + +> Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924. ## Software packaging/containerisation tools @@ -31,5 +30,8 @@ - [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) + > Merkel, D. (2014). Docker: lightweight linux containers for consistent development and deployment. Linux Journal, 2014(239), 2. doi: 10.5555/2600239.2600241. + - [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) + > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index f4fd052f..c089ec78 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,18 +1,20 @@ -# Code of Conduct at nf-core (v1.0) +# Code of Conduct at nf-core (v1.4) ## Our Pledge -In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core, pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: +In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: - Age +- Ability - Body size +- Caste - Familial status - Gender identity and expression - Geographical location - Level of experience - Nationality and national origins - Native language -- Physical and neurological ability +- Neurodiversity - Race or ethnicity - Religion - Sexual identity and orientation @@ -22,80 +24,133 @@ Please note that the list above is alphabetised and is therefore not ranked in a ## Preamble -> Note: This Code of Conduct (CoC) has been drafted by the nf-core Safety Officer and been edited after input from members of the nf-core team and others. "We", in this document, refers to the Safety Officer and members of the nf-core core team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will amended periodically to keep it up-to-date, and in case of any dispute, the most current version will apply. +:::note +This Code of Conduct (CoC) has been drafted by Renuka Kudva, Cris Tuñí, and Michael Heuer, with input from the nf-core Core Team and Susanna Marquez from the nf-core community. 
"We", in this document, refers to the Safety Officers and members of the nf-core Core Team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will be amended periodically to keep it up-to-date. In case of any dispute, the most current version will apply. +::: -An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). Our current safety officer is Renuka Kudva. +An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). + +Our Safety Officers are Saba Nafees, Cris Tuñí, and Michael Heuer. nf-core is a young and growing community that welcomes contributions from anyone with a shared vision for [Open Science Policies](https://www.fosteropenscience.eu/taxonomy/term/8). Open science policies encompass inclusive behaviours and we strive to build and maintain a safe and inclusive environment for all individuals. -We have therefore adopted this code of conduct (CoC), which we require all members of our community and attendees in nf-core events to adhere to in all our workspaces at all times. Workspaces include but are not limited to Slack, meetings on Zoom, Jitsi, YouTube live etc. +We have therefore adopted this CoC, which we require all members of our community and attendees of nf-core events to adhere to in all our workspaces at all times. Workspaces include, but are not limited to, Slack, meetings on Zoom, gather.town, YouTube live etc. -Our CoC will be strictly enforced and the nf-core team reserve the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities. +Our CoC will be strictly enforced and the nf-core team reserves the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities. -We ask all members of our community to help maintain a supportive and productive workspace and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC. +We ask all members of our community to help maintain supportive and productive workspaces and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC. -Questions, concerns or ideas on what we can include? Contact safety [at] nf-co [dot] re +Questions, concerns, or ideas on what we can include? Contact members of the Safety Team on Slack or email safety [at] nf-co [dot] re. ## Our Responsibilities -The safety officer is responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour. +Members of the Safety Team (the Safety Officers) are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour. -The safety officer in consultation with the nf-core core team have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
+The Safety Team, in consultation with the nf-core core team, have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this CoC, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. -Members of the core team or the safety officer who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and be subject to the same actions as others in violation of the CoC. +Members of the core team or the Safety Team who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and will be subject to the same actions as others in violation of the CoC. -## When are where does this Code of Conduct apply? +## When and where does this Code of Conduct apply? -Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events. This includes but is not limited to the following listed alphabetically and therefore in no order of preference: +Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events, such as hackathons, workshops, bytesize, and collaborative workspaces on gather.town. These guidelines include, but are not limited to, the following (listed alphabetically and therefore in no order of preference): - Communicating with an official project email address. - Communicating with community members within the nf-core Slack channel. - Participating in hackathons organised by nf-core (both online and in-person events). -- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence. -- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc. +- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence, and on the nf-core gather.town workspace. +- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, gather.town, Jitsi, YouTube live etc. - Representing nf-core on social media. This includes both official and personal accounts. ## nf-core cares 😊 -nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include but are not limited to the following (listed in alphabetical order): +nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include, but are not limited to, the following (listed in alphabetical order): - Ask for consent before sharing another community member’s personal information (including photographs) on social media. - Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity. -- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) +- Celebrate your accomplishments! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) - Demonstrate empathy towards other community members. 
(We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.) - Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can) - Focus on what is best for the team and the community. (When in doubt, ask) -- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn. +- Accept feedback, yet be unafraid to question, deliberate, and learn. - Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!) -- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**) +- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communication to be kind.**) - Take breaks when you feel like you need them. -- Using welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.) +- Use welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack) ## nf-core frowns on 😕 -The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this code of conduct. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces. +The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this CoC. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces: - Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom. - “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online. - Spamming or trolling of individuals on social media. -- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention. -- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience. +- Use of sexual or discriminatory imagery, comments, jokes, or unwelcome sexual attention. +- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion, or work experience. ### Online Trolling -The majority of nf-core interactions and events are held online. 
Unfortunately, holding events online comes with the added issue of online trolling. This is unacceptable, reports of such behaviour will be taken very seriously, and perpetrators will be excluded from activities immediately.
+The majority of nf-core interactions and events are held online. Unfortunately, holding events online comes with the risk of online trolling. This is unacceptable — reports of such behaviour will be taken very seriously and perpetrators will be excluded from activities immediately.

-All community members are required to ask members of the group they are working within for explicit consent prior to taking screenshots of individuals during video calls.
+All community members are **required** to ask members of the group they are working with for explicit consent prior to taking screenshots of individuals during video calls.

-## Procedures for Reporting CoC violations
+## Procedures for reporting CoC violations

If someone makes you feel uncomfortable through their behaviours or actions, report it as soon as possible.

-You can reach out to members of the [nf-core core team](https://nf-co.re/about) and they will forward your concerns to the safety officer(s).
+You can reach out to members of the Safety Team (Saba Nafees, Cris Tuñí, and Michael Heuer) on Slack. Alternatively, contact a member of the [nf-core core team](https://nf-co.re/about), and they will forward your concerns to the Safety Team.
+
+Issues directly concerning members of the Core Team or the Safety Team will be dealt with by other members of the core team and the safety manager — possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson and details will be shared in due course.
+
+All reports will be handled with the utmost discretion and confidentiality.
+
+You can also report any CoC violations to safety [at] nf-co [dot] re. In your email report, please do your best to include:
+
+- Your contact information.
+- Identifying information (e.g. names, nicknames, pseudonyms) of the participant who has violated the Code of Conduct.
+- The behaviour that was in violation and the circumstances surrounding the incident.
+- The approximate time of the behaviour (if different than the time the report was made).
+- Other people involved in the incident, if applicable.
+- If you believe the incident is ongoing.
+- If there is a publicly available record (e.g. mailing list record, a screenshot).
+- Any additional information.
+
+After you file a report, one or more members of our Safety Team will contact you to follow up on your report.
+
+## Who will read and handle reports
+
+All reports will be read and handled by the members of the Safety Team at nf-core.
+
+If members of the Safety Team are deemed to have a conflict of interest with a report, they will be required to recuse themselves as per our Code of Conduct and will not have access to any follow-ups.
+
+To keep this first report confidential from any of the Safety Team members, please submit your first report by direct messaging on Slack/direct email to any of the nf-core members you are comfortable disclosing the information to, and be explicit about which member(s) you do not consent to sharing the information with.
+
+## Reviewing reports
+
+After receiving the report, members of the Safety Team will review the incident report to determine whether immediate action is required, for example, whether there is immediate threat to participants’ safety.
+ +The Safety Team, in consultation with members of the nf-core core team, will assess the information to determine whether the report constitutes a Code of Conduct violation, for them to decide on a course of action. + +In the case of insufficient information, one or more members of the Safety Team may contact the reporter, the reportee, or any other attendees to obtain more information. -Issues directly concerning members of the core team will be dealt with by other members of the core team and the safety manager, and possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson, and details will be shared in due course. +Once additional information is gathered, the Safety Team will collectively review and decide on the best course of action to take, if any. The Safety Team reserves the right to not act on a report. -All reports will be handled with utmost discretion and confidentially. +## Confidentiality + +All reports, and any additional information included, are only shared with the team of safety officers (and possibly members of the core team, in case the safety officer is in violation of the CoC). We will respect confidentiality requests for the purpose of protecting victims of abuse. + +We will not name harassment victims, beyond discussions between the safety officer and members of the nf-core team, without the explicit consent of the individuals involved. + +## Enforcement + +Actions taken by the nf-core’s Safety Team may include, but are not limited to: + +- Asking anyone to stop a behaviour. +- Asking anyone to leave the event and online spaces either temporarily, for the remainder of the event, or permanently. +- Removing access to the gather.town and Slack, either temporarily or permanently. +- Communicating to all participants to reinforce our expectations for conduct and remind what is unacceptable behaviour; this may be public for practical reasons. +- Communicating to all participants that an incident has taken place and how we will act or have acted — this may be for the purpose of letting event participants know we are aware of and dealing with the incident. +- Banning anyone from participating in nf-core-managed spaces, future events, and activities, either temporarily or permanently. +- No action. ## Attribution and Acknowledgements @@ -106,6 +161,22 @@ All reports will be handled with utmost discretion and confidentially. ## Changelog -### v1.0 - March 12th, 2021 +### v1.4 - February 8th, 2022 + +- Included a new member of the Safety Team. Corrected a typographical error in the text. + +### v1.3 - December 10th, 2021 + +- Added a statement that the CoC applies to nf-core gather.town workspaces. Corrected typographical errors in the text. + +### v1.2 - November 12th, 2021 + +- Removed information specific to reporting CoC violations at the Hackathon in October 2021. + +### v1.1 - October 14th, 2021 + +- Updated with names of new Safety Officers and specific information for the hackathon in October 2021. + +### v1.0 - March 15th, 2021 - Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC. 
diff --git a/LICENSE b/LICENSE index 6cc79cbb..df908d32 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) Florian Heyl +Copyright (c) The nf-core/spatialxe team Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 7ad9a617..6eafaeef 100644 --- a/README.md +++ b/README.md @@ -1,69 +1,177 @@ -# ![nf-core/spatialxe](docs/images/nf-core-spatialxe_logo_light.png#gh-light-mode-only) ![nf-core/spatialxe](docs/images/nf-core-spatialxe_logo_dark.png#gh-dark-mode-only) - -[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/spatialxe/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX) - -[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/) -[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/) +

+<h1>
+  <picture>
+    <source media="(prefers-color-scheme: dark)" srcset="docs/images/nf-core-spatialxe_logo_dark.png">
+    <img alt="nf-core/spatialxe" src="docs/images/nf-core-spatialxe_logo_light.png">
+  </picture>
+</h1>

+ +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new/nf-core/spatialxe) +[![GitHub Actions CI Status](https://github.com/nf-core/spatialxe/actions/workflows/nf-test.yml/badge.svg)](https://github.com/nf-core/spatialxe/actions/workflows/nf-test.yml) +[![GitHub Actions Linting Status](https://github.com/nf-core/spatialxe/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/spatialxe/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/spatialxe/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX) +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com) + +[![Nextflow](https://img.shields.io/badge/version-%E2%89%A525.04.0-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/) +[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.4.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.4.1) [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) -[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/spatialxe) +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/spatialxe) -[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23spatialxe-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/spatialxe)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23spatialxe-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/spatialxe)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) ## Introduction -**nf-core/spatialxe** is a bioinformatics best-practice processing and quality control pipeline for Xenium data. **The pipeline is currently under developement and not completed yet!**. The current plan for the pipeline implementation is shown in the metromap below. Please note that the pipeline steps and methods might change as we move forward in the development cycle. +**nf-core/spatialxe** is a bioinformatics best-practice processing and quality control pipeline for Xenium data. The current plan for the pipeline implementation is shown in the metromap below. 
**The pipeline is under active development and changes might occur frequently**.

![nf-core/spatialxe-metromap](docs/images/spatialxe-metromap.png)

-The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!
-
-
+> [!NOTE]
+> We are currently testing the pipeline for the [10x Atera system](https://www.10xgenomics.com/platforms/atera).
+
+## Tools supported
+
+The pipeline supports the following tools:
+
+- Segmentation methods:
+  - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)
+  - [Cellpose](https://doi.org/10.1038/s41592-020-01018-x)
+  - [Xenium Ranger (XR)](https://www.10xgenomics.com/support/software/xenium-ranger/latest)
+  - [StarDist](https://doi.org/10.48550/arXiv.2203.02284)
+- Segmentation-free methods:
+  - [Ficture](https://doi.org/10.1038/s41592-024-02415-2)
+  - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)
+- Transcript assignment methods:
+  - [Segger](https://doi.org/10.1101/2025.03.14.643160)
+  - [Proseg](https://doi.org/10.1038/s41592-025-02697-0)
+- Utility methods:
+  - [SpatialData](https://doi.org/10.1038/s41592-024-02212-x)
+  - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)
+- QC methods:
+  - [MultiQC Xenium Extra Plugin](https://github.com/MultiQC/xenium-extra)
+  - [OPT](https://github.com/JEFworks-Lab/off-target-probe-tracker)
+
+## Usage

On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/spatialxe/results).

-## Pipeline summary
+> [!NOTE]
+> The pipeline does not currently support Conda. We are working on it.

## Quick Start

-1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)
+
+First, prepare a samplesheet with your input data. Each row names a sample, the path to its Xenium output bundle and, optionally, a morphology image.
+
+`samplesheet.csv`:
+
+```csv
+sample,bundle,image
+test_sample,/path/to/xenium-bundle,/path/to/morphology.ome.tif
+```
+
+Now, you can run the pipeline using:
+
+### Run image-based segmentation mode
+
+`CELLPOSE -> BAYSOR -> XR-IMPORT_SEGMENTATION -> SPATIALDATA -> QC`
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode image
+```
+
+### Run coordinate-based segmentation mode
-2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.
+
+`PROSEG -> PROSEG2BAYSOR -> XR-IMPORT_SEGMENTATION -> SPATIALDATA -> QC`
-3. Download the pipeline and test it on a minimal dataset with a single command:
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode coordinate
+```
-
-   ```bash
-   nextflow run nf-core/spatialxe -profile test,YOURPROFILE --outdir <OUTDIR>
-   ```
+
+### Run segfree mode
-   Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.
+
+`BAYSOR_SEGFREE`
-
-   > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.
-   > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
-   > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.
-   > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode segfree
+```
-4. Start running your own analysis!
+
+### Run preview mode
-
+`BAYSOR_PREVIEW`
-
-   ```bash
-   nextflow run nf-core/spatialxe --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
-   ```
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode preview
+```
-## Documentation
+
+### Run just the quality control
-The nf-core/spatialxe pipeline comes with documentation about the pipeline [usage](https://nf-co.re/spatialxe/usage), [parameters](https://nf-co.re/spatialxe/parameters) and [output](https://nf-co.re/spatialxe/output).
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode qc
+```
+
+### Additional information
+
+> [!WARNING]
+> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).
+
+For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/spatialxe/usage) and the [parameter documentation](https://nf-co.re/spatialxe/parameters).
+
+## Pipeline output
+
+To see the results of an example test run with a full-size dataset refer to the [results](https://nf-co.re/spatialxe/results) tab on the nf-core website pipeline page.
+For more details about the output files and reports, please refer to the
+[output documentation](https://nf-co.re/spatialxe/output).
+
+## Runtime and resource estimations
+
+| Tool                      | Compute | Runtime (min / med / max) | Peak RSS (min / med / max) |
+| ------------------------- | ------- | ------------------------- | -------------------------- |
+| Cellpose                  | GPU     | 1m / 4m / 1.4h            | 10 GB / 26 GB / 554 GB     |
+| Cellpose                  | CPU     | 1.3h / 2.3h / 6.5h        | 161 GB / 426 GB / 1115 GB  |
+| StarDist                  | GPU     | 1m / 4m / 7m              | 5 GB / 12 GB / 18 GB       |
+| StarDist                  | CPU     | 5m / 6m / 7m              | 18 GB / 18 GB / 18 GB      |
+| Segger (create_dataset)   | GPU     | 2m / 9m / 31m             | 1.7 GB / 14 GB / 50 GB     |
+| Segger (create_dataset)   | CPU     | 13m / 21m / 46m           | 13 GB / 19 GB / 49 GB      |
+| Segger (train)            | GPU     | 10m / 43m / 2.9h          | 30 GB / 33 GB / 60 GB      |
+| Segger (predict)          | GPU     | 2m / 16m / 59m            | 10 GB / 25 GB / 87 GB      |
+| Baysor (whole-image)      | CPU     | 2m / 30m / 17h            | 6 GB / 10 GB / 650 GB      |
+| Baysor (tiled)            | CPU     | 1m / 18m / 13h            | 0.2 GB / 34 GB / 530 GB    |
+| Proseg                    | CPU     | 1m / 18m / 6.8h           | 279 MB / 3.8 GB / 136 GB   |
+| XeniumRanger (resegment)  | CPU     | 18m / 39m / 3.7h          | 28 GB / 54 GB / 60 GB      |
+| XeniumRanger (import_seg) | CPU     | 2m / 7m / 2.7h            | 2.6 GB / 11 GB / 51 GB     |
+| Ficture (preprocess)      | CPU     | 3m / 4m / 13m             | 331 MB / 357 MB / 21 GB    |
+
+- Cellpose GPU vs CPU: ~35x faster on GPU (4m median vs 2.3h), ~16x less peak memory (26 GB vs 426 GB)
+- Segger: the only tool that truly requires a GPU, for all three steps (create_dataset, train, predict)
+- StarDist: very fast on CPU; a GPU is not necessary to run its default model
+
+A minimal sketch of requesting a GPU through a custom config is given in the [GPU configuration example](#gpu-configuration-example) below.

## Credits

-nf-core/spatialxe was originally written by Sameesh Kher (khersameesh24) and Florian Heyl (heylf).
+nf-core/spatialxe is mainly developed by [Sameesh Kher](https://github.com/khersameesh24), [Dongze He](https://github.com/dongzehe), and [Florian Heyl](https://github.com/heylf).

We thank the following people for their extensive assistance in the development of this pipeline:

- Tobias Krause
- Krešimir Beštak (kbestak)
- Matthias Hörtenhuber (mashehu)
+- Maxime Garcia (maxulysse)
+- Kübra Narcı (kubranarci)

## Contributions and Support

@@ -73,10 +181,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `#
## Citations

-
-
-
-
+
An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.
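+
+## GPU configuration example
+
+As the runtime table above shows, Cellpose and Segger benefit most from a GPU. The snippet below is a minimal sketch of a custom config, passed to Nextflow with `-c`, that requests one. The `CELLPOSE` process selector and the Docker `--gpus` option are assumptions; adapt them to the pipeline's actual process names and to your executor:
+
+```groovy
+// gpu.config: a hypothetical example; adjust the selector to the real process name
+process {
+    withName: 'CELLPOSE' {
+        accelerator = 1                  // request one GPU from the executor
+        containerOptions = '--gpus all'  // expose host GPUs when running with Docker
+    }
+}
+```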
diff --git a/assets/config/xenium.toml b/assets/config/xenium.toml new file mode 100644 index 00000000..c7740146 --- /dev/null +++ b/assets/config/xenium.toml @@ -0,0 +1,15 @@ +[data] +x = "x_location" +y = "y_location" +z = "z_location" +gene = "feature_name" +min_molecules_per_gene = 10 +exclude_genes = "NegControl*,BLANK_*,antisense_*" +min_molecules_per_cell = 50 + +[segmentation] +unassigned_prior_label = "UNASSIGNED" +prior_segmentation_confidence = 0.5 + +[plotting] +min_pixels_per_cell = 10 diff --git a/assets/email_template.html b/assets/email_template.html index f7b23204..35fd954c 100644 --- a/assets/email_template.html +++ b/assets/email_template.html @@ -4,7 +4,7 @@ - + nf-core/spatialxe Pipeline Report @@ -12,7 +12,7 @@ -

nf-core/spatialxe v${version}

+

nf-core/spatialxe ${version}

Run Name: $runName

<% if (!success){ diff --git a/assets/email_template.txt b/assets/email_template.txt index 9160b1fa..f92d5849 100644 --- a/assets/email_template.txt +++ b/assets/email_template.txt @@ -4,7 +4,7 @@ |\\ | |__ __ / ` / \\ |__) |__ } { | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-, `._,._,' - nf-core/spatialxe v${version} + nf-core/spatialxe ${version} ---------------------------------------------------- Run Name: $runName diff --git a/assets/example_samplesheet.csv b/assets/example_samplesheet.csv new file mode 100644 index 00000000..9cc36cf4 --- /dev/null +++ b/assets/example_samplesheet.csv @@ -0,0 +1,2 @@ +sample,bundle,image +xenium_prime_mouse_ileum,/home/user/raw_data/xenium/Xenium_Prime_Mouse_Ileum_tiny_outs,/home/user/raw_data/xenium/Xenium_Prime_Mouse_Ileum_tiny_outs/morphology.ome.tif diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml index 761ee111..c1fbf537 100644 --- a/assets/methods_description_template.yml +++ b/assets/methods_description_template.yml @@ -3,17 +3,20 @@ description: "Suggested text and references to use when describing pipeline usag section_name: "nf-core/spatialxe Methods Description" section_href: "https://github.com/nf-core/spatialxe" plot_type: "html" -## TODO nf-core: Update the HTML below to your prefered methods description, e.g. add publication citation for this pipeline ## You inject any metadata in the Nextflow '${workflow}' object data: |

Methods

-

Data was processed using nf-core/spatialxe v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020).

+

Data was processed using nf-core/spatialxe v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020), utilising reproducible software environments from the Bioconda (Grüning et al., 2018) and Biocontainers (da Veiga Leprevost et al., 2017) projects.

The pipeline was executed with Nextflow v${workflow.nextflow.version} (Di Tommaso et al., 2017) with the following command:

${workflow.commandLine}
+

${tool_citations}

References

    -
  • Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. https://doi.org/10.1038/nbt.3820
  • -
  • Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. https://doi.org/10.1038/s41587-020-0439-x
  • +
  • Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. doi: 10.1038/nbt.3820
  • +
  • Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. doi: 10.1038/s41587-020-0439-x
  • +
• Grüning, B., Dale, R., Sjödin, A., Chapman, B. A., Rowe, J., Tomkins-Tinch, C. H., Valieris, R., Köster, J., & Bioconda Team. (2018). Bioconda: sustainable and comprehensive software distribution for the life sciences. Nature Methods, 15(7), 475–476. doi: 10.1038/s41592-018-0046-7
  • +
• da Veiga Leprevost, F., Grüning, B. A., Alves Aflitos, S., Röst, H. L., Uszkoreit, J., Barsnes, H., Vaudel, M., Moreno, P., Gatto, L., Weber, J., Bai, M., Jimenez, R. C., Sachsenberg, T., Pfeuffer, J., Vera Alvarez, R., Griss, J., Nesvizhskii, A. I., & Perez-Riverol, Y. (2017). BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics (Oxford, England), 33(16), 2580–2582. doi: 10.1093/bioinformatics/btx192
  • + ${tool_bibliography}
Notes:
diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml
index c2c4948b..f0862f78 100644
--- a/assets/multiqc_config.yml
+++ b/assets/multiqc_config.yml
@@ -1,7 +1,5 @@
report_comment: >
-  This report has been generated by the nf-core/spatialxe
-  analysis pipeline. For information about how to interpret these results, please see the
-  documentation.
+  This report has been generated by the nf-core/spatialxe analysis pipeline. For information about how to interpret these results, please see the documentation.
report_section_order:
  "nf-core-spatialxe-methods-description":
    order: -1000
@@ -11,3 +9,35 @@ report_section_order:
    order: -1002

export_plots: true
+
+disable_version_detection: true
+
+run_modules:
+  - xenium
+
+module_order:
+  - xenium
+
+sp:
+  cell_feature_matrix:
+    fn: cell_feature_matrix.h5
+  cells:
+    fn: cells.parquet
+  experiment:
+    fn: experiment.xenium
+    num_lines: 50
+  metrics:
+    contents: num_cells_detected
+    fn: metrics_summary.csv
+    num_lines: 5
+  transcripts:
+    fn: transcripts.parquet
+
+custom_data:
+  focus_density_plot:
+    pconfig:
+      title: "Focus Score: Per Sequence Density"
+      xlab: "CCFS Focus Score"
+      ylab: "Density"
+      ymin: 0
+      logswitch: false
diff --git a/assets/nf-core-spatialxe_logo_light.png b/assets/nf-core-spatialxe_logo_light.png
index 42f49bfe..f74a46fb 100644
Binary files a/assets/nf-core-spatialxe_logo_light.png and b/assets/nf-core-spatialxe_logo_light.png differ
diff --git a/assets/samplesheet.csv b/assets/samplesheet.csv
index 5f653ab7..f5710dab 100644
--- a/assets/samplesheet.csv
+++ b/assets/samplesheet.csv
@@ -1,3 +1,2 @@
-sample,fastq_1,fastq_2
-SAMPLE_PAIRED_END,/path/to/fastq/files/AEG588A1_S1_L002_R1_001.fastq.gz,/path/to/fastq/files/AEG588A1_S1_L002_R2_001.fastq.gz
-SAMPLE_SINGLE_END,/path/to/fastq/files/AEG588A4_S4_L003_R1_001.fastq.gz,
+sample,bundle,image
+test_run,https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe/xenium_bundle.tar.gz
diff --git a/assets/schema_input.json b/assets/schema_input.json
index 82777bc7..2dfb357a 100644
--- a/assets/schema_input.json
+++ b/assets/schema_input.json
@@ -1,5 +1,5 @@
{
-    "$schema": "http://json-schema.org/draft-07/schema",
+    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "$id": "https://raw.githubusercontent.com/nf-core/spatialxe/master/assets/schema_input.json",
    "title": "nf-core/spatialxe pipeline - params.input schema",
    "description": "Schema for the file provided with params.input",
@@ -10,27 +10,20 @@
            "sample": {
                "type": "string",
                "pattern": "^\\S+$",
-                "errorMessage": "Sample name must be provided and cannot contain spaces"
+                "errorMessage": "Sample name must be provided and cannot contain spaces",
+                "meta": ["id"]
            },
-            "fastq_1": {
+            "bundle": {
                "type": "string",
-                "pattern": "^\\S+\\.f(ast)?q\\.gz$",
-                "errorMessage": "FastQ file for reads 1 must be provided, cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'"
+                "pattern": "^\\S+$",
+                "errorMessage": "Path to the Xenium output bundle must be provided and cannot contain spaces"
            },
-            "fastq_2": {
-                "errorMessage": "FastQ file for reads 2 cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'",
-                "anyOf": [
-                    {
-                        "type": "string",
-                        "pattern": "^\\S+\\.f(ast)?q\\.gz$"
-                    },
-                    {
-                        "type": "string",
-                        "maxLength": 0
-                    }
-                ]
+            "image": {
+                "type": "string",
+                "pattern": "^\\S+$",
+                "errorMessage": "Image path cannot contain spaces; leave the field empty if there is no image"
            }
        },
-        "required": ["sample", "fastq_1"]
+        "required": ["sample", "bundle"]
    }
}
diff --git a/assets/slackreport.json b/assets/slackreport.json
index 043d02f2..c139eab6 100644
--- a/assets/slackreport.json
+++ b/assets/slackreport.json
@@ -3,7 +3,7 @@
        {
            "fallback": "Plain-text summary of the attachment.",
            "color": "<% if (success) { %>good<% } else { %>danger<%} %>",
-            "author_name": "sanger-tol/readmapping v${version} - ${runName}",
+            "author_name": "nf-core/spatialxe ${version} - ${runName}",
            "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico",
            "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>",
            "fields": [
diff --git a/bin/baysor_create_dataset.py b/bin/baysor_create_dataset.py
new file mode 100755
index 00000000..4e5a263a
--- /dev/null
+++ b/bin/baysor_create_dataset.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+"""
+Create a sampled dataset for Baysor preview mode.
+
+Reads a CSV transcript file and randomly samples a fraction of rows,
+writing the result to a new CSV file.
+"""
+
+import argparse
+import csv
+import os
+import random
+from pathlib import Path
+
+
+class BaysorPreview:
+    """
+    Utility class to generate the Baysor preview dataset.
+    """
+
+    @staticmethod
+    def generate_dataset(
+        transcripts: Path,
+        sampled_transcripts: Path,
+        sample_fraction: float = 0.3,
+        random_state: int = 42,
+        prefix: str = ""
+    ) -> None:
+        """
+        Read a CSV file, randomly sample a fraction of its rows,
+        and write the result to a new .csv file.
+
+        Args:
+            transcripts: unzipped transcripts.csv from the Xenium bundle
+            sampled_transcripts: randomly subsampled transcripts.csv file
+            sample_fraction: Fraction of rows to sample
+            random_state: Seed for reproducibility
+            prefix: Output directory prefix
+        """
+
+        random.seed(random_state)
+        output_path = f"{prefix}/{sampled_transcripts}"
+        os.makedirs(os.path.dirname(output_path), exist_ok=True)
+        with open(transcripts, mode='rt', newline='') as infile, \
+                open(output_path, mode='wt', newline='') as outfile:
+
+            reader = csv.reader(infile)
+            writer = csv.writer(outfile)
+
+            # copy the header line unchanged
+            header = next(reader)
+            writer.writerow(header)
+
+            # keep each row with probability sample_fraction (Bernoulli sampling)
+            for row in reader:
+                if random.random() < float(sample_fraction):
+                    writer.writerow(row)
+
+        return None
+
+
+def main() -> None:
+    """
+    Run create dataset as an nf module.
+    """
+    parser = argparse.ArgumentParser(
+        description="Create sampled dataset for Baysor preview"
+    )
+    parser.add_argument(
+        "--transcripts", required=True,
+        help="Path to transcripts CSV file"
+    )
+    parser.add_argument(
+        "--sample-fraction", required=True, type=float,
+        help="Fraction of rows to sample"
+    )
+    parser.add_argument(
+        "--prefix", required=True,
+        help="Output directory prefix"
+    )
+    args = parser.parse_args()
+
+    sampled_transcripts = "sampled_transcripts.csv"
+
+    # generate the subsampled dataset
+    BaysorPreview.generate_dataset(
+        transcripts=args.transcripts,
+        sampled_transcripts=sampled_transcripts,
+        sample_fraction=args.sample_fraction,
+        prefix=args.prefix
+    )
+
+    return None
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/baysor_preprocess_transcripts.py b/bin/baysor_preprocess_transcripts.py
new file mode 100755
index 00000000..2662f83c
--- /dev/null
+++ b/bin/baysor_preprocess_transcripts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+"""
+Preprocess Xenium transcripts for Baysor segmentation.
+
+Filters transcripts based on quality score and spatial coordinate thresholds,
+removes negative control probes, and outputs a filtered CSV for Baysor compatibility.
+"""
+
+import argparse
+import os
+
+import pandas as pd
+
+
+def filter_transcripts(
+    transcripts: str,
+    min_qv: float = 20.0,
+    min_x: float = 0.0,
+    max_x: float = 24000.0,
+    min_y: float = 0.0,
+    max_y: float = 24000.0,
+    prefix: str = "",
+) -> None:
+    """
+    Filter transcripts based on the specified thresholds.
+
+    Args:
+        transcripts: Path to transcripts parquet file
+        min_qv: Minimum Q-Score to pass filtering
+        min_x: Minimum x-coordinate threshold
+        max_x: Maximum x-coordinate threshold
+        min_y: Minimum y-coordinate threshold
+        max_y: Maximum y-coordinate threshold
+        prefix: Output directory prefix
+    """
+    df = pd.read_parquet(transcripts, engine="pyarrow")
+
+    # filter transcripts df with thresholds, ignore negative controls;
+    # copy so the cell_id assignments below operate on an independent frame
+    filtered_df = df[
+        (df["qv"] >= min_qv)
+        & (df["x_location"] >= min_x)
+        & (df["x_location"] <= max_x)
+        & (df["y_location"] >= min_y)
+        & (df["y_location"] <= max_y)
+        & (~df["feature_name"].str.startswith("NegControlProbe_"))
+        & (~df["feature_name"].str.startswith("antisense_"))
+        & (~df["feature_name"].str.startswith("NegControlCodeword_"))
+        & (~df["feature_name"].str.startswith("BLANK_"))
+    ].copy()
+
+    # change cell_id of cell-free transcripts to "0" (Baysor's no-cell sentinel).
+    # Modern Xenium stores cell_id as a string ("UNASSIGNED" for cell-free transcripts);
+    # legacy Xenium used integer -1. Normalize to string and handle both cases — pandas 3
+    # rejects mixing int values into a string-dtype column.
+    filtered_df["cell_id"] = filtered_df["cell_id"].astype(str)
+    neg_cell_row = filtered_df["cell_id"].isin(["-1", "UNASSIGNED"])
+    filtered_df.loc[neg_cell_row, "cell_id"] = "0"
+
+    # Output filtered transcripts as CSV for Baysor 0.7.1 compatibility.
+    # Baysor's Julia Parquet.jl cannot read modern pyarrow Parquet files
+    # (pyarrow 15+ writes size_statistics Thrift field 16 unconditionally,
+    # which Baysor's old Thrift deserializer doesn't recognize).
+    os.makedirs(prefix, exist_ok=True)
+    filtered_df.to_csv(f"{prefix}/filtered_transcripts.csv", index=False)
+
+    return None
+
+
+def main() -> None:
+    """
+    Run preprocess transcripts as an nf module.
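+
+    Illustrative invocation (file names are hypothetical; all flags are
+    defined in the argument parser below):
+
+        baysor_preprocess_transcripts.py \
+            --transcripts transcripts.parquet \
+            --prefix out \
+            --min-qv 20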
+ """ + parser = argparse.ArgumentParser( + description="Preprocess Xenium transcripts for Baysor" + ) + parser.add_argument( + "--transcripts", required=True, help="Path to transcripts parquet file" + ) + parser.add_argument("--prefix", required=True, help="Output directory prefix") + parser.add_argument( + "--min-qv", + type=float, + default=20.0, + help="Minimum Q-Score threshold (default: 20.0)", + ) + parser.add_argument( + "--min-x", + type=float, + default=0.0, + help="Minimum x-coordinate threshold (default: 0.0)", + ) + parser.add_argument( + "--max-x", + type=float, + default=24000.0, + help="Maximum x-coordinate threshold (default: 24000.0)", + ) + parser.add_argument( + "--min-y", + type=float, + default=0.0, + help="Minimum y-coordinate threshold (default: 0.0)", + ) + parser.add_argument( + "--max-y", + type=float, + default=24000.0, + help="Maximum y-coordinate threshold (default: 24000.0)", + ) + args = parser.parse_args() + + filter_transcripts( + transcripts=args.transcripts, + min_qv=args.min_qv, + min_x=args.min_x, + max_x=args.max_x, + min_y=args.min_y, + max_y=args.max_y, + prefix=args.prefix, + ) + + return None + + +if __name__ == "__main__": + main() diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py deleted file mode 100755 index 11b15572..00000000 --- a/bin/check_samplesheet.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env python - - -"""Provide a command line tool to validate and transform tabular samplesheets.""" - - -import argparse -import csv -import logging -import sys -from collections import Counter -from pathlib import Path - -logger = logging.getLogger() - - -class RowChecker: - """ - Define a service that can validate and transform each given row. - - Attributes: - modified (list): A list of dicts, where each dict corresponds to a previously - validated and transformed row. The order of rows is maintained. - - """ - - VALID_FORMATS = ( - ".fq.gz", - ".fastq.gz", - ) - - def __init__( - self, - sample_col="sample", - first_col="fastq_1", - second_col="fastq_2", - single_col="single_end", - **kwargs, - ): - """ - Initialize the row checker with the expected column names. - - Args: - sample_col (str): The name of the column that contains the sample name - (default "sample"). - first_col (str): The name of the column that contains the first (or only) - FASTQ file path (default "fastq_1"). - second_col (str): The name of the column that contains the second (if any) - FASTQ file path (default "fastq_2"). - single_col (str): The name of the new column that will be inserted and - records whether the sample contains single- or paired-end sequencing - reads (default "single_end"). - - """ - super().__init__(**kwargs) - self._sample_col = sample_col - self._first_col = first_col - self._second_col = second_col - self._single_col = single_col - self._seen = set() - self.modified = [] - - def validate_and_transform(self, row): - """ - Perform all validations on the given row and insert the read pairing status. - - Args: - row (dict): A mapping from column headers (keys) to elements of that row - (values). - - """ - self._validate_sample(row) - self._validate_first(row) - self._validate_second(row) - self._validate_pair(row) - self._seen.add((row[self._sample_col], row[self._first_col])) - self.modified.append(row) - - def _validate_sample(self, row): - """Assert that the sample name exists and convert spaces to underscores.""" - if len(row[self._sample_col]) <= 0: - raise AssertionError("Sample input is required.") - # Sanitize samples slightly. 
- row[self._sample_col] = row[self._sample_col].replace(" ", "_") - - def _validate_first(self, row): - """Assert that the first FASTQ entry is non-empty and has the right format.""" - if len(row[self._first_col]) <= 0: - raise AssertionError("At least the first FASTQ file is required.") - self._validate_fastq_format(row[self._first_col]) - - def _validate_second(self, row): - """Assert that the second FASTQ entry has the right format if it exists.""" - if len(row[self._second_col]) > 0: - self._validate_fastq_format(row[self._second_col]) - - def _validate_pair(self, row): - """Assert that read pairs have the same file extension. Report pair status.""" - if row[self._first_col] and row[self._second_col]: - row[self._single_col] = False - first_col_suffix = Path(row[self._first_col]).suffixes[-2:] - second_col_suffix = Path(row[self._second_col]).suffixes[-2:] - if first_col_suffix != second_col_suffix: - raise AssertionError("FASTQ pairs must have the same file extensions.") - else: - row[self._single_col] = True - - def _validate_fastq_format(self, filename): - """Assert that a given filename has one of the expected FASTQ extensions.""" - if not any(filename.endswith(extension) for extension in self.VALID_FORMATS): - raise AssertionError( - f"The FASTQ file has an unrecognized extension: {filename}\n" - f"It should be one of: {', '.join(self.VALID_FORMATS)}" - ) - - def validate_unique_samples(self): - """ - Assert that the combination of sample name and FASTQ filename is unique. - - In addition to the validation, also rename all samples to have a suffix of _T{n}, where n is the - number of times the same sample exist, but with different FASTQ files, e.g., multiple runs per experiment. - - """ - if len(self._seen) != len(self.modified): - raise AssertionError("The pair of sample name and FASTQ must be unique.") - seen = Counter() - for row in self.modified: - sample = row[self._sample_col] - seen[sample] += 1 - row[self._sample_col] = f"{sample}_T{seen[sample]}" - - -def read_head(handle, num_lines=10): - """Read the specified number of lines from the current position in the file.""" - lines = [] - for idx, line in enumerate(handle): - if idx == num_lines: - break - lines.append(line) - return "".join(lines) - - -def sniff_format(handle): - """ - Detect the tabular format. - - Args: - handle (text file): A handle to a `text file`_ object. The read position is - expected to be at the beginning (index 0). - - Returns: - csv.Dialect: The detected tabular format. - - .. _text file: - https://docs.python.org/3/glossary.html#term-text-file - - """ - peek = read_head(handle) - handle.seek(0) - sniffer = csv.Sniffer() - if not sniffer.has_header(peek): - logger.critical("The given sample sheet does not appear to contain a header.") - sys.exit(1) - dialect = sniffer.sniff(peek) - return dialect - - -def check_samplesheet(file_in, file_out): - """ - Check that the tabular samplesheet has the structure expected by nf-core pipelines. - - Validate the general shape of the table, expected columns, and each row. Also add - an additional column which records whether one or two FASTQ reads were found. - - Args: - file_in (pathlib.Path): The given tabular samplesheet. The format can be either - CSV, TSV, or any other format automatically recognized by ``csv.Sniffer``. - file_out (pathlib.Path): Where the validated and transformed samplesheet should - be created; always in CSV format. 
- - Example: - This function checks that the samplesheet follows the following structure, - see also the `viral recon samplesheet`_:: - - sample,fastq_1,fastq_2 - SAMPLE_PE,SAMPLE_PE_RUN1_1.fastq.gz,SAMPLE_PE_RUN1_2.fastq.gz - SAMPLE_PE,SAMPLE_PE_RUN2_1.fastq.gz,SAMPLE_PE_RUN2_2.fastq.gz - SAMPLE_SE,SAMPLE_SE_RUN1_1.fastq.gz, - - .. _viral recon samplesheet: - https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv - - """ - required_columns = {"sample", "fastq_1", "fastq_2"} - # See https://docs.python.org/3.9/library/csv.html#id3 to read up on `newline=""`. - with file_in.open(newline="") as in_handle: - reader = csv.DictReader(in_handle, dialect=sniff_format(in_handle)) - # Validate the existence of the expected header columns. - if not required_columns.issubset(reader.fieldnames): - req_cols = ", ".join(required_columns) - logger.critical(f"The sample sheet **must** contain these column headers: {req_cols}.") - sys.exit(1) - # Validate each row. - checker = RowChecker() - for i, row in enumerate(reader): - try: - checker.validate_and_transform(row) - except AssertionError as error: - logger.critical(f"{str(error)} On line {i + 2}.") - sys.exit(1) - checker.validate_unique_samples() - header = list(reader.fieldnames) - header.insert(1, "single_end") - # See https://docs.python.org/3.9/library/csv.html#id3 to read up on `newline=""`. - with file_out.open(mode="w", newline="") as out_handle: - writer = csv.DictWriter(out_handle, header, delimiter=",") - writer.writeheader() - for row in checker.modified: - writer.writerow(row) - - -def parse_args(argv=None): - """Define and immediately parse command line arguments.""" - parser = argparse.ArgumentParser( - description="Validate and transform a tabular samplesheet.", - epilog="Example: python check_samplesheet.py samplesheet.csv samplesheet.valid.csv", - ) - parser.add_argument( - "file_in", - metavar="FILE_IN", - type=Path, - help="Tabular input samplesheet in CSV or TSV format.", - ) - parser.add_argument( - "file_out", - metavar="FILE_OUT", - type=Path, - help="Transformed output samplesheet in CSV format.", - ) - parser.add_argument( - "-l", - "--log-level", - help="The desired log level (default WARNING).", - choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"), - default="WARNING", - ) - return parser.parse_args(argv) - - -def main(argv=None): - """Coordinate argument parsing and program execution.""" - args = parse_args(argv) - logging.basicConfig(level=args.log_level, format="[%(levelname)s] %(message)s") - if not args.file_in.is_file(): - logger.error(f"The given input file {args.file_in} was not found!") - sys.exit(2) - args.file_out.parent.mkdir(parents=True, exist_ok=True) - check_samplesheet(args.file_in, args.file_out) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/bin/divide_transcripts.py b/bin/divide_transcripts.py new file mode 100755 index 00000000..133fcede --- /dev/null +++ b/bin/divide_transcripts.py @@ -0,0 +1,1312 @@ +#!/usr/bin/env python3 +"""Divide a Xenium transcripts.parquet file into spatial patches for tiled segmentation. + +Standalone script — no imports from xenium_patch or any local package. +Only uses stdlib + pyarrow + numpy. 
+ +Two grid modes: + - Uniform (default): equal-sized tiles based on --tile-width + - Quadtree (--balanced): starts uniform, recursively subdivides dense tiles +""" + +from __future__ import annotations + +import argparse +import json +import math +import os +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path + +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.parquet as pq + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +XENIUM_PIXEL_SIZE_UM: float = 0.2125 + +TRANSCRIPT_COLS = [ + "transcript_id", + "cell_id", + "overlaps_nucleus", + "feature_name", + "x_location", + "y_location", + "z_location", + "qv", +] + +# Quadtree defaults +QUADTREE_MIN_TILE_WIDTH_UM: float = 200.0 +QUADTREE_MAX_DEPTH: int = 4 +QUADTREE_HISTOGRAM_BINS: int = 500 + +# --------------------------------------------------------------------------- +# Data types +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class Bounds: + """Axis-aligned bounding box in either pixel or micron coordinates.""" + + x_min: float + x_max: float + y_min: float + y_max: float + + @property + def width(self) -> float: + return self.x_max - self.x_min + + @property + def height(self) -> float: + return self.y_max - self.y_min + + +@dataclass(frozen=True) +class PatchInfo: + """Metadata for a single patch in the grid.""" + + patch_id: str + row: int + col: int + global_bounds_px: Bounds + global_bounds_um: Bounds + core_bounds_px: Bounds + core_bounds_um: Bounds + + +# --------------------------------------------------------------------------- +# Grid computation — uniform +# --------------------------------------------------------------------------- + + +def _compute_uniform_grid( + image_height_px: int, + image_width_px: int, + grid_rows: int, + grid_cols: int, + overlap_px: int, + pixel_size_um: float, +) -> list[PatchInfo]: + """ + Compute a regular NxM grid of overlapping patches. + + Grid is computed in pixel space. Each patch overlaps its neighbors by + overlap_px pixels. Core regions are computed such that every pixel + belongs to exactly one core. + + Args: + image_height_px: Image height in pixels. + image_width_px: Image width in pixels. + grid_rows: Number of rows in the patch grid. + grid_cols: Number of columns in the patch grid. + overlap_px: Overlap between adjacent patches in pixels. + pixel_size_um: Microns per pixel. + + Returns: + List of PatchInfo for every patch. 
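A quick sanity check on the grid arithmetic is to build a small grid and confirm that the core regions partition the image while the global bounds overlap. A minimal sketch, using an illustrative 1000 px square image and pixel_size_um=1.0 purely to keep the numbers readable::

    # Illustrative 2x2 grid on a 1000x1000 px image with 100 px overlap;
    # pixel_size_um=1.0 so the px and um bounds coincide.
    patches = _compute_uniform_grid(
        image_height_px=1000,
        image_width_px=1000,
        grid_rows=2,
        grid_cols=2,
        overlap_px=100,
        pixel_size_um=1.0,
    )

    # Globals overlap by 100 px: [0, 550] and [450, 1000] along each axis.
    assert patches[0].global_bounds_px.x_max == 550
    assert patches[1].global_bounds_px.x_min == 450

    # Cores meet at 500 with no gap and no double coverage.
    assert patches[0].core_bounds_px.x_max == 500
    assert patches[1].core_bounds_px.x_min == 500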
+ """ + step_x = (image_width_px - overlap_px) / grid_cols + step_y = (image_height_px - overlap_px) / grid_rows + + patches: list[PatchInfo] = [] + for row in range(grid_rows): + for col in range(grid_cols): + x_min_px = int(round(col * step_x)) + y_min_px = int(round(row * step_y)) + x_max_px = min( + int(round(col * step_x + step_x + overlap_px)), image_width_px + ) + y_max_px = min( + int(round(row * step_y + step_y + overlap_px)), image_height_px + ) + + global_bounds_px = Bounds(x_min_px, x_max_px, y_min_px, y_max_px) + + # Core bounds: trim half-overlap from sides that have neighbors + half_overlap = overlap_px // 2 + remainder = overlap_px % 2 + core_x_min = x_min_px + (half_overlap + remainder if col > 0 else 0) + core_x_max = x_max_px - (half_overlap if col < grid_cols - 1 else 0) + core_y_min = y_min_px + (half_overlap + remainder if row > 0 else 0) + core_y_max = y_max_px - (half_overlap if row < grid_rows - 1 else 0) + + core_bounds_px = Bounds(core_x_min, core_x_max, core_y_min, core_y_max) + + global_bounds_um = Bounds( + x_min_px * pixel_size_um, + x_max_px * pixel_size_um, + y_min_px * pixel_size_um, + y_max_px * pixel_size_um, + ) + core_bounds_um = Bounds( + core_x_min * pixel_size_um, + core_x_max * pixel_size_um, + core_y_min * pixel_size_um, + core_y_max * pixel_size_um, + ) + + patches.append( + PatchInfo( + patch_id=f"patch_{row}_{col}", + row=row, + col=col, + global_bounds_px=global_bounds_px, + global_bounds_um=global_bounds_um, + core_bounds_px=core_bounds_px, + core_bounds_um=core_bounds_um, + ) + ) + + return patches + + +def compute_tilewidth_uniform_grid( + image_height_px: int, + image_width_px: int, + tile_width_um: float, + overlap_um: float, + pixel_size_um: float, + transcript_extent_um: Bounds, +) -> tuple[list[PatchInfo], int, int, int]: + """ + Compute a uniform grid from a tile width in microns. + + Args: + image_height_px: Image height in pixels. + image_width_px: Image width in pixels. + tile_width_um: Desired tile width in microns. + overlap_um: Overlap between adjacent patches in microns. + pixel_size_um: Size of one pixel in microns. + transcript_extent_um: Bounding box of transcript coordinates. + + Returns: + Tuple of (patches, grid_rows, grid_cols, overlap_px). + """ + image_width_um = image_width_px * pixel_size_um + image_height_um = image_height_px * pixel_size_um + cols = max(1, math.ceil(image_width_um / tile_width_um)) + rows = max(1, math.ceil(image_height_um / tile_width_um)) + overlap_px = int(math.ceil(overlap_um / pixel_size_um)) + + patches = _compute_uniform_grid( + image_height_px, image_width_px, rows, cols, overlap_px, pixel_size_um + ) + return patches, rows, cols, overlap_px + + +# --------------------------------------------------------------------------- +# Grid computation — density quadtree +# --------------------------------------------------------------------------- + + +def _build_prefix_sum( + x_coords_um: np.ndarray, + y_coords_um: np.ndarray, + n_bins: int = QUADTREE_HISTOGRAM_BINS, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Build a 2D histogram and its prefix sum for fast rectangle count queries. + + Args: + x_coords_um: Transcript X coordinates in microns. + y_coords_um: Transcript Y coordinates in microns. + n_bins: Number of bins along each axis. + + Returns: + Tuple of (prefix_sum, x_edges, y_edges). 
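The rectangle queries later in this file rest on the standard inclusion-exclusion identity over a summed-area table. A self-contained numpy toy of that identity (values are arbitrary)::

    import numpy as np

    # Toy histogram and its summed-area table (cumulative over both axes).
    hist = np.array([[1, 2], [3, 4]])
    ps = np.cumsum(np.cumsum(hist, axis=0), axis=1)  # [[1, 3], [4, 10]]

    # Count in the 1x1 rectangle at (row 1, col 1) by inclusion-exclusion:
    # ps[r1, c1] - ps[r0-1, c1] - ps[r1, c0-1] + ps[r0-1, c0-1]
    count = ps[1, 1] - ps[0, 1] - ps[1, 0] + ps[0, 0]
    assert count == hist[1, 1] == 4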
+ """ + x_min, x_max = float(np.min(x_coords_um)), float(np.max(x_coords_um)) + y_min, y_max = float(np.min(y_coords_um)), float(np.max(y_coords_um)) + + eps = 1e-6 + x_edges = np.linspace(x_min, x_max + eps, n_bins + 1) + y_edges = np.linspace(y_min, y_max + eps, n_bins + 1) + + hist, _, _ = np.histogram2d(x_coords_um, y_coords_um, bins=[x_edges, y_edges]) + # hist shape is (n_bins_x, n_bins_y), transpose to (y, x) for row-major access + hist = hist.T + + prefix_sum = np.cumsum(np.cumsum(hist, axis=0), axis=1) + return prefix_sum, x_edges, y_edges + + +def _count_transcripts_in_rect( + prefix_sum: np.ndarray, + x_edges: np.ndarray, + y_edges: np.ndarray, + x_min_um: float, + x_max_um: float, + y_min_um: float, + y_max_um: float, +) -> int: + """ + Count transcripts in a rectangle using a 2D prefix sum array. + + Args: + prefix_sum: 2D cumulative sum array (n_bins_y x n_bins_x). + x_edges: Histogram bin edges along X. + y_edges: Histogram bin edges along Y. + x_min_um: Left bound in microns. + x_max_um: Right bound in microns. + y_min_um: Top bound in microns. + y_max_um: Bottom bound in microns. + + Returns: + Approximate transcript count in the rectangle. + """ + col_lo = max(0, int(np.searchsorted(x_edges, x_min_um, side="right")) - 1) + col_hi = min( + len(x_edges) - 1, int(np.searchsorted(x_edges, x_max_um, side="right")) - 1 + ) + row_lo = max(0, int(np.searchsorted(y_edges, y_min_um, side="right")) - 1) + row_hi = min( + len(y_edges) - 1, int(np.searchsorted(y_edges, y_max_um, side="right")) - 1 + ) + + col_hi = min(col_hi, prefix_sum.shape[1] - 1) + row_hi = min(row_hi, prefix_sum.shape[0] - 1) + + if col_lo > col_hi or row_lo > row_hi: + return 0 + + total = int( + prefix_sum[row_hi, col_hi] + - (prefix_sum[row_lo - 1, col_hi] if row_lo > 0 else 0) + - (prefix_sum[row_hi, col_lo - 1] if col_lo > 0 else 0) + + (prefix_sum[row_lo - 1, col_lo - 1] if row_lo > 0 and col_lo > 0 else 0) + ) + return max(0, total) + + +def _subdivide_regions( + regions: list[tuple[float, float, float, float]], + prefix_sum: np.ndarray, + x_edges: np.ndarray, + y_edges: np.ndarray, + max_transcripts: int, + min_tile_width_um: float, + max_depth: int, +) -> list[tuple[float, float, float, float]]: + """ + Recursively subdivide regions exceeding the transcript threshold. + + Uses a stack instead of recursion for large grids. + + Args: + regions: List of (x_min, x_max, y_min, y_max) tuples in microns. + prefix_sum: 2D prefix sum for fast counting. + x_edges: Histogram X bin edges. + y_edges: Histogram Y bin edges. + max_transcripts: Maximum transcripts allowed per region. + min_tile_width_um: Minimum tile dimension before stopping. + max_depth: Maximum recursion depth. + + Returns: + List of final (x_min, x_max, y_min, y_max) regions. 
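Each split replaces an over-dense region with its four quadrants, so area is conserved and depth grows by one per split. A toy illustration of a single split step, with hypothetical 400 um bounds::

    # One quadtree split: a 400x400 um region becomes four 200x200 um quadrants.
    x_min, x_max, y_min, y_max = 0.0, 400.0, 0.0, 400.0
    mid_x = (x_min + x_max) / 2
    mid_y = (y_min + y_max) / 2
    children = [
        (x_min, mid_x, y_min, mid_y),
        (mid_x, x_max, y_min, mid_y),
        (x_min, mid_x, mid_y, y_max),
        (mid_x, x_max, mid_y, y_max),
    ]
    # The quadrants tile the parent exactly; depth increases by one per split.
    assert sum((c[1] - c[0]) * (c[3] - c[2]) for c in children) == 400.0 * 400.0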
+ """ + result: list[tuple[float, float, float, float]] = [] + stack: list[tuple[tuple[float, float, float, float], int]] = [ + (r, 0) for r in regions + ] + + while stack: + region, depth = stack.pop() + x_min, x_max, y_min, y_max = region + width = x_max - x_min + height = y_max - y_min + + count = _count_transcripts_in_rect( + prefix_sum, x_edges, y_edges, x_min, x_max, y_min, y_max + ) + + if count <= max_transcripts or depth >= max_depth: + result.append(region) + continue + + if min(width, height) / 2 < min_tile_width_um: + result.append(region) + continue + + # Split into 4 quadrants + mid_x = (x_min + x_max) / 2 + mid_y = (y_min + y_max) / 2 + children = [ + (x_min, mid_x, y_min, mid_y), + (mid_x, x_max, y_min, mid_y), + (x_min, mid_x, mid_y, y_max), + (mid_x, x_max, mid_y, y_max), + ] + for child in children: + stack.append((child, depth + 1)) + + return result + + +def _regions_to_patches( + regions: list[tuple[float, float, float, float]], + overlap_um: float, + overlap_px: int, + pixel_size_um: float, + image_width_px: int, + image_height_px: int, +) -> list[PatchInfo]: + """ + Convert quadtree regions to PatchInfo objects with overlap. + + Args: + regions: Sorted list of (x_min, x_max, y_min, y_max) in microns. + overlap_um: Overlap in microns. + overlap_px: Overlap in pixels. + pixel_size_um: Microns per pixel. + image_width_px: Image width in pixels. + image_height_px: Image height in pixels. + + Returns: + List of PatchInfo objects. + """ + patches: list[PatchInfo] = [] + for i, (x_min_um, x_max_um, y_min_um, y_max_um) in enumerate(regions): + # Core bounds in pixels + core_x_min_px = max( + 0, min(int(round(x_min_um / pixel_size_um)), image_width_px) + ) + core_x_max_px = max( + 0, min(int(round(x_max_um / pixel_size_um)), image_width_px) + ) + core_y_min_px = max( + 0, min(int(round(y_min_um / pixel_size_um)), image_height_px) + ) + core_y_max_px = max( + 0, min(int(round(y_max_um / pixel_size_um)), image_height_px) + ) + + core_bounds_px = Bounds( + core_x_min_px, core_x_max_px, core_y_min_px, core_y_max_px + ) + + # Global bounds: core extended by overlap, clamped to image + global_x_min_px = max(0, core_x_min_px - overlap_px) + global_x_max_px = min(image_width_px, core_x_max_px + overlap_px) + global_y_min_px = max(0, core_y_min_px - overlap_px) + global_y_max_px = min(image_height_px, core_y_max_px + overlap_px) + + global_bounds_px = Bounds( + global_x_min_px, global_x_max_px, global_y_min_px, global_y_max_px + ) + + core_bounds_um = Bounds( + core_x_min_px * pixel_size_um, + core_x_max_px * pixel_size_um, + core_y_min_px * pixel_size_um, + core_y_max_px * pixel_size_um, + ) + global_bounds_um = Bounds( + global_x_min_px * pixel_size_um, + global_x_max_px * pixel_size_um, + global_y_min_px * pixel_size_um, + global_y_max_px * pixel_size_um, + ) + + patches.append( + PatchInfo( + patch_id=f"patch_{i}", + row=i, + col=0, + global_bounds_px=global_bounds_px, + global_bounds_um=global_bounds_um, + core_bounds_px=core_bounds_px, + core_bounds_um=core_bounds_um, + ) + ) + + return patches + + +def compute_density_quadtree_grid( + image_height_px: int, + image_width_px: int, + tile_width_um: float, + overlap_um: float, + pixel_size_um: float, + x_coords_um: np.ndarray, + y_coords_um: np.ndarray, + max_transcripts_per_patch: int | None = None, + min_tile_width_um: float = QUADTREE_MIN_TILE_WIDTH_UM, + max_depth: int = QUADTREE_MAX_DEPTH, +) -> tuple[list[PatchInfo], int, int, int]: + """ + Compute an adaptive quadtree grid that subdivides dense regions. 
+ + Starts with a uniform grid derived from tile_width_um, then recursively + subdivides patches exceeding max_transcripts_per_patch. + + Args: + image_height_px: Image height in pixels. + image_width_px: Image width in pixels. + tile_width_um: Base tile width in microns. + overlap_um: Overlap between adjacent patches in microns. + pixel_size_um: Microns per pixel. + x_coords_um: Transcript X coordinates in microns. + y_coords_um: Transcript Y coordinates in microns. + max_transcripts_per_patch: Target max transcripts per patch. + If None, auto-computed as 2x the average per initial patch. + min_tile_width_um: Minimum tile dimension before stopping. + max_depth: Maximum recursion depth. + + Returns: + Tuple of (patches, initial_rows, initial_cols, overlap_px). + """ + image_width_um = image_width_px * pixel_size_um + image_height_um = image_height_px * pixel_size_um + overlap_px = int(math.ceil(overlap_um / pixel_size_um)) + + initial_cols = max(1, math.ceil(image_width_um / tile_width_um)) + initial_rows = max(1, math.ceil(image_height_um / tile_width_um)) + + # Build prefix sum for fast counting + prefix_sum, x_edges, y_edges = _build_prefix_sum(x_coords_um, y_coords_um) + + # Define initial regions in microns + cell_width_um = image_width_um / initial_cols + cell_height_um = image_height_um / initial_rows + + initial_regions: list[tuple[float, float, float, float]] = [] + for row in range(initial_rows): + for col in range(initial_cols): + x_min = col * cell_width_um + x_max = min((col + 1) * cell_width_um, image_width_um) + y_min = row * cell_height_um + y_max = min((row + 1) * cell_height_um, image_height_um) + initial_regions.append((x_min, x_max, y_min, y_max)) + + # Auto-compute threshold + n_initial = len(initial_regions) + total_transcripts = len(x_coords_um) + if max_transcripts_per_patch is None: + max_transcripts_per_patch = max(1, int(total_transcripts / n_initial * 2)) + + # Recursive subdivision + final_regions = _subdivide_regions( + initial_regions, + prefix_sum, + x_edges, + y_edges, + max_transcripts_per_patch, + min_tile_width_um, + max_depth, + ) + + # Sort by (y_min, x_min) for deterministic ordering + final_regions.sort(key=lambda r: (r[2], r[0])) + + # Convert to PatchInfo + patches = _regions_to_patches( + final_regions, + overlap_um, + overlap_px, + pixel_size_um, + image_width_px, + image_height_px, + ) + + return patches, initial_rows, initial_cols, overlap_px + + +# --------------------------------------------------------------------------- +# Sparse tile merging +# --------------------------------------------------------------------------- + + +def _count_transcripts_per_tile( + patches: list[PatchInfo], + x_coords_um: np.ndarray, + y_coords_um: np.ndarray, +) -> dict[str, int]: + """ + Count transcripts falling within each patch's core bounds. + + Uses core bounds (not global) to avoid double-counting transcripts + in overlap regions. + + Args: + patches: List of PatchInfo objects. + x_coords_um: Transcript X coordinates in microns. + y_coords_um: Transcript Y coordinates in microns. + + Returns: + Dict mapping patch_id to transcript count. 
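Counting against half-open core bounds ([min, max) on both axes) is what guarantees a transcript lands in exactly one tile. A small numpy sketch of the mask logic with toy coordinates::

    import numpy as np

    # Toy transcripts against one core region [0, 10) x [0, 10) in microns.
    x = np.array([1.0, 9.9, 10.0, 5.0])
    y = np.array([1.0, 9.9, 5.0, 10.0])
    mask = (x >= 0.0) & (x < 10.0) & (y >= 0.0) & (y < 10.0)
    # Points on the max edge belong to the neighboring tile, not this one.
    assert int(np.sum(mask)) == 2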
+ """ + counts: dict[str, int] = {} + for p in patches: + cb = p.core_bounds_um + mask = ( + (x_coords_um >= cb.x_min) + & (x_coords_um < cb.x_max) + & (y_coords_um >= cb.y_min) + & (y_coords_um < cb.y_max) + ) + counts[p.patch_id] = int(np.sum(mask)) + return counts + + +def _find_adjacent_patches( + patches: list[PatchInfo], +) -> dict[str, list[str]]: + """ + Build an adjacency map: patches sharing a core bounds edge are neighbors. + + Two patches are adjacent if their core bounds share an edge (touch or + overlap along one axis while overlapping along the other axis). + + Args: + patches: List of PatchInfo objects. + + Returns: + Dict mapping patch_id to list of adjacent patch_ids. + """ + adjacency: dict[str, list[str]] = {p.patch_id: [] for p in patches} + eps = 1.0 # tolerance in microns for edge sharing + + for i, a in enumerate(patches): + for j in range(i + 1, len(patches)): + b = patches[j] + ac = a.core_bounds_um + bc = b.core_bounds_um + + # Check X-axis overlap (cores overlap in X) + x_overlap = ac.x_min < bc.x_max and bc.x_min < ac.x_max + # Check Y-axis overlap (cores overlap in Y) + y_overlap = ac.y_min < bc.y_max and bc.y_min < ac.y_max + + # Adjacent along X: share a vertical edge, overlap in Y + x_touching = ( + abs(ac.x_max - bc.x_min) < eps or abs(bc.x_max - ac.x_min) < eps + ) + # Adjacent along Y: share a horizontal edge, overlap in X + y_touching = ( + abs(ac.y_max - bc.y_min) < eps or abs(bc.y_max - ac.y_min) < eps + ) + + if (x_touching and y_overlap) or (y_touching and x_overlap): + adjacency[a.patch_id].append(b.patch_id) + adjacency[b.patch_id].append(a.patch_id) + + return adjacency + + +def _recalculate_core_bounds( + patches: list[PatchInfo], + overlap_px: int, + pixel_size_um: float, + image_width_px: int, + image_height_px: int, +) -> list[PatchInfo]: + """ + Recalculate core bounds for all patches after merging. + + Core bounds are derived from the regions: the core is the + non-overlapping portion of each tile. After merging, we extract + core regions from global bounds by trimming the overlap, then + rebuild PatchInfo objects. + + For merged grids where tiles may be irregular, core bounds equal + the global bounds shrunk by half the overlap on each side that has + a neighbor, clamped to the image extent. + + Args: + patches: Current list of PatchInfo (with updated global bounds). + overlap_px: Overlap in pixels. + pixel_size_um: Microns per pixel. + image_width_px: Image width in pixels. + image_height_px: Image height in pixels. + + Returns: + New list of PatchInfo with recalculated core and global bounds. 
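The edge-sharing predicate used above treats two cores as adjacent when they touch within a 1 um tolerance along one axis and overlap along the other. A minimal sketch on two hypothetical boxes::

    # Two 100 um cores sharing the vertical edge at x=100, within tolerance.
    eps = 1.0
    a = dict(x_min=0.0, x_max=100.0, y_min=0.0, y_max=100.0)
    b = dict(x_min=100.0, x_max=200.0, y_min=50.0, y_max=150.0)

    x_touching = abs(a["x_max"] - b["x_min"]) < eps or abs(b["x_max"] - a["x_min"]) < eps
    y_overlap = a["y_min"] < b["y_max"] and b["y_min"] < a["y_max"]
    assert x_touching and y_overlap  # adjacent along X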
+ """ + if not patches: + return [] + + # Extract core regions in microns from global bounds minus overlap + half_overlap_um = (overlap_px * pixel_size_um) / 2.0 + image_width_um = image_width_px * pixel_size_um + image_height_um = image_height_px * pixel_size_um + + # Collect all core regions (global shrunk by half overlap) + core_regions_um: list[tuple[float, float, float, float]] = [] + for p in patches: + gb = p.global_bounds_um + # Shrink by half overlap on each side, but not past image edge + cx_min = gb.x_min + (half_overlap_um if gb.x_min > 0 else 0) + cx_max = gb.x_max - (half_overlap_um if gb.x_max < image_width_um else 0) + cy_min = gb.y_min + (half_overlap_um if gb.y_min > 0 else 0) + cy_max = gb.y_max - (half_overlap_um if gb.y_max < image_height_um else 0) + core_regions_um.append((cx_min, cx_max, cy_min, cy_max)) + + # Rebuild patches using core regions -> global bounds (core + overlap) + result: list[PatchInfo] = [] + for i, p in enumerate(patches): + cx_min, cx_max, cy_min, cy_max = core_regions_um[i] + + # Core bounds in pixels + core_x_min_px = max(0, min(int(round(cx_min / pixel_size_um)), image_width_px)) + core_x_max_px = max(0, min(int(round(cx_max / pixel_size_um)), image_width_px)) + core_y_min_px = max(0, min(int(round(cy_min / pixel_size_um)), image_height_px)) + core_y_max_px = max(0, min(int(round(cy_max / pixel_size_um)), image_height_px)) + + core_bounds_px = Bounds( + core_x_min_px, core_x_max_px, core_y_min_px, core_y_max_px + ) + + # Global bounds: core extended by overlap, clamped to image + global_x_min_px = max(0, core_x_min_px - overlap_px) + global_x_max_px = min(image_width_px, core_x_max_px + overlap_px) + global_y_min_px = max(0, core_y_min_px - overlap_px) + global_y_max_px = min(image_height_px, core_y_max_px + overlap_px) + + global_bounds_px = Bounds( + global_x_min_px, global_x_max_px, global_y_min_px, global_y_max_px + ) + + core_bounds_um = Bounds( + core_x_min_px * pixel_size_um, + core_x_max_px * pixel_size_um, + core_y_min_px * pixel_size_um, + core_y_max_px * pixel_size_um, + ) + global_bounds_um = Bounds( + global_x_min_px * pixel_size_um, + global_x_max_px * pixel_size_um, + global_y_min_px * pixel_size_um, + global_y_max_px * pixel_size_um, + ) + + result.append( + PatchInfo( + patch_id=p.patch_id, + row=p.row, + col=p.col, + global_bounds_px=global_bounds_px, + global_bounds_um=global_bounds_um, + core_bounds_px=core_bounds_px, + core_bounds_um=core_bounds_um, + ) + ) + + return result + + +def merge_sparse_tiles( + patches: list[PatchInfo], + x_coords_um: np.ndarray, + y_coords_um: np.ndarray, + overlap_px: int, + pixel_size_um: float, + image_width_px: int, + image_height_px: int, + min_transcripts: int = 1000, +) -> tuple[list[PatchInfo], int]: + """ + Merge tiles below min_transcripts into their least populated adjacent neighbor. + + Iteratively finds the sparsest tile below the threshold and merges it + into its smallest neighbor for balanced tile sizes. Repeats until no + tiles remain below the threshold (or a tile has no neighbors to merge into). + + Args: + patches: List of PatchInfo objects from grid computation. + x_coords_um: Transcript X coordinates in microns. + y_coords_um: Transcript Y coordinates in microns. + overlap_px: Overlap in pixels. + pixel_size_um: Microns per pixel. + image_width_px: Image width in pixels. + image_height_px: Image height in pixels. + min_transcripts: Minimum transcript count per tile. + + Returns: + Tuple of (merged patches, number of merges performed). 
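Because candidates are sorted by count, the sparsest tile is always absorbed first, and it flows into its least populated neighbor. A schematic of one iteration on hypothetical counts and made-up patch IDs::

    # Hypothetical per-tile counts and adjacency for a 2x2 grid.
    counts = {"patch_0_0": 120, "patch_0_1": 5000, "patch_1_0": 700, "patch_1_1": 9000}
    adjacency = {
        "patch_0_0": ["patch_0_1", "patch_1_0"],
        "patch_0_1": ["patch_0_0", "patch_1_1"],
        "patch_1_0": ["patch_0_0", "patch_1_1"],
        "patch_1_1": ["patch_0_1", "patch_1_0"],
    }

    min_transcripts = 1000
    sparse = sorted(
        ((pid, c) for pid, c in counts.items() if c < min_transcripts),
        key=lambda t: t[1],
    )
    sparse_id = sparse[0][0]  # patch_0_0 (120) is merged before patch_1_0 (700)
    target = min(adjacency[sparse_id], key=lambda nid: counts[nid])
    assert (sparse_id, target) == ("patch_0_0", "patch_1_0")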
+ """ + if len(patches) <= 1: + return patches, 0 + + # Work with mutable list + active = list(patches) + merge_count = 0 + + while True: + counts = _count_transcripts_per_tile(active, x_coords_um, y_coords_um) + adjacency = _find_adjacent_patches(active) + + # Find sparsest tile below threshold + sparse_candidates = [ + (pid, cnt) for pid, cnt in counts.items() if cnt < min_transcripts + ] + if not sparse_candidates: + break + + # Sort by count ascending to merge sparsest first + sparse_candidates.sort(key=lambda t: t[1]) + sparse_id, sparse_count = sparse_candidates[0] + + # Find neighbors and pick the least populated one for balanced merging + neighbors = adjacency.get(sparse_id, []) + if not neighbors: + # No neighbors for this tile — skip it and try next sparsest + sparse_candidates = [(pid, cnt) for pid, cnt in sparse_candidates[1:]] + found = False + for pid, cnt in sparse_candidates: + nbrs = adjacency.get(pid, []) + if nbrs: + sparse_id, sparse_count = pid, cnt + neighbors = nbrs + found = True + break + if not found: + break + + best_neighbor_id = min(neighbors, key=lambda nid: counts.get(nid, 0)) + + # Find the actual PatchInfo objects + sparse_patch = next(p for p in active if p.patch_id == sparse_id) + neighbor_patch = next(p for p in active if p.patch_id == best_neighbor_id) + + # Expand neighbor's global bounds to cover both tiles + sg = sparse_patch.global_bounds_um + ng = neighbor_patch.global_bounds_um + merged_global_um = Bounds( + x_min=min(sg.x_min, ng.x_min), + x_max=max(sg.x_max, ng.x_max), + y_min=min(sg.y_min, ng.y_min), + y_max=max(sg.y_max, ng.y_max), + ) + + # Also merge core bounds (union) + sc = sparse_patch.core_bounds_um + nc = neighbor_patch.core_bounds_um + merged_core_um = Bounds( + x_min=min(sc.x_min, nc.x_min), + x_max=max(sc.x_max, nc.x_max), + y_min=min(sc.y_min, nc.y_min), + y_max=max(sc.y_max, nc.y_max), + ) + + # Convert merged bounds to pixels + merged_global_px = Bounds( + x_min=max(0, int(round(merged_global_um.x_min / pixel_size_um))), + x_max=min( + image_width_px, int(round(merged_global_um.x_max / pixel_size_um)) + ), + y_min=max(0, int(round(merged_global_um.y_min / pixel_size_um))), + y_max=min( + image_height_px, int(round(merged_global_um.y_max / pixel_size_um)) + ), + ) + merged_core_px = Bounds( + x_min=max(0, int(round(merged_core_um.x_min / pixel_size_um))), + x_max=min(image_width_px, int(round(merged_core_um.x_max / pixel_size_um))), + y_min=max(0, int(round(merged_core_um.y_min / pixel_size_um))), + y_max=min( + image_height_px, int(round(merged_core_um.y_max / pixel_size_um)) + ), + ) + + # Create merged patch (keeps absorbing tile's ID and position) + merged_patch = PatchInfo( + patch_id=neighbor_patch.patch_id, + row=neighbor_patch.row, + col=neighbor_patch.col, + global_bounds_px=merged_global_px, + global_bounds_um=merged_global_um, + core_bounds_px=merged_core_px, + core_bounds_um=merged_core_um, + ) + + # Replace neighbor with merged patch and remove sparse tile + active = [ + merged_patch if p.patch_id == best_neighbor_id else p + for p in active + if p.patch_id != sparse_id + ] + merge_count += 1 + + print( + f" Merged {sparse_id} ({sparse_count:,} transcripts) " + f"into {best_neighbor_id} ({counts[best_neighbor_id]:,} transcripts)" + ) + + if merge_count > 0: + # Recalculate core bounds for consistency + active = _recalculate_core_bounds( + active, overlap_px, pixel_size_um, image_width_px, image_height_px + ) + + return active, merge_count + + +# 
--------------------------------------------------------------------------- +# Transcript division +# --------------------------------------------------------------------------- + + +def _filter_and_write_patch_transcripts( + full_table: pa.Table, + output_path: Path, + bounds_um: Bounds, + origin_x: float, + origin_y: float, +) -> int: + """ + Filter transcripts to a spatial region and write to parquet. + + Transcripts are filtered to global_bounds (including overlap), then + coordinates are offset by subtracting the global_bounds origin. + + Args: + full_table: Full transcript table as a pyarrow Table. + output_path: Path for the filtered output parquet. + bounds_um: Spatial bounding box for filtering (microns). + origin_x: X offset to subtract for local coordinates. + origin_y: Y offset to subtract for local coordinates. + + Returns: + Number of transcripts written. + """ + x_col = full_table.column("x_location") + y_col = full_table.column("y_location") + + mask = pc.and_( + pc.and_( + pc.greater_equal(x_col, pa.scalar(bounds_um.x_min, type=x_col.type)), + pc.less(x_col, pa.scalar(bounds_um.x_max, type=x_col.type)), + ), + pc.and_( + pc.greater_equal(y_col, pa.scalar(bounds_um.y_min, type=y_col.type)), + pc.less(y_col, pa.scalar(bounds_um.y_max, type=y_col.type)), + ), + ) + filtered = full_table.filter(mask) + + if origin_x != 0.0 or origin_y != 0.0: + fx = filtered.column("x_location") + fy = filtered.column("y_location") + x_local = pc.subtract(fx, pa.scalar(origin_x, type=fx.type)) + y_local = pc.subtract(fy, pa.scalar(origin_y, type=fy.type)) + idx_x = filtered.schema.get_field_index("x_location") + idx_y = filtered.schema.get_field_index("y_location") + filtered = filtered.set_column(idx_x, "x_location", x_local) + filtered = filtered.set_column(idx_y, "y_location", y_local) + + output_path.parent.mkdir(parents=True, exist_ok=True) + pq.write_table(filtered, str(output_path)) + return len(filtered) + + +def _process_patch( + patch: PatchInfo, + output_dir: Path, + full_table: pa.Table, +) -> int: + """ + Write transcript subset for a single patch. + + Args: + patch: Patch metadata. + output_dir: Root output directory. + full_table: Full transcript table. + + Returns: + Number of transcripts written. + """ + patch_dir = output_dir / patch.patch_id + bounds_um = patch.global_bounds_um + return _filter_and_write_patch_transcripts( + full_table, + patch_dir / "transcripts.parquet", + bounds_um, + origin_x=bounds_um.x_min, + origin_y=bounds_um.y_min, + ) + + +# --------------------------------------------------------------------------- +# JSON serialization +# --------------------------------------------------------------------------- + + +def _bounds_to_dict(b: Bounds) -> dict[str, float]: + """Serialize a Bounds to a JSON-compatible dict.""" + return {"x_min": b.x_min, "x_max": b.x_max, "y_min": b.y_min, "y_max": b.y_max} + + +def save_grid_metadata( + patches: list[PatchInfo], + image_height_px: int, + image_width_px: int, + pixel_size_um: float, + transcript_extent_um: Bounds, + grid_rows: int, + grid_cols: int, + overlap_um: float, + overlap_px: int, + grid_type: str, + output_path: Path, +) -> None: + """ + Serialize grid metadata to JSON. + + Args: + patches: List of PatchInfo objects. + image_height_px: Image height in pixels. + image_width_px: Image width in pixels. + pixel_size_um: Microns per pixel. + transcript_extent_um: Bounding box of transcript coordinates. + grid_rows: Number of rows in the initial grid. + grid_cols: Number of columns in the initial grid. 
+ overlap_um: Overlap in microns. + overlap_px: Overlap in pixels. + grid_type: Grid type string ("uniform" or "density_quadtree"). + output_path: Path to write JSON file. + """ + data = { + "version": "1.0", + "bundle_path": "", + "image_height_px": image_height_px, + "image_width_px": image_width_px, + "pixel_size_um": pixel_size_um, + "transcript_extent_um": _bounds_to_dict(transcript_extent_um), + "grid_rows": grid_rows, + "grid_cols": grid_cols, + "overlap_um": overlap_um, + "overlap_px": overlap_px, + "grid_type": grid_type, + "patches": [ + { + "patch_id": p.patch_id, + "row": p.row, + "col": p.col, + "global_bounds_px": _bounds_to_dict(p.global_bounds_px), + "global_bounds_um": _bounds_to_dict(p.global_bounds_um), + "core_bounds_px": _bounds_to_dict(p.core_bounds_px), + "core_bounds_um": _bounds_to_dict(p.core_bounds_um), + } + for p in patches + ], + } + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w") as f: + json.dump(data, f, indent=2) + + +# --------------------------------------------------------------------------- +# Coordinate shift helper +# --------------------------------------------------------------------------- + + +def _shift_patches_to_real_coords( + patches: list[PatchInfo], + ox: float, + oy: float, +) -> list[PatchInfo]: + """ + Shift patch micron bounds by (ox, oy) to align with real transcript coords. + + Pixel bounds remain zero-origin (there is no real image to index into). + + Args: + patches: Patches in zero-origin micron space. + ox: X offset (transcript extent x_min). + oy: Y offset (transcript extent y_min). + + Returns: + New list of PatchInfo with shifted micron bounds. + """ + shifted: list[PatchInfo] = [] + for p in patches: + gu = p.global_bounds_um + cu = p.core_bounds_um + shifted.append( + PatchInfo( + patch_id=p.patch_id, + row=p.row, + col=p.col, + global_bounds_px=p.global_bounds_px, + global_bounds_um=Bounds( + gu.x_min + ox, gu.x_max + ox, gu.y_min + oy, gu.y_max + oy + ), + core_bounds_px=p.core_bounds_px, + core_bounds_um=Bounds( + cu.x_min + ox, cu.x_max + ox, cu.y_min + oy, cu.y_max + oy + ), + ) + ) + return shifted + + +# --------------------------------------------------------------------------- +# Main divide logic +# --------------------------------------------------------------------------- + + +def divide_transcripts( + transcripts_path: Path, + output_dir: Path, + image_width_px: int, + image_height_px: int, + tile_width_um: float, + overlap_um: float, + balanced: bool, + pixel_size_um: float = XENIUM_PIXEL_SIZE_UM, + max_workers: int | None = None, + min_transcripts: int = 1000, +) -> None: + """ + Divide transcripts into overlapping spatial patches. + + Reads the transcript table once, computes a grid, merges sparse tiles + into neighbors, and writes per-patch parquet files with coordinates + offset to patch-local space. + + Args: + transcripts_path: Path to transcripts.parquet. + output_dir: Output directory for patches. + image_width_px: Image width in pixels. + image_height_px: Image height in pixels. + tile_width_um: Tile width in microns. + overlap_um: Overlap between adjacent patches in microns. + balanced: If True, use density quadtree mode. + pixel_size_um: Microns per pixel. + max_workers: Maximum threads for parallel patch writes. + min_transcripts: Minimum transcripts per tile; sparse tiles merged + into neighbors. Set to 0 to disable merging. 
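Since the function takes plain paths and numbers, a run can be sketched directly in Python; the paths and image dimensions below are placeholders, not values from the pipeline::

    from pathlib import Path

    # Placeholder paths and dimensions; substitute real Xenium bundle values.
    divide_transcripts(
        transcripts_path=Path("outs/transcripts.parquet"),
        output_dir=Path("patches"),
        image_width_px=40000,
        image_height_px=30000,
        tile_width_um=2000.0,
        overlap_um=50.0,
        balanced=True,  # quadtree mode; omit for the uniform grid
    )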
+ """ + output_dir.mkdir(parents=True, exist_ok=True) + + # Read full transcript table + full_table = pq.read_table(str(transcripts_path)) + n_total = len(full_table) + print(f"Read {n_total:,} transcripts from {transcripts_path}") + + # Compute transcript extent + x_col = full_table.column("x_location") + y_col = full_table.column("y_location") + extent_um = Bounds( + x_min=pc.min(x_col).as_py(), + x_max=pc.max(x_col).as_py(), + y_min=pc.min(y_col).as_py(), + y_max=pc.max(y_col).as_py(), + ) + print( + f"Transcript extent: " + f"x=[{extent_um.x_min:.1f}, {extent_um.x_max:.1f}] " + f"y=[{extent_um.y_min:.1f}, {extent_um.y_max:.1f}] um" + ) + + # Build grid in zero-origin space when transcripts have a positive offset. + # The grid functions work in pixel space starting at (0, 0). We shift + # micron bounds back to real coordinates afterward. + ox = extent_um.x_min + oy = extent_um.y_min + + if balanced: + # Shift coordinates to zero-origin for density computation + x_coords = x_col.to_numpy() - ox + y_coords = y_col.to_numpy() - oy + + patches, grid_rows, grid_cols, overlap_px = compute_density_quadtree_grid( + image_height_px=image_height_px, + image_width_px=image_width_px, + tile_width_um=tile_width_um, + overlap_um=overlap_um, + pixel_size_um=pixel_size_um, + x_coords_um=x_coords, + y_coords_um=y_coords, + ) + grid_type = "density_quadtree" + else: + patches, grid_rows, grid_cols, overlap_px = compute_tilewidth_uniform_grid( + image_height_px=image_height_px, + image_width_px=image_width_px, + tile_width_um=tile_width_um, + overlap_um=overlap_um, + pixel_size_um=pixel_size_um, + transcript_extent_um=extent_um, + ) + grid_type = "uniform" + + # Merge sparse tiles into neighbors + n_before_merge = len(patches) + if min_transcripts > 0 and len(patches) > 1: + # Coordinates for counting: use zero-origin if not already + if balanced: + merge_x = x_coords + merge_y = y_coords + else: + merge_x = x_col.to_numpy() - ox + merge_y = y_col.to_numpy() - oy + + patches, n_merged = merge_sparse_tiles( + patches=patches, + x_coords_um=merge_x, + y_coords_um=merge_y, + overlap_px=overlap_px, + pixel_size_um=pixel_size_um, + image_width_px=image_width_px, + image_height_px=image_height_px, + min_transcripts=min_transcripts, + ) + if n_merged > 0: + grid_type = f"{grid_type}+merged" + print( + f"Merged {n_merged} sparse tiles: " + f"{n_before_merge} -> {len(patches)} patches" + ) + + # Shift micron bounds to real transcript coordinates + if ox != 0.0 or oy != 0.0: + patches = _shift_patches_to_real_coords(patches, ox, oy) + + print( + f"Grid: {grid_type}, {grid_rows}x{grid_cols} initial, " + f"{len(patches)} patches, overlap={overlap_um} um" + ) + + # Write patches in parallel + n_patches = len(patches) + workers = ( + max_workers if max_workers is not None else min(n_patches, os.cpu_count() or 1) + ) + + with ThreadPoolExecutor(max_workers=workers) as pool: + futures = [ + pool.submit(_process_patch, patch, output_dir, full_table) + for patch in patches + ] + for i, future in enumerate(futures): + count = future.result() + print(f" {patches[i].patch_id}: {count:,} transcripts") + + # Save grid metadata + save_grid_metadata( + patches=patches, + image_height_px=image_height_px, + image_width_px=image_width_px, + pixel_size_um=pixel_size_um, + transcript_extent_um=extent_um, + grid_rows=grid_rows, + grid_cols=grid_cols, + overlap_um=overlap_um, + overlap_px=overlap_px, + grid_type=grid_type, + output_path=output_dir / "patch_grid.json", + ) + print(f"Grid metadata saved to {output_dir / 
'patch_grid.json'}") + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + + +def parse_args(argv: list[str] | None = None) -> argparse.Namespace: + """ + Parse command-line arguments. + + Args: + argv: Argument list (defaults to sys.argv[1:]). + + Returns: + Parsed arguments namespace. + """ + parser = argparse.ArgumentParser( + description="Divide Xenium transcripts.parquet into spatial patches.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--transcripts", + type=Path, + required=True, + help="Path to transcripts.parquet", + ) + parser.add_argument( + "--output", + type=Path, + required=True, + help="Output directory for patches", + ) + parser.add_argument( + "--tile-width", + type=float, + default=2000.0, + help="Tile width in microns", + ) + parser.add_argument( + "--overlap", + type=float, + default=50.0, + help="Overlap between patches in microns", + ) + parser.add_argument( + "--balanced", + action="store_true", + help="Enable density quadtree mode (subdivides dense tiles)", + ) + parser.add_argument( + "--image-width", + type=int, + required=True, + help="Image width in pixels", + ) + parser.add_argument( + "--image-height", + type=int, + required=True, + help="Image height in pixels", + ) + parser.add_argument( + "--pixel-size", + type=float, + default=XENIUM_PIXEL_SIZE_UM, + help="Pixel size in microns", + ) + parser.add_argument( + "--min-transcripts", + type=int, + default=1000, + help="Minimum transcripts per tile; sparse tiles are merged into neighbors", + ) + parser.add_argument( + "--max-workers", + type=int, + default=None, + help="Maximum threads for parallel writes", + ) + return parser.parse_args(argv) + + +def main(argv: list[str] | None = None) -> None: + """Entry point.""" + args = parse_args(argv) + + divide_transcripts( + transcripts_path=args.transcripts, + output_dir=args.output, + image_width_px=args.image_width, + image_height_px=args.image_height, + tile_width_um=args.tile_width, + overlap_um=args.overlap, + balanced=args.balanced, + pixel_size_um=args.pixel_size, + max_workers=args.max_workers, + min_transcripts=args.min_transcripts, + ) + + +if __name__ == "__main__": + main() diff --git a/bin/ficture_preprocess.py b/bin/ficture_preprocess.py new file mode 100755 index 00000000..2e0c687c --- /dev/null +++ b/bin/ficture_preprocess.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +"""Preprocess Xenium transcripts for FICTURE analysis.""" + +import argparse +import gzip +import logging +import os +import re +import sys + +import pandas as pd + + +def parse_args(): + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Preprocess Xenium transcripts for FICTURE" + ) + parser.add_argument( + "--transcripts", required=True, help="Path to transcripts file (CSV)" + ) + parser.add_argument( + "--features", default="", help="Path to features file (optional)" + ) + parser.add_argument( + "--negative-control-regex", default="", help="Regex for negative control probes" + ) + return parser.parse_args() + + +def main(): + """Run FICTURE preprocessing.""" + args = parse_args() + print("[START]") + + negctrl_regex = "BLANK|NegCon" + if args.negative_control_regex: + negctrl_regex = args.negative_control_regex + + unit_info = ["X", "Y", "gene", "cell_id", "overlaps_nucleus"] + oheader = unit_info + ["Count"] + + feature = pd.DataFrame() + xmin = sys.maxsize + xmax = 0 + ymin = sys.maxsize 
+    ymax = 0
+
+    output = "processed_transcripts.tsv.gz"
+    feature_file = "feature.clean.tsv.gz"
+    min_phred_score = 15
+
+    # Configure logging so the per-chunk INFO messages below are emitted;
+    # logging.info() is silent at the default WARNING level otherwise.
+    logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
+
+    with gzip.open(output, "wt") as wf:
+        wf.write("\t".join(oheader) + "\n")
+
+    for chunk in pd.read_csv(args.transcripts, header=0, chunksize=500000):
+        chunk = chunk.loc[(chunk.qv > min_phred_score)]
+        chunk.rename(columns={"feature_name": "gene"}, inplace=True)
+        if negctrl_regex != "":
+            chunk = chunk[
+                ~chunk.gene.str.contains(negctrl_regex, flags=re.IGNORECASE, regex=True)
+            ]
+        chunk.rename(columns={"x_location": "X", "y_location": "Y"}, inplace=True)
+        chunk["Count"] = 1
+        chunk[oheader].to_csv(
+            output, sep="\t", mode="a", index=False, header=False, float_format="%.2f"
+        )
+        logging.info(f"Processed chunk of {chunk.shape[0]:,} transcripts")
+        feature = pd.concat(
+            [feature, chunk.groupby(by="gene").agg({"Count": "sum"}).reset_index()]
+        )
+        x0 = chunk.X.min()
+        x1 = chunk.X.max()
+        y0 = chunk.Y.min()
+        y1 = chunk.Y.max()
+        xmin = min(int(xmin), int(x0))
+        xmax = max(int(xmax), int(x1))
+        ymin = min(int(ymin), int(y0))
+        ymax = max(int(ymax), int(y1))
+
+    if os.path.exists(args.features):
+        feature_list = []
+        with open(args.features, "r") as ff:
+            for line in ff:
+                feature_list.append(line.strip("\n"))
+        feature = feature.groupby(by="gene").agg({"Count": "sum"}).reset_index()
+        feature = feature[feature["gene"].isin(feature_list)]
+        feature.to_csv(feature_file, sep="\t", index=False)
+
+    f = os.path.join(os.path.dirname(output), "coordinate_minmax.tsv")
+    with open(f, "w") as wf:
+        wf.write(f"xmin\t{xmin}\n")
+        wf.write(f"xmax\t{xmax}\n")
+        wf.write(f"ymin\t{ymin}\n")
+        wf.write(f"ymax\t{ymax}\n")
+
+    print("[FINISH]")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/segger_create_dataset.py b/bin/segger_create_dataset.py
new file mode 100755
index 00000000..c73ab006
--- /dev/null
+++ b/bin/segger_create_dataset.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+"""
+Run segger create_dataset with spatialxe-specific preprocessing and workarounds.
+
+Wraps segger's create_dataset_fast.py with:
+  - bundle_local symlink prep (handles read-only S3/Fusion mounts)
+  - parquet column statistics (segger needs these)
+  - WORKAROUND: filter trainable tiles from test_tiles when segger commit 0787167 mis-splits
+  - WORKAROUND: replace NaN bd.x with zeros after get_polygon_props produces NaN
+
+Each WORKAROUND should be removable when the upstream segger bug is fixed.
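For reference, the chunked reader in ficture_preprocess.py above keeps memory flat on large transcript tables; a self-contained sketch of the same qv filter and negative-control mask on a toy frame (column names follow the Xenium transcripts CSV)::

    import re
    import pandas as pd

    chunk = pd.DataFrame({
        "feature_name": ["GeneA", "NegControlProbe_1", "BLANK_12", "GeneB"],
        "qv": [30.0, 40.0, 10.0, 20.0],
    })
    negctrl_regex = "BLANK|NegCon"
    min_phred_score = 15

    chunk = chunk.loc[chunk.qv > min_phred_score]
    chunk = chunk.rename(columns={"feature_name": "gene"})
    chunk = chunk[~chunk.gene.str.contains(negctrl_regex, flags=re.IGNORECASE, regex=True)]
    assert list(chunk.gene) == ["GeneA", "GeneB"]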
+""" + +import argparse +import os +import shutil +import subprocess +import sys +from pathlib import Path + +# imports for actual work (used in functions below) +import pyarrow.parquet as pq +import pyarrow.compute as pc +import torch + + +SEGGER_CLI = "/workspace/segger_dev/src/segger/cli/create_dataset_fast.py" + + +def parse_args(): + p = argparse.ArgumentParser() + p.add_argument("--bundle-dir", required=True) + p.add_argument("--output-dir", required=True) + p.add_argument("--sample-type", required=True, choices=["xenium"]) + p.add_argument("--tile-width", type=int, required=True) + p.add_argument("--tile-height", type=int, required=True) + p.add_argument("--n-workers", type=int, required=True) + # remaining args forwarded to segger CLI + args, extra = p.parse_known_args() + return args, extra + + +def prepare_bundle(bundle_dir): + """Create local bundle dir with absolute symlinks (S3/Fusion read-only-safe).""" + Path("bundle_local").mkdir(exist_ok=True) + for item in Path(bundle_dir).iterdir(): + try: + abs_path = item.resolve() + except Exception: + abs_path = item + target = Path("bundle_local") / item.name + if target.exists() or target.is_symlink(): + target.unlink() + target.symlink_to(abs_path) + + # Segger expects nucleus_boundaries.parquet but Xenium bundles have cell_boundaries.parquet + nb = Path("bundle_local/nucleus_boundaries.parquet") + cb = Path("bundle_local/cell_boundaries.parquet") + if not nb.exists() and cb.exists(): + print( + "Creating nucleus_boundaries.parquet symlink from cell_boundaries.parquet" + ) + nb.symlink_to(cb.resolve()) + + print("Bundle contents:") + for item in sorted(Path("bundle_local").iterdir()): + print(f" {item.name}") + + +def add_parquet_stats(): + """Rewrite key parquet files with column statistics (segger requires them).""" + Path("bundle_stats").mkdir(exist_ok=True) + for fname in ["transcripts.parquet", "nucleus_boundaries.parquet"]: + src = Path("bundle_local") / fname + dst = Path("bundle_stats") / fname + if not src.exists(): + print(f" Skip {src}") + continue + t = pq.read_table(str(src)) + pq.write_table(t, str(dst), write_statistics=True, compression="snappy") + print(f" Done {fname} ({len(t)} rows)") + + # Symlink everything else from bundle_local into bundle_stats + for item in Path("bundle_local").iterdir(): + dst = Path("bundle_stats") / item.name + if not dst.exists(): + dst.symlink_to(item.resolve()) + + # Debug: check overlaps_nucleus column in transcripts + print("\n=== Debugging overlaps_nucleus data ===") + tx = pq.read_table("bundle_stats/transcripts.parquet") + bd = pq.read_table("bundle_stats/nucleus_boundaries.parquet") + if "overlaps_nucleus" in tx.column_names: + col = tx.column("overlaps_nucleus") + print(f"overlaps_nucleus dtype: {col.type}") + unique_vals = pc.unique(col) + print(f"overlaps_nucleus unique values: {unique_vals.to_pylist()[:10]}") + val_counts = pc.value_counts(col) + print(f"overlaps_nucleus value_counts: {val_counts.to_pylist()}") + else: + print("WARNING: overlaps_nucleus column NOT FOUND in transcripts.parquet") + + if "cell_id" in tx.column_names and "cell_id" in bd.column_names: + tx_cells = set(pc.unique(tx.column("cell_id")).to_pylist()) + bd_cells = set(pc.unique(bd.column("cell_id")).to_pylist()) + overlap = tx_cells & bd_cells + print(f"Transcripts unique cell_ids: {len(tx_cells)}") + print(f"Boundaries unique cell_ids: {len(bd_cells)}") + print(f"Overlapping cell_ids: {len(overlap)}") + print("=== End Debug ===\n") + + +def run_segger_cli(args, extra): + cmd = [ + "python3", + 
SEGGER_CLI, + "--base_dir", + "bundle_stats", + "--data_dir", + args.output_dir, + "--sample_type", + args.sample_type, + "--tile_width", + str(args.tile_width), + "--tile_height", + str(args.tile_height), + "--n_workers", + str(args.n_workers), + *extra, + ] + print(f"Running: {' '.join(cmd)}") + result = subprocess.run(cmd) + if result.returncode != 0: + sys.exit(result.returncode) + + +def filter_trainable_tiles_if_needed(prefix): + """ + WORKAROUND: segger commit 0787167 has a bug where all tiles end up in test_tiles + regardless of test_prob/val_prob settings. Move ONLY trainable tiles (those with + edge_label_index) from test_tiles to train_tiles. + + Remove this function once segger >= 0.1.x is bumped with the upstream fix. + """ + train_dir = Path(prefix) / "train_tiles" / "processed" + test_dir = Path(prefix) / "test_tiles" / "processed" + val_dir = Path(prefix) / "val_tiles" / "processed" + + train_count = len(list(train_dir.iterdir())) if train_dir.exists() else 0 + test_count = len(list(test_dir.iterdir())) if test_dir.exists() else 0 + val_count = len(list(val_dir.iterdir())) if val_dir.exists() else 0 + print( + f"Dataset split (before fix): train={train_count} val={val_count} test={test_count}" + ) + + if train_count == 0 and test_count > 0: + print( + "Applying workaround: filtering trainable tiles from test_tiles (segger split bug)" + ) + moved = 0 + skipped = 0 + for tile_path in list(test_dir.iterdir()): + if not tile_path.name.endswith(".pt"): + continue + try: + tile = torch.load(str(tile_path), weights_only=False) + edge_store = tile["tx", "belongs", "bd"] + if ( + hasattr(edge_store, "edge_label_index") + and edge_store.edge_label_index.numel() > 0 + ): + shutil.move(str(tile_path), str(train_dir / tile_path.name)) + moved += 1 + else: + skipped += 1 + except Exception as e: + print(f"Warning: Could not process {tile_path.name}: {e}") + skipped += 1 + print(f"Moved {moved} trainable tiles to train_tiles") + print(f"Skipped {skipped} test-only tiles (no edge_label_index)") + + train_count = len(list(train_dir.iterdir())) if train_dir.exists() else 0 + test_count = len(list(test_dir.iterdir())) if test_dir.exists() else 0 + val_count = len(list(val_dir.iterdir())) if val_dir.exists() else 0 + print( + f"Dataset split (after fix): train={train_count} val={val_count} test={test_count}" + ) + + if train_count == 0: + print(f"ERROR: No trainable tiles were created in {train_dir}", file=sys.stderr) + print( + "This usually means no transcripts overlap with nucleus boundaries in the dataset.", + file=sys.stderr, + ) + print( + "Check if the Xenium bundle contains valid overlaps_nucleus data in transcripts.parquet.", + file=sys.stderr, + ) + sys.exit(1) + print(f"Successfully created {train_count} trainable tiles") + + +def fix_bd_x_nan(prefix): + """ + WORKAROUND: segger's get_polygon_props() produces NaN boundary features (bd.x) + when polygon geometries have zero area or index misalignment during GeoDataFrame + construction. Replace NaN bd.x with zeros so BCEWithLogitsLoss doesn't propagate NaN. + + Remove this function once segger >= 0.1.x is bumped with the upstream fix. 
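The per-tile repair that follows is a single tensor operation; a minimal torch sketch of what the loop applies to each tile's bd.x::

    import torch

    bd_x = torch.tensor([[1.0, float("nan")], [3.0, 4.0]])
    if bd_x.isnan().any():
        bd_x = torch.nan_to_num(bd_x, nan=0.0)
    assert bd_x[0, 1] == 0.0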
+ """ + fixed = 0 + total = 0 + for split in ["train_tiles", "test_tiles", "val_tiles"]: + tile_dir = Path(prefix) / split / "processed" + if not tile_dir.is_dir(): + continue + for tile_path in tile_dir.iterdir(): + if not tile_path.name.endswith(".pt"): + continue + total += 1 + tile = torch.load(str(tile_path), weights_only=False) + bd_x = tile["bd"].x + if bd_x.isnan().any(): + tile["bd"].x = torch.nan_to_num(bd_x, nan=0.0) + torch.save(tile, str(tile_path)) + fixed += 1 + print(f"Fixed NaN bd.x in {fixed}/{total} tiles") + + +def main(): + args, extra = parse_args() + + # Ensure numba cache dir is writable (env var should be set by caller, but belt-and-suspenders) + os.environ.setdefault("NUMBA_CACHE_DIR", os.path.join(os.getcwd(), ".numba_cache")) + os.makedirs(os.environ["NUMBA_CACHE_DIR"], exist_ok=True) + + prepare_bundle(args.bundle_dir) + print("Adding statistics to parquet files...") + add_parquet_stats() + + # Sanity-check bundle_stats + print("bundle_stats contents:") + for item in sorted(Path("bundle_stats").iterdir()): + print(f" {item.name}") + + run_segger_cli(args, extra) + + filter_trainable_tiles_if_needed(args.output_dir) + fix_bd_x_nan(args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/bin/segger_predict.py b/bin/segger_predict.py new file mode 100755 index 00000000..56a77ffc --- /dev/null +++ b/bin/segger_predict.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +""" +Run segger predict with spatialxe-specific preprocessing. + +Wraps segger's predict_fast.py with: + - GPU enumeration (replaces inline python3 -c torch check) + - WORKAROUND: patch predict_parquet.py at runtime to add torch.no_grad() for ~30-50% VRAM savings + - WORKAROUND: seed random.choice for deterministic GPU assignment (avoids stochastic OOM) + +Both WORKAROUNDs should be removable once the patches are upstreamed to segger. +""" + +import argparse +import os +import subprocess +import sys + + +SEGGER_CLI = "/workspace/segger_dev/src/segger/cli/predict_fast.py" + + +def parse_args(): + p = argparse.ArgumentParser() + p.add_argument("--models-dir", required=True) + p.add_argument("--segger-data-dir", required=True) + p.add_argument("--transcripts-file", required=True) + p.add_argument("--benchmarks-dir", required=True) + p.add_argument("--batch-size", type=int, required=True) + p.add_argument("--use-cc", required=True) + p.add_argument("--knn-method", required=True) + p.add_argument("--num-workers", type=int, required=True) + args, extra = p.parse_known_args() + return args, extra + + +def detect_gpus(): + """Return comma-separated list of available CUDA device ids (or "0" if none).""" + import torch + + print("=== GPU Detection (SEGGER_PREDICT) ===") + print(f"PyTorch CUDA available: {torch.cuda.is_available()}") + n = torch.cuda.device_count() + print(f"CUDA device count: {n}") + print("======================================") + if n > 0: + return ",".join(str(i) for i in range(n)) + return "0" + + +def patch_predict_parquet(): + """ + WORKAROUND: patch segger.prediction.predict_parquet at runtime. + + Avoids rebuilding the segger Docker image. Two patches: + 1. Add torch.no_grad() to disable gradient graphs during inference (~30-50% VRAM savings). + 2. Seed random for deterministic GPU assignment (avoids stochastic OOM). + + Remove this function once the patches are upstreamed to segger. 
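For readers who prefer to avoid shelling out, the same in-place patch can be written with plain string replacement; a hedged Python equivalent of the two sed edits below (same target strings, hypothetical patch_file helper)::

    from pathlib import Path

    def patch_file(path: str) -> None:
        # Apply the same two source-level edits as the sed calls, in pure Python.
        src = Path(path).read_text()
        src = src.replace(
            "with cp.cuda.Device(gpu_id):",
            "with cp.cuda.Device(gpu_id), torch.no_grad():",
        )
        src = src.replace(
            "gpu_id = random.choice(gpu_ids)",
            "random.seed(0); gpu_id = random.choice(gpu_ids)",
        )
        Path(path).write_text(src)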
+ """ + import segger.prediction.predict_parquet as m + + pred_py = m.__file__ + print(f"Patching {pred_py}: torch.no_grad() + round-robin GPU assignment") + # Use sed via subprocess for in-place edit (matches the original behavior exactly) + subprocess.run( + [ + "sed", + "-i", + "s/with cp.cuda.Device(gpu_id):/with cp.cuda.Device(gpu_id), torch.no_grad():/", + pred_py, + ], + check=True, + ) + subprocess.run( + [ + "sed", + "-i", + "s/gpu_id = random.choice(gpu_ids)/random.seed(0); gpu_id = random.choice(gpu_ids)/", + pred_py, + ], + check=True, + ) + + +def run_segger_cli(args, extra, gpu_ids): + cmd = [ + "python3", + SEGGER_CLI, + "--models_dir", + args.models_dir, + "--segger_data_dir", + args.segger_data_dir, + "--transcripts_file", + args.transcripts_file, + "--benchmarks_dir", + args.benchmarks_dir, + "--batch_size", + str(args.batch_size), + "--use_cc", + str(args.use_cc), + "--knn_method", + args.knn_method, + "--num_workers", + str(args.num_workers), + "--gpu_ids", + gpu_ids, + *extra, + ] + print(f"Running: {' '.join(cmd)}") + result = subprocess.run(cmd) + if result.returncode != 0: + sys.exit(result.returncode) + + +def main(): + args, extra = parse_args() + + # Limit cupy GPU memory to 80% so PyTorch has headroom for graph attention ops + os.environ.setdefault("CUPY_GPU_MEMORY_LIMIT", "80%") + # Belt-and-suspenders: ensure PyTorch uses expandable segments + os.environ.setdefault( + "PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True,max_split_size_mb:512" + ) + # Numba cache directory + os.environ.setdefault("NUMBA_CACHE_DIR", os.path.join(os.getcwd(), ".numba_cache")) + os.makedirs(os.environ["NUMBA_CACHE_DIR"], exist_ok=True) + + gpu_ids = detect_gpus() + print(f"Using GPUs: {gpu_ids}") + + patch_predict_parquet() + + run_segger_cli(args, extra, gpu_ids) + + +if __name__ == "__main__": + main() diff --git a/bin/spatialdata_merge.py b/bin/spatialdata_merge.py new file mode 100755 index 00000000..409d8c00 --- /dev/null +++ b/bin/spatialdata_merge.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +"""Merge two spatialdata bundles to create a layered spatialdata object.""" + +import argparse +import json +import os +import shutil + +import spatialdata + + +def parse_args(): + """Parse command-line arguments.""" + parser = argparse.ArgumentParser(description="Merge two spatialdata bundles") + parser.add_argument("--raw-bundle", required=True, help="Path to raw spatialdata bundle") + parser.add_argument("--redefined-bundle", required=True, help="Path to redefined spatialdata bundle") + parser.add_argument("--prefix", required=True, help="Output prefix (sample ID)") + parser.add_argument("--output-folder", required=True, help="Output folder name") + return parser.parse_args() + + +def main(): + """Run spatialdata merge.""" + args = parse_args() + print("[START]") + + output_dir = f"spatialdata/{args.prefix}/{args.output_folder}" + + # Ensure the output folder exists + if os.path.exists(output_dir): + shutil.rmtree(output_dir) + os.makedirs(output_dir) + + # Copy the entire reference bundle as is + for root, _, files in os.walk(args.raw_bundle): + rel_path = os.path.relpath(root, args.raw_bundle) + target_path = os.path.join(output_dir, rel_path) + os.makedirs(target_path, exist_ok=True) + for file in files: + shutil.copy(os.path.join(root, file), os.path.join(target_path, file)) + + # Rename folders in Points, Shapes, and Tables to raw_* + for category in ["points", "shapes", "tables"]: + category_path = os.path.join(output_dir, category) + if os.path.exists(category_path): + for 
folder in next(os.walk(category_path))[1]:
+                old_path = os.path.join(category_path, folder)
+                new_path = os.path.join(category_path, f"raw_{folder}")
+                print(f"[NOTE] Renaming {category}/{folder} -> {category}/raw_{folder}")
+                os.rename(old_path, new_path)
+
+    # Copy folders from redefined_bundle and rename them as redefined_*
+    for category in ["points", "shapes", "tables"]:
+        add_category_path = os.path.join(args.redefined_bundle, category)
+        output_category_path = os.path.join(output_dir, category)
+        os.makedirs(output_category_path, exist_ok=True)
+
+        if os.path.exists(add_category_path):
+            for folder in next(os.walk(add_category_path))[1]:
+                src_folder = os.path.join(add_category_path, folder)
+                dest_folder = os.path.join(output_category_path, f"redefined_{folder}")
+                shutil.copytree(src_folder, dest_folder)
+
+    # Invalidate consolidated metadata in zarr.json -- the directory renames above
+    # made the element paths in the metadata stale (e.g., 'points/transcripts' ->
+    # 'points/raw_transcripts'). Without consolidated metadata, sd.read_zarr()
+    # discovers elements by scanning the filesystem directly.
+    zarr_json = os.path.join(output_dir, "zarr.json")
+    if os.path.exists(zarr_json):
+        with open(zarr_json) as f:
+            meta = json.load(f)
+        if "consolidated_metadata" in meta:
+            del meta["consolidated_metadata"]
+        with open(zarr_json, "w") as f:
+            json.dump(meta, f)
+        print("[NOTE] Removed stale consolidated metadata from zarr.json")
+
+    print("[FINISH]")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/spatialdata_meta.py b/bin/spatialdata_meta.py
new file mode 100755
index 00000000..935f39b2
--- /dev/null
+++ b/bin/spatialdata_meta.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+"""Add metadata to SpatialData bundle."""
+
+import argparse
+import json
+import sys
+
+import pandas as pd
+import spatialdata as sd
+import zarr
+
+# Fix zarr v3 + anndata + numcodecs incompatibility:
+# anndata's string writer passes numcodecs.VLenUTF8 to zarr.Group.create_array,
+# but zarr v3 only accepts ArrayArrayCodec types. OME-Zarr 0.5 requires zarr v3
+# for images, so we can't downgrade the store format. Instead, we intercept
+# create_array to strip numcodecs codecs and let zarr v3 handle strings natively.
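+#
+# Illustrative sketch of the call being intercepted (argument names are
+# hypothetical; the real call is issued inside anndata's zarr string writer):
+#
+#   group.create_array("obs/cell_id", shape=(n,), dtype=object,
+#                      filters=[numcodecs.VLenUTF8()])  # zarr v3 would raise
+#
+# The wrapper below drops the VLenUTF8 codec so the write succeeds.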
+import numcodecs
+import zarr.core.group as _zarr_group
+
+_orig_create_array = _zarr_group.Group.create_array
+
+
+def _v3_compat_create_array(self, *args, **kwargs):
+    """Strip numcodecs VLenUTF8 from codec params for zarr v3 compatibility."""
+    for param in ("filters", "compressor", "object_codec"):
+        val = kwargs.get(param)
+        if val is None:
+            continue
+        if isinstance(val, numcodecs.vlen.VLenUTF8):
+            del kwargs[param]
+        elif isinstance(val, (list, tuple)):
+            cleaned = [v for v in val if not isinstance(v, numcodecs.vlen.VLenUTF8)]
+            if len(cleaned) != len(val):
+                if cleaned:
+                    kwargs[param] = cleaned
+                else:
+                    del kwargs[param]
+    return _orig_create_array(self, *args, **kwargs)
+
+
+_zarr_group.Group.create_array = _v3_compat_create_array
+
+
+def _is_arrow_backed(dtype):
+    """Check if a pandas dtype is backed by PyArrow."""
+    return isinstance(dtype, pd.ArrowDtype) or (
+        hasattr(dtype, "storage") and getattr(dtype, "storage", None) == "pyarrow"
+    ) or "pyarrow" in str(dtype)
+
+
+def _convert_df_arrow_to_numpy(df):
+    """Convert Arrow-backed dtypes in a DataFrame to numpy object dtype."""
+    for col in df.columns:
+        dtype = df[col].dtype
+        if _is_arrow_backed(dtype):
+            df[col] = df[col].astype("object")
+        elif isinstance(dtype, pd.CategoricalDtype):
+            cats = dtype.categories
+            if cats is not None and _is_arrow_backed(cats.dtype):
+                df[col] = df[col].cat.rename_categories(cats.astype("object"))
+    if _is_arrow_backed(df.index.dtype):
+        df.index = pd.Index(df.index.astype("object"))
+
+
+def convert_arrow_to_numpy(sdata):
+    """Convert Arrow-backed dtypes to numpy for anndata zarr write compatibility."""
+    for table_key in list(sdata.tables.keys()):
+        adata = sdata.tables[table_key]
+        _convert_df_arrow_to_numpy(adata.obs)
+        _convert_df_arrow_to_numpy(adata.var)
+
+
+def parse_args():
+    """Parse command-line arguments."""
+    parser = argparse.ArgumentParser(description="Add metadata to SpatialData bundle")
+    parser.add_argument("--spatialdata-bundle", required=True, help="Path to spatialdata bundle")
+    parser.add_argument("--xenium-bundle", required=True, help="Path to xenium bundle")
+    parser.add_argument("--prefix", required=True, help="Output prefix (sample ID)")
+    parser.add_argument("--metadata", required=True, help="Metadata string from Nextflow meta map")
+    parser.add_argument("--output-folder", required=True, help="Output folder name")
+    return parser.parse_args()
+
+
+def main():
+    """Run spatialdata metadata addition."""
+    args = parse_args()
+    print("[START]")
+
+    sdata = sd.read_zarr(args.spatialdata_bundle)
+
+    # Convert metadata into dict
+    print("[NOTE] Read in provenance ...")
+    metadata = args.metadata.strip("[]")  # Remove square brackets
+    pairs = metadata.split(", ")  # Split by comma and space
+    metadata = {k: v for k, v in (pair.split(":", 1) for pair in pairs)}  # Create dictionary
+
+    for key in metadata:
+        if key not in sdata['raw_table'].uns['spatialdata_attrs']:
+            sdata['raw_table'].uns['spatialdata_attrs'][key] = metadata[key]
+        else:
+            print(f'[ERROR] {key} already exists in sdata[raw_table].uns[spatialdata_attrs].', file=sys.stderr)
+
+    # Add experimental metadata
+    print("[NOTE] Read in experiment metadata ...")
+    sdata['raw_table'].uns['experiment_xenium'] = ''
+    metadata_experiment = f'{args.xenium_bundle}/experiment.xenium'
+    with open(metadata_experiment, "r") as f:
+        metadata_experiment = json.load(f)
+    sdata['raw_table'].uns['experiment_xenium'] = json.dumps(metadata_experiment)
+
+    # Add gene panel metadata
+    print("[NOTE] Read in gene panel metadata ...")
+    
sdata['raw_table'].uns['gene_panel'] = '' + metadata_gene_panel = f'{args.xenium_bundle}/gene_panel.json' + with open(metadata_gene_panel, "r") as f: + metadata_gene_panel = json.load(f) + sdata['raw_table'].uns['gene_panel'] = json.dumps(metadata_gene_panel) + + convert_arrow_to_numpy(sdata) + sdata.write(f"spatialdata/{args.prefix}/{args.output_folder}", overwrite=True, consolidate_metadata=True, sdata_formats=None) + + print("[FINISH]") + + +if __name__ == "__main__": + main() diff --git a/bin/spatialdata_write.py b/bin/spatialdata_write.py new file mode 100755 index 00000000..421e830f --- /dev/null +++ b/bin/spatialdata_write.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +"""Write spatialdata object from segmentation format.""" + +import argparse +import sys + +import pandas as pd +import spatialdata +from spatialdata_io import xenium + +# Fix zarr v3 + anndata + numcodecs incompatibility: +# anndata's string writer passes numcodecs.VLenUTF8 to zarr.Group.create_array, +# but zarr v3 only accepts ArrayArrayCodec types. OME-Zarr 0.5 requires zarr v3 +# for images, so we can't downgrade the store format. Instead, we intercept +# create_array to strip numcodecs codecs and let zarr v3 handle strings natively. +import numcodecs +import zarr.core.group as _zarr_group + +_orig_create_array = _zarr_group.Group.create_array + + +def _v3_compat_create_array(self, *args, **kwargs): + """Strip numcodecs VLenUTF8 from codec params for zarr v3 compatibility.""" + for param in ("filters", "compressor", "object_codec"): + val = kwargs.get(param) + if val is None: + continue + if isinstance(val, numcodecs.vlen.VLenUTF8): + del kwargs[param] + elif isinstance(val, (list, tuple)): + cleaned = [v for v in val if not isinstance(v, numcodecs.vlen.VLenUTF8)] + if len(cleaned) != len(val): + if cleaned: + kwargs[param] = cleaned + else: + del kwargs[param] + return _orig_create_array(self, *args, **kwargs) + + +_zarr_group.Group.create_array = _v3_compat_create_array + + +def _is_arrow_backed(dtype): + """Check if a pandas dtype is backed by PyArrow.""" + return ( + isinstance(dtype, pd.ArrowDtype) + or (hasattr(dtype, "storage") and getattr(dtype, "storage", None) == "pyarrow") + or "pyarrow" in str(dtype) + ) + + +def _convert_df_arrow_to_numpy(df): + """Convert Arrow-backed dtypes in a DataFrame to numpy object dtype. + + Handles three cases: + 1. Regular columns with Arrow-backed dtypes + 2. Categorical columns whose categories are Arrow-backed + 3. 
Index with Arrow-backed dtype + """ + for col in df.columns: + dtype = df[col].dtype + if _is_arrow_backed(dtype): + df[col] = df[col].astype("object") + elif isinstance(dtype, pd.CategoricalDtype): + cats = dtype.categories + if cats is not None and _is_arrow_backed(cats.dtype): + df[col] = df[col].cat.rename_categories(cats.astype("object")) + if _is_arrow_backed(df.index.dtype): + df.index = pd.Index(df.index.astype("object")) + + +def convert_arrow_to_numpy(sdata): + """Convert Arrow-backed dtypes to numpy for anndata zarr write compatibility.""" + for table_key in list(sdata.tables.keys()): + adata = sdata.tables[table_key] + _convert_df_arrow_to_numpy(adata.obs) + _convert_df_arrow_to_numpy(adata.var) + + +def parse_args(): + """Parse command-line arguments.""" + parser = argparse.ArgumentParser(description="Write spatialdata object from segmentation format") + parser.add_argument("--bundle", required=True, help="Path to input bundle") + parser.add_argument("--prefix", required=True, help="Output prefix (sample ID)") + parser.add_argument("--output-folder", required=True, help="Output folder name") + parser.add_argument("--segmented-object", required=True, help="Segmented object type (cells, nuclei, cells_and_nuclei)") + parser.add_argument("--coordinate-space", required=True, help="Coordinate space (pixels, microns)") + parser.add_argument("--format", required=True, help="Input format (xenium)") + return parser.parse_args() + + +def main(): + """Run spatialdata write.""" + args = parse_args() + print("[START]") + + cells_as_circles = False + cells_boundaries = False + nucleus_boundaries = False + cells_labels = False + nucleus_labels = False + + if args.segmented_object == "cells": + cells_boundaries = True + cells_labels = True + elif args.segmented_object == "nuclei": + nucleus_boundaries = True + nucleus_labels = True + elif args.segmented_object == "cells_and_nuclei": + cells_boundaries = True + nucleus_boundaries = True + cells_labels = True + nucleus_labels = True + else: + cells_as_circles = False + + # set sd variables based on the coordinate space + if args.coordinate_space == "pixels": + cells_labels = True + nucleus_labels = True + # Labels are sufficient in pixel space; boundaries can contain + # degenerate polygons (< 4 vertices) from XeniumRanger that + # crash spatialdata_io's shapely LinearRing parser. + cells_boundaries = False + nucleus_boundaries = False + + if args.coordinate_space == "microns": + cells_labels = False + cells_boundaries = True + nucleus_boundaries = False + nucleus_labels = False + cells_as_circles = False + + if args.format == "xenium": + sd_xenium_obj = xenium( + args.bundle, + cells_as_circles=cells_as_circles, + cells_boundaries=cells_boundaries, + nucleus_boundaries=nucleus_boundaries, + cells_labels=cells_labels, + nucleus_labels=nucleus_labels, + transcripts=True, + morphology_mip=True, + morphology_focus=True, + ) + print(sd_xenium_obj) + convert_arrow_to_numpy(sd_xenium_obj) + sd_xenium_obj.write(f"spatialdata/{args.prefix}/{args.output_folder}") + else: + sys.exit("[ERROR] Format not found") + + print("[FINISH]") + + +if __name__ == "__main__": + main() diff --git a/bin/stitch_transcripts.py b/bin/stitch_transcripts.py new file mode 100755 index 00000000..45f057e3 --- /dev/null +++ b/bin/stitch_transcripts.py @@ -0,0 +1,848 @@ +#!/usr/bin/env python3 +"""Stitch per-patch Baysor segmentation results into unified output. + +Standalone script that replaces the xenium_patch CLI package's stitch +functionality. 
Uses sopa's solve_conflicts() for overlap resolution. +""" + +from __future__ import annotations + +import argparse +import json +import os +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path + +import geopandas as gpd +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.csv as pa_csv +import shapely +from shapely.affinity import translate +from shapely.geometry import mapping, shape +from sopa.segmentation.resolve import solve_conflicts + +# --------------------------------------------------------------------------- +# Inline types (from _types.py) +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class Bounds: + """Axis-aligned bounding box in either pixel or micron coordinates.""" + + x_min: float + x_max: float + y_min: float + y_max: float + + +@dataclass(frozen=True) +class PatchInfo: + """Metadata for a single patch in the grid.""" + + patch_id: str + row: int + col: int + global_bounds_px: Bounds + global_bounds_um: Bounds + core_bounds_px: Bounds + core_bounds_um: Bounds + + +@dataclass +class PatchGridMetadata: + """Full grid metadata, serializable to JSON.""" + + version: str + bundle_path: str + image_height_px: int + image_width_px: int + pixel_size_um: float + transcript_extent_um: Bounds + grid_rows: int + grid_cols: int + overlap_um: float + overlap_px: int + patches: list[PatchInfo] + grid_type: str = "uniform" + + +# --------------------------------------------------------------------------- +# Internal result containers +# --------------------------------------------------------------------------- + + +@dataclass +class _PatchGeoResult: + """Result of parallel GeoJSON processing for a single patch.""" + + features: list[dict] + cell_ids: list[str] + + +@dataclass +class _PatchCsvResult: + """Result of parallel CSV reading for a single patch.""" + + table: pa.Table + has_cell_col: bool + has_x_col: bool + has_y_col: bool + has_gene_col: bool = False + has_feature_name_col: bool = False + + +# --------------------------------------------------------------------------- +# Grid metadata I/O (from grid.py) +# --------------------------------------------------------------------------- + + +def _dict_to_bounds(d: dict) -> Bounds: + return Bounds(d["x_min"], d["x_max"], d["y_min"], d["y_max"]) + + +def load_grid_metadata(input_path: Path) -> PatchGridMetadata: + """Deserialize PatchGridMetadata from JSON. + + Args: + input_path: Path to JSON file to read. + + Returns: + Reconstructed PatchGridMetadata. 
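+
+    Expected JSON shape (abbreviated sketch with illustrative values; the
+    full file also carries bundle_path, the image dimensions, and the *_px /
+    core bounds read below):
+
+        {
+          "version": "1.0",
+          "grid_rows": 2, "grid_cols": 2,
+          "overlap_um": 50.0, "overlap_px": 235,
+          "pixel_size_um": 0.2125,
+          "patches": [
+            {"patch_id": "patch_0_0", "row": 0, "col": 0,
+             "global_bounds_um": {"x_min": 0, "x_max": 550,
+                                  "y_min": 0, "y_max": 450},
+             ...}
+          ]
+        }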
+ """ + with open(input_path) as f: + data = json.load(f) + + patches = [ + PatchInfo( + patch_id=p["patch_id"], + row=p["row"], + col=p["col"], + global_bounds_px=_dict_to_bounds(p["global_bounds_px"]), + global_bounds_um=_dict_to_bounds(p["global_bounds_um"]), + core_bounds_px=_dict_to_bounds(p["core_bounds_px"]), + core_bounds_um=_dict_to_bounds(p["core_bounds_um"]), + ) + for p in data["patches"] + ] + + return PatchGridMetadata( + version=data["version"], + bundle_path=data["bundle_path"], + image_height_px=data["image_height_px"], + image_width_px=data["image_width_px"], + pixel_size_um=data["pixel_size_um"], + transcript_extent_um=_dict_to_bounds(data["transcript_extent_um"]), + grid_rows=data["grid_rows"], + grid_cols=data["grid_cols"], + overlap_um=data["overlap_um"], + overlap_px=data["overlap_px"], + grid_type=data.get("grid_type", "uniform"), + patches=patches, + ) + + +# --------------------------------------------------------------------------- +# GeoJSON I/O (from polygon_io.py) +# --------------------------------------------------------------------------- + + +def _normalize_geometry_collection(geojson: dict) -> dict: + """Convert a GeometryCollection to a FeatureCollection. + + proseg-to-baysor produces a non-standard GeoJSON GeometryCollection where + each geometry object has a custom ``cell`` key (bare integer) instead of + using Feature wrappers. This normalises it to a standard FeatureCollection + with ``id`` and ``properties.cell_id`` on each feature, using the + ``"cell-{N}"`` format that matches the companion CSV. + + Args: + geojson: Parsed GeoJSON dict with type GeometryCollection. + + Returns: + Standard FeatureCollection dict. + """ + features = [] + for geom in geojson.get("geometries", []): + cell_raw = geom.get("cell", "") + cell_id = str(cell_raw) + clean_geom = {k: v for k, v in geom.items() if k != "cell"} + feature = { + "type": "Feature", + "id": cell_id, + "geometry": clean_geom, + "properties": {"cell_id": cell_id}, + } + features.append(feature) + return {"type": "FeatureCollection", "features": features} + + +def read_geojson(geojson_path: Path) -> dict: + """Read a GeoJSON file and normalise to FeatureCollection. + + Handles both standard FeatureCollections and the GeometryCollection + format produced by proseg-to-baysor. + + Args: + geojson_path: Path to the GeoJSON file. + + Returns: + Parsed GeoJSON dict (always a FeatureCollection). + """ + with open(geojson_path) as f: + data = json.load(f) + if data.get("type") == "GeometryCollection": + return _normalize_geometry_collection(data) + return data + + +def transform_polygons(geojson: dict, offset_x: float, offset_y: float) -> dict: + """Shift all polygon coordinates by (offset_x, offset_y). + + Args: + geojson: Input FeatureCollection. + offset_x: Translation in x. + offset_y: Translation in y. + + Returns: + New FeatureCollection with shifted geometries. + """ + features = [] + for feat in geojson.get("features", []): + geom = shape(feat["geometry"]) + shifted = translate(geom, xoff=offset_x, yoff=offset_y) + new_feat = {**feat, "geometry": mapping(shifted)} + features.append(new_feat) + return {"type": "FeatureCollection", "features": features} + + +def write_geojson(geojson: dict, output_path: Path) -> None: + """Write a GeoJSON FeatureCollection. + + Args: + geojson: GeoJSON dict to write. + output_path: Destination path (parent dirs created automatically). 
+ """ + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w") as f: + json.dump(geojson, f) + + +# --------------------------------------------------------------------------- +# Arrow utilities (from _arrow_utils.py) +# --------------------------------------------------------------------------- + + +def float_str_array(f64_array: pa.Array) -> pa.Array: + """Convert a float64 pyarrow array to string using Python's str(float) format. + + pyarrow's built-in cast omits trailing '.0' for whole numbers. This + function ensures output matches str(float(...)) for CSV compatibility. + + Args: + f64_array: Float64 pyarrow array to convert. + + Returns: + String pyarrow array with Python-formatted float values. + """ + return pa.array( + [str(v) if v is not None else None for v in f64_array.to_pylist()], + type=pa.string(), + ) + + +# --------------------------------------------------------------------------- +# Parallel I/O +# --------------------------------------------------------------------------- + + +def _read_and_transform_geojson( + patch: PatchInfo, + patches_dir: Path, + geojson_filename: str, +) -> _PatchGeoResult | None: + """Read, transform GeoJSON for a single patch (no core clipping). + + Args: + patch: Patch metadata. + patches_dir: Root patches directory. + geojson_filename: GeoJSON filename within each patch directory. + + Returns: + _PatchGeoResult with features and cell IDs, or None if no GeoJSON. + """ + geojson_path = patches_dir / patch.patch_id / geojson_filename + if not geojson_path.exists(): + return None + + geojson = read_geojson(geojson_path) + + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + geojson = transform_polygons(geojson, offset_x, offset_y) + + features = geojson.get("features", []) + seen: set[str] = set() + cell_ids: list[str] = [] + for feat in features: + old_id = str(feat.get("id", feat.get("properties", {}).get("cell_id", ""))) + if old_id not in seen: + seen.add(old_id) + cell_ids.append(old_id) + + return _PatchGeoResult(features=features, cell_ids=cell_ids) + + +def _read_patch_csv( + patch: PatchInfo, + patches_dir: Path, + csv_filename: str, +) -> _PatchCsvResult | None: + """Read a patch CSV into a pyarrow Table. + + All columns are read as strings to preserve exact formatting. + + Args: + patch: Patch metadata. + patches_dir: Root patches directory. + csv_filename: CSV filename within each patch directory. + + Returns: + _PatchCsvResult with the table and column presence flags, or None. 
+ """ + csv_path = patches_dir / patch.patch_id / csv_filename + if not csv_path.exists(): + return None + + with open(csv_path) as fh: + header_line = fh.readline().strip() + col_names = header_line.split(",") + all_string_types = {name: pa.string() for name in col_names} + + table = pa_csv.read_csv( + csv_path, + convert_options=pa_csv.ConvertOptions( + column_types=all_string_types, + strings_can_be_null=False, + ), + read_options=pa_csv.ReadOptions(use_threads=True), + ) + + return _PatchCsvResult( + table=table, + has_cell_col="cell" in table.column_names, + has_x_col="x" in table.column_names, + has_y_col="y" in table.column_names, + has_gene_col="gene" in table.column_names, + has_feature_name_col="feature_name" in table.column_names, + ) + + +# --------------------------------------------------------------------------- +# CSV processing +# --------------------------------------------------------------------------- + + +def _transform_patch_coords( + csv_result: _PatchCsvResult, + offset_x: float, + offset_y: float, +) -> pa.Table: + """Shift transcript coordinates from local patch space to global space. + + Args: + csv_result: The raw CSV table and column flags. + offset_x: X offset for coordinate transform (microns). + offset_y: Y offset for coordinate transform (microns). + + Returns: + Table with x, y columns shifted to global coordinates. + """ + table = csv_result.table + + if table.num_rows == 0: + return table + + if csv_result.has_x_col: + x_f64 = pc.add( + table.column("x").cast(pa.float64()), + pa.scalar(offset_x, type=pa.float64()), + ) + table = table.set_column( + table.schema.get_field_index("x"), + "x", + float_str_array(x_f64), + ) + if csv_result.has_y_col: + y_f64 = pc.add( + table.column("y").cast(pa.float64()), + pa.scalar(offset_y, type=pa.float64()), + ) + table = table.set_column( + table.schema.get_field_index("y"), + "y", + float_str_array(y_f64), + ) + + return table + + +# --------------------------------------------------------------------------- +# Sopa conflict resolution +# --------------------------------------------------------------------------- + + +def _stitch_sopa_resolve( + metadata: PatchGridMetadata, + geo_results: list[_PatchGeoResult | None], + csv_results: list[_PatchCsvResult | None], + all_geojson_features: list[dict], + all_tables: list[pa.Table], + threshold: float = 0.5, +) -> set[str]: + """Stitch per-patch segmentation using spatial containment assignment. + + 1. Collect ALL non-empty polygons from all patches (no transcript filtering). + 2. Resolve overlapping polygons via sopa's solve_conflicts(). + 3. Assign sequential global cell IDs (cell-1, cell-2, ...). + 4. Spatially assign transcripts to resolved polygons using STRtree. + 5. Noise transcripts (outside all polygons) kept only from their core patch. + + This approach works regardless of whether Baysor's CSV ``cell`` column + matches GeoJSON cell IDs -- all assignment is done by spatial containment. + + Args: + metadata: Grid metadata with patch list. + geo_results: Per-patch GeoJSON results (already in global coords). + csv_results: Per-patch CSV results. + all_geojson_features: Output list to append resolved GeoJSON features. + all_tables: Output list to append processed CSV tables. + threshold: Overlap threshold for sopa's solve_conflicts (0-1). + + Returns: + Set of global cell IDs created by merging overlapping cells. 
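+
+    Example (illustrative): if patch_0_0 and patch_0_1 both segment the same
+    cell straddling their shared border and the two polygons overlap by more
+    than ``threshold``, solve_conflicts() unions them into one polygon, which
+    then receives a single sequential ID (e.g. "cell-7") and is recorded in
+    the returned merged set.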
+ """ + # --- Phase 1: Collect all polygons from all patches --- + all_polygons: list = [] + patch_indices_list: list[int] = [] + + for i, patch in enumerate(metadata.patches): + geo_result = geo_results[i] + if geo_result is None: + continue + + for feat in geo_result.features: + polygon = shape(feat["geometry"]) + if polygon.is_empty: + continue + if not polygon.is_valid: + polygon = shapely.make_valid(polygon) + if polygon.is_empty: + continue + # make_valid can produce MultiPolygon/GeometryCollection; + # xeniumranger only accepts Polygon, so keep largest component + if polygon.geom_type == "MultiPolygon": + polygon = max(polygon.geoms, key=lambda g: g.area) + elif polygon.geom_type == "GeometryCollection": + polys = [g for g in polygon.geoms if g.geom_type == "Polygon"] + if not polys: + continue + polygon = max(polys, key=lambda g: g.area) + + all_polygons.append(polygon) + patch_indices_list.append(i) + + if not all_polygons: + print("[stitch] No polygons found in any patch") + # Still transform and collect CSVs as noise-only + for i, patch in enumerate(metadata.patches): + csv_result = csv_results[i] + if csv_result is None: + continue + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + transformed = _transform_patch_coords(csv_result, offset_x, offset_y) + if transformed.num_rows > 0: + all_tables.append(transformed) + return set() + + # --- Phase 2: Resolve overlapping polygons via sopa --- + patch_idx_array = np.array(patch_indices_list, dtype=np.int64) + input_gdf = gpd.GeoDataFrame(geometry=all_polygons) + resolved_gdf, kept_indices = solve_conflicts( + input_gdf, + threshold=threshold, + patch_indices=patch_idx_array, + return_indices=True, + ) + + # --- Phase 3: Assign global cell IDs to resolved polygons --- + merged_cell_ids: set[str] = set() + kept_arr = np.asarray(kept_indices) + resolved_polys: list = [] + resolved_ids: list[str] = [] + + for rank, orig_idx in enumerate(kept_arr, start=1): + global_id = f"cell-{rank}" + geom = resolved_gdf.geometry.iloc[rank - 1] + + # solve_conflicts union can produce MultiPolygon; keep largest + if geom.geom_type == "MultiPolygon": + geom = max(geom.geoms, key=lambda g: g.area) + elif geom.geom_type == "GeometryCollection": + polys = [g for g in geom.geoms if g.geom_type == "Polygon"] + if not polys: + continue + geom = max(polys, key=lambda g: g.area) + + if orig_idx < 0: + merged_cell_ids.add(global_id) + + resolved_polys.append(geom) + resolved_ids.append(global_id) + + all_geojson_features.append( + { + "type": "Feature", + "id": global_id, + "geometry": mapping(geom), + "properties": {"cell_id": global_id}, + } + ) + + print( + f"[stitch] Resolved {len(all_polygons)} input polygons to " + f"{len(resolved_polys)} cells ({len(merged_cell_ids)} merged)" + ) + + # --- Phase 4: Spatial transcript assignment via STRtree --- + poly_tree = shapely.STRtree(resolved_polys) + + for i, patch in enumerate(metadata.patches): + csv_result = csv_results[i] + if csv_result is None: + continue + + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + core = patch.core_bounds_um + + transformed = _transform_patch_coords(csv_result, offset_x, offset_y) + if transformed.num_rows == 0: + continue + + if not csv_result.has_x_col or not csv_result.has_y_col: + all_tables.append(transformed) + continue + + # Get global coordinates for spatial query + gx = transformed.column("x").cast(pa.float64()).to_numpy(zero_copy_only=False) + gy = 
transformed.column("y").cast(pa.float64()).to_numpy(zero_copy_only=False) + points = shapely.points(gx, gy) + + # Query STRtree: returns (input_indices, tree_indices) + point_hits, poly_hits = poly_tree.query(points, predicate="intersects") + + # Build point -> cell_id mapping (first hit wins) + point_to_cell: dict[int, str] = {} + for pt_idx, poly_idx in zip(point_hits, poly_hits): + if pt_idx not in point_to_cell: + point_to_cell[pt_idx] = resolved_ids[poly_idx] + + # Build cell and is_noise columns + n_rows = transformed.num_rows + cell_arr = [""] * n_rows + is_noise_arr = ["true"] * n_rows + for pt_idx, cell_id in point_to_cell.items(): + cell_arr[pt_idx] = cell_id + is_noise_arr[pt_idx] = "false" + + # Filter noise transcripts to core bounds only + # Assigned transcripts are kept from all patches (dedup later by transcript_id) + in_core = ( + (gx >= core.x_min) + & (gx < core.x_max) + & (gy >= core.y_min) + & (gy < core.y_max) + ) + is_assigned = np.array([c != "" for c in cell_arr]) + keep_mask = pa.array(is_assigned | in_core, type=pa.bool_()) + + filtered = transformed.filter(keep_mask) + cell_arr_filtered = [c for c, k in zip(cell_arr, (is_assigned | in_core)) if k] + is_noise_filtered = [ + n for n, k in zip(is_noise_arr, (is_assigned | in_core)) if k + ] + + if filtered.num_rows == 0: + continue + + # Set cell and is_noise columns + cell_idx = ( + filtered.schema.get_field_index("cell") + if "cell" in filtered.column_names + else None + ) + if cell_idx is not None: + filtered = filtered.set_column( + cell_idx, "cell", pa.array(cell_arr_filtered, type=pa.string()) + ) + else: + filtered = filtered.append_column( + "cell", pa.array(cell_arr_filtered, type=pa.string()) + ) + + noise_idx = ( + filtered.schema.get_field_index("is_noise") + if "is_noise" in filtered.column_names + else None + ) + if noise_idx is not None: + filtered = filtered.set_column( + noise_idx, + "is_noise", + pa.array(is_noise_filtered, type=pa.string()), + ) + else: + filtered = filtered.append_column( + "is_noise", pa.array(is_noise_filtered, type=pa.string()) + ) + + all_tables.append(filtered) + + return merged_cell_ids + + +# --------------------------------------------------------------------------- +# Main orchestrator +# --------------------------------------------------------------------------- + + +def stitch_transcript_assignments( + patches_dir: Path, + output_dir: Path, + csv_filename: str = "segmentation.csv", + geojson_filename: str = "segmentation_polygons.json", + max_workers: int | None = None, + min_transcripts_per_cell: int = 0, +) -> None: + """Stitch per-patch transcript assignments and polygons into unified output. + + For each patch, reads the transcript assignment CSV and polygon GeoJSON. + Cells are deduplicated using sopa's solve_conflicts() which resolves + overlapping cells at patch boundaries based on area overlap ratio. + + Processing is split into a parallel I/O phase (reading GeoJSON and CSV + files via thread pool) and a sequential phase (dedup, global cell ID + assignment, remapping, and concatenation). + + Args: + patches_dir: Directory containing patch subdirectories and patch_grid.json. + output_dir: Output directory for stitched CSV and GeoJSON. + csv_filename: CSV filename within each patch directory. + geojson_filename: GeoJSON filename within each patch directory. + max_workers: Maximum number of threads for parallel I/O. 
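+        min_transcripts_per_cell: Post-stitch filter; cells with fewer
+            transcripts are dissolved back to noise (0 disables the filter).
+
+    Example (illustrative paths):
+
+        stitch_transcript_assignments(
+            patches_dir=Path("patches"),
+            output_dir=Path("stitched"),
+            min_transcripts_per_cell=10,
+        )
+        # -> writes stitched/xr-transcript-metadata.csv and
+        #    stitched/xr-cell-polygons.geojson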
+ """ + patches_dir = Path(patches_dir) + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + metadata = load_grid_metadata(patches_dir / "patch_grid.json") + + n_patches = len(metadata.patches) + if max_workers is None: + max_workers = min(n_patches, os.cpu_count() or 1) + + # ---- Parallel phase: read GeoJSON and CSV files concurrently ---- + with ThreadPoolExecutor(max_workers=max_workers) as executor: + geo_futures = [ + executor.submit( + _read_and_transform_geojson, p, patches_dir, geojson_filename + ) + for p in metadata.patches + ] + csv_futures = [ + executor.submit(_read_patch_csv, p, patches_dir, csv_filename) + for p in metadata.patches + ] + geo_results = [f.result() for f in geo_futures] + csv_results = [f.result() for f in csv_futures] + + # ---- Sequential phase: assign global cell IDs, remap, concatenate ---- + all_tables: list[pa.Table] = [] + all_geojson_features: list[dict] = [] + + _stitch_sopa_resolve( + metadata, + geo_results, + csv_results, + all_geojson_features, + all_tables, + threshold=0.5, + ) + + # Concatenate all patch tables + if all_tables: + merged = pa.concat_tables(all_tables) + + # Deduplicate by transcript_id: prefer assigned over noise + if "transcript_id" in merged.column_names: + if "cell" in merged.column_names: + is_noise = pc.equal(merged.column("cell"), "").cast(pa.int8()) + row_order = pa.array(np.arange(merged.num_rows), type=pa.int64()) + sort_table = pa.table({"_noise": is_noise, "_row": row_order}) + sort_indices = pc.sort_indices( + sort_table, + sort_keys=[("_noise", "ascending"), ("_row", "ascending")], + ) + merged = merged.take(sort_indices) + + tid_np = merged.column("transcript_id").to_numpy(zero_copy_only=False) + _, first_indices = np.unique(tid_np, return_index=True) + first_indices.sort() + merged = merged.take(first_indices) + + # Post-stitch cell filter: drop cells below min_transcripts_per_cell + if min_transcripts_per_cell > 0 and "cell" in merged.column_names: + cell_col = merged.column("cell") + cell_counts: dict[str, int] = {} + for c in cell_col.to_pylist(): + if c: + cell_counts[c] = cell_counts.get(c, 0) + 1 + small_cells = { + cid + for cid, cnt in cell_counts.items() + if cnt < min_transcripts_per_cell + } + if small_cells: + # Reassign transcripts from small cells to noise + new_cell = ["" if c in small_cells else c for c in cell_col.to_pylist()] + new_noise = [ + "true" if c in small_cells else n + for c, n in zip( + cell_col.to_pylist(), + merged.column("is_noise").to_pylist() + if "is_noise" in merged.column_names + else ["false"] * merged.num_rows, + ) + ] + cidx = merged.column_names.index("cell") + merged = merged.set_column( + cidx, "cell", pa.array(new_cell, type=pa.string()) + ) + if "is_noise" in merged.column_names: + nidx = merged.column_names.index("is_noise") + merged = merged.set_column( + nidx, "is_noise", pa.array(new_noise, type=pa.string()) + ) + # Remove filtered cells from GeoJSON + all_geojson_features[:] = [ + f + for f in all_geojson_features + if str(f.get("id", f.get("properties", {}).get("cell_id", ""))) + not in small_cells + ] + print( + f"[stitch] Filtered {len(small_cells)} cells with " + f"<{min_transcripts_per_cell} transcripts" + ) + + # Log assignment stats + if "cell" in merged.column_names: + cell_vals = merged.column("cell").to_pylist() + n_assigned = sum(1 for c in cell_vals if c) + n_noise = sum(1 for c in cell_vals if not c) + print( + f"[stitch] Final: {merged.num_rows} transcripts, " + f"{n_assigned} assigned, {n_noise} noise" + ) + + # Cast 
is_noise to integer for xeniumranger compatibility + if "is_noise" in merged.column_names: + noise_col = merged.column("is_noise") + if noise_col.type == pa.string(): + lower = pc.utf8_lower(noise_col) + is_true = pc.or_(pc.equal(lower, "true"), pc.equal(lower, "1")) + idx = merged.column_names.index("is_noise") + merged = merged.set_column(idx, "is_noise", is_true.cast(pa.int8())) + + # Write CSV + if merged.num_rows > 0: + csv_out = output_dir / "xr-transcript-metadata.csv" + pa_csv.write_csv( + merged, + csv_out, + write_options=pa_csv.WriteOptions(quoting_style="needed"), + ) + + # Safety net: remove orphan polygons with zero transcripts + if all_geojson_features and all_tables: + csv_cell_ids: set[str] = set() + if "cell" in merged.column_names: + csv_cell_ids = set(c for c in merged.column("cell").to_pylist() if c) + all_geojson_features = [ + f + for f in all_geojson_features + if str(f.get("id", f.get("properties", {}).get("cell_id", ""))) + in csv_cell_ids + ] + + # Write merged GeoJSON + if all_geojson_features: + merged_geo = {"type": "FeatureCollection", "features": all_geojson_features} + write_geojson(merged_geo, output_dir / "xr-cell-polygons.geojson") + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Stitch per-patch Baysor segmentation results into unified output." + ) + parser.add_argument( + "--patches", + type=Path, + required=True, + help="Directory containing patch subdirectories and patch_grid.json", + ) + parser.add_argument( + "--output", + type=Path, + required=True, + help="Output directory for stitched CSV and GeoJSON", + ) + parser.add_argument( + "--csv-filename", + default="segmentation.csv", + help="CSV filename within each patch (default: segmentation.csv)", + ) + parser.add_argument( + "--geojson-filename", + default="segmentation_polygons.json", + help="GeoJSON filename within each patch (default: segmentation_polygons.json)", + ) + parser.add_argument( + "--min-transcripts-per-cell", + type=int, + default=0, + help="Drop cells with fewer transcripts (0 = no filter, default: 0)", + ) + args = parser.parse_args() + + stitch_transcript_assignments( + patches_dir=args.patches, + output_dir=args.output, + csv_filename=args.csv_filename, + geojson_filename=args.geojson_filename, + min_transcripts_per_cell=args.min_transcripts_per_cell, + ) + + +if __name__ == "__main__": + main() diff --git a/bin/utility_convert_mask_uint32.py b/bin/utility_convert_mask_uint32.py new file mode 100755 index 00000000..955ad4b7 --- /dev/null +++ b/bin/utility_convert_mask_uint32.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +""" +Convert a segmentation mask TIFF to uint32 dtype. + +XeniumRanger import-segmentation requires uint32 masks, but upstream +segmenters (e.g. StarDist) often emit int32 labels. This script reads +the input mask, casts it to uint32, and writes the result. +""" + +import argparse + +import numpy as np +import tifffile + + +def convert_mask_to_uint32(input_path: str, output_path: str) -> None: + """ + Read a mask TIFF, cast to uint32, and write to output_path. + + Args: + input_path: Path to input mask TIFF (any integer dtype). + output_path: Path where the uint32 mask will be written. 
+ """ + mask = tifffile.imread(input_path) + print(f"Input dtype: {mask.dtype}, shape: {mask.shape}, labels: {mask.max()}") + tifffile.imwrite(output_path, mask.astype(np.uint32)) + print("Output dtype: uint32") + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Convert a segmentation mask TIFF to uint32 dtype." + ) + parser.add_argument( + "--input", required=True, help="Path to input mask TIFF" + ) + parser.add_argument( + "--output", required=True, help="Path where uint32 mask will be written" + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + convert_mask_to_uint32(input_path=args.input, output_path=args.output) diff --git a/bin/utility_downscale_morphology.py b/bin/utility_downscale_morphology.py new file mode 100755 index 00000000..8544ecf3 --- /dev/null +++ b/bin/utility_downscale_morphology.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Pre-downscale a morphology image for Cellpose. + +Reduces image dimensions by a scale factor so that Cellpose's internal +rescaling (diam_mean / diameter) does not exceed GPU/CPU memory. The +scale factor defaults to diameter / diam_mean (e.g., 9 / 30 = 0.3). +After downscaling, Cellpose should run with --diameter equal to +diam_mean (no further internal rescaling). + +Outputs: + {prefix}/downscaled.tif - Downscaled image at the same dtype as input. + {prefix}/scale_info.json - Scale factor and original/new dimensions. +""" + +import argparse +import json +from pathlib import Path + +import tifffile +from skimage.transform import resize + +# Cellpose network requires a minimum spatial size of 256 px. +MIN_DIM = 256 + + +def downscale_image( + image_path: str, diameter: float, diam_mean: float, prefix: str +) -> None: + """ + Downscale image so Cellpose can run with diameter == diam_mean. + + Args: + image_path: Path to morphology TIFF (2D, 3D, or 4D). + diameter: Target object diameter (used to compute scale). + diam_mean: Cellpose model's mean diameter assumption. + prefix: Output directory. 
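+
+    Worked example (numbers from the module docstring; the image size is
+    illustrative): diameter=9 and diam_mean=30 give
+    scale = min(9 / 30, 1.0) = 0.3, so a 40000 x 30000 px image becomes
+    12000 x 9000 px, and Cellpose can then run with --diameter 30, i.e. no
+    further internal rescaling.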
+ """ + scale = min(diameter / diam_mean, 1.0) # clamp to prevent upscaling + + img = tifffile.imread(image_path) + print(f"Original: {img.shape}, dtype={img.dtype}, ndim={img.ndim}") + + # Handle multichannel OME-TIFFs: shape can be (H, W), (C, H, W), or (Z, C, H, W) + if img.ndim == 2: + orig_h, orig_w = img.shape + new_h = max(int(orig_h * scale), MIN_DIM) + new_w = max(int(orig_w * scale), MIN_DIM) + output_shape = (new_h, new_w) + elif img.ndim == 3: + orig_h, orig_w = img.shape[1], img.shape[2] + new_h = max(int(orig_h * scale), MIN_DIM) + new_w = max(int(orig_w * scale), MIN_DIM) + output_shape = (img.shape[0], new_h, new_w) + else: + orig_h, orig_w = img.shape[-2], img.shape[-1] + new_h = max(int(orig_h * scale), MIN_DIM) + new_w = max(int(orig_w * scale), MIN_DIM) + output_shape = img.shape[:-2] + (new_h, new_w) + + print(f"Downscaling by {scale:.3f}: ({orig_h}, {orig_w}) -> ({new_h}, {new_w})") + + img_ds = resize(img, output_shape, order=3, preserve_range=True, anti_aliasing=True) + img_ds = img_ds.astype(img.dtype) + + out_dir = Path(prefix) + out_dir.mkdir(parents=True, exist_ok=True) + tifffile.imwrite(str(out_dir / "downscaled.tif"), img_ds, compression="zlib") + + info = { + "scale": scale, + "orig_h": orig_h, + "orig_w": orig_w, + "new_h": new_h, + "new_w": new_w, + "diameter": diameter, + "diam_mean": diam_mean, + } + with open(out_dir / "scale_info.json", "w") as f: + json.dump(info, f) + print(f"Done: downscaled.tif written, shape={img_ds.shape}") + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Pre-downscale a morphology image for Cellpose." + ) + parser.add_argument("--image", required=True, help="Morphology TIFF input") + parser.add_argument("--diameter", type=float, required=True, help="Target object diameter") + parser.add_argument("--diam-mean", type=float, required=True, help="Cellpose model diam_mean") + parser.add_argument("--prefix", required=True, help="Output directory") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + downscale_image( + image_path=args.image, + diameter=args.diameter, + diam_mean=args.diam_mean, + prefix=args.prefix, + ) diff --git a/bin/utility_extract_dapi.py b/bin/utility_extract_dapi.py new file mode 100755 index 00000000..3d60f563 --- /dev/null +++ b/bin/utility_extract_dapi.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +""" +Extract a single channel (e.g., DAPI) from a multi-channel OME-TIFF. + +Xenium morphology_focus.ome.tif has multiple channels (DAPI, boundary, +interior). Single-channel segmenters such as StarDist 2D_versatile_fluo +expect one channel as input. This script reads the input image, slices +the requested channel, and writes the result. +""" + +import argparse + +import tifffile + + +def extract_channel(input_path: str, output_path: str, channel_index: int) -> None: + """ + Read an OME-TIFF, extract a single channel, and write the result. + + Args: + input_path: Path to multi-channel OME-TIFF morphology image. + output_path: Path where the single-channel TIFF will be written. + channel_index: Index of the channel to extract. 
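+
+    Example (illustrative shapes): a (3, H, W) morphology_focus stack with
+    channel_index=0 yields the (H, W) first-channel plane; a (Z, C, H, W)
+    stack is sliced as img[0, channel_index].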
+    """
+    img = tifffile.imread(input_path)
+    orig_shape = img.shape
+
+    if img.ndim == 3:
+        img = img[channel_index]
+    elif img.ndim == 4:
+        img = img[0, channel_index]
+
+    tifffile.imwrite(output_path, img)
+    print(f"Input shape: {orig_shape} -> extracted channel {channel_index}: {img.shape}")
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse command-line arguments."""
+    parser = argparse.ArgumentParser(
+        description="Extract a single channel from a multi-channel OME-TIFF."
+    )
+    parser.add_argument(
+        "--input", required=True, help="Path to multi-channel OME-TIFF morphology image"
+    )
+    parser.add_argument(
+        "--output", required=True, help="Path where the single-channel TIFF will be written"
+    )
+    parser.add_argument(
+        "--channel-index", type=int, default=0, help="Channel index to extract (default: 0)"
+    )
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    extract_channel(
+        input_path=args.input,
+        output_path=args.output,
+        channel_index=args.channel_index,
+    )
diff --git a/bin/utility_extract_preview_data.py b/bin/utility_extract_preview_data.py
new file mode 100755
index 00000000..0ea737c2
--- /dev/null
+++ b/bin/utility_extract_preview_data.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+"""
+Extract preview data from Baysor preview HTML reports.
+
+Parses embedded Vega-Lite spec variables and base64 PNG images from the
+Baysor preview.html file, writing MultiQC-compatible TSV and PNG files.
+"""
+
+import argparse
+import base64
+import html
+import json
+import re
+import sys
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import pandas as pd
+from bs4 import BeautifulSoup
+
+
+def get_png_files(soup: BeautifulSoup, outdir: Path) -> None:
+    """Get png base64 images following specific h1 tags in preview.html"""
+    target_ids = ["Transcript_Plots", "Noise_Level"]
+    outdir.mkdir(parents=True, exist_ok=True)
+
+    for h1_id in target_ids:
+        h1_tag = soup.find("h1", id=h1_id)
+        if not h1_tag:
+            print(f"[WARN] No <h1> with id {h1_id} found")
+            continue
+
+        # Look for the first <img> after the h1 in the DOM
+        img_tag = h1_tag.find_next("img")
+        if not img_tag or not img_tag.get("src"):
+            print(f"[WARN] No <img> found after h1#{h1_id}")
+            continue
+
+        img_src = img_tag["src"]
+        if img_src.startswith("data:image/png;base64,"):
+            base64_data = img_src.split(",", 1)[1]
+            data = base64.b64decode(base64_data)
+        else:
+            print(f"[WARN] img src is not base64 PNG for h1#{h1_id}")
+            continue
+
+        # save png files with _mqc suffix for MultiQC integration
+        img_name = f"{h1_id}_mqc.png".lower()
+        out_path = outdir / img_name
+        with open(out_path, "wb") as f:
+            f.write(data)
+
+        print(f"[INFO] Saved {img_name}")
+
+    return None
+
+
+def extract_js_object(text: str, start_idx: int) -> Tuple[Optional[str], int]:
+    """Extract json-like object starting at start_idx."""
+    if start_idx >= len(text) or text[start_idx] != "{":
+        return None, start_idx
+
+    stack, in_str, escape, quote = [], False, False, None
+    i = start_idx
+    # A while loop is used instead of `for i in range(...)` because the
+    # comment-skipping branches below advance the index manually
+    # (reassigning the loop variable of a for loop has no effect in Python).
+    while i < len(text):
+        ch = text[i]
+        if in_str:
+            if escape:
+                escape = False
+            elif ch == "\\":
+                escape = True
+            elif ch == quote:
+                in_str = False
+        else:
+            if ch in ('"', "'"):
+                in_str, quote = True, ch
+            elif ch == "{":
+                stack.append("{")
+            elif ch == "}":
+                stack.pop()
+                if not stack:
+                    return text[start_idx : i + 1], i + 1
+            elif ch == "/" and i + 1 < len(text):
+                # skip js comments
+                nxt = text[i + 1]
+                if nxt == "/":
+                    end = text.find("\n", i + 2)
+                    i = len(text) - 1 if end == -1 else end
+                elif nxt == "*":
+                    end = text.find("*/", i + 2)
+                    if end == -1:
+                        break
+                    i = end + 1
+        i += 1
+
+    return None, start_idx
+
+
+def js_to_json(js: str) -> str:
+    """Convert a JS object string to valid JSON."""
+    # Remove comments
+    js = re.sub(r"/\*.*?\*/", "", js, flags=re.S)
+    js = re.sub(r"//[^\n]*", "", js)
+
+    # Convert single-quoted strings to double-quoted strings
+    js = re.sub(
+        r"'((?:\\.|[^'\\])*)'",
+        lambda m: '"' + m.group(1).replace('"', '\\"') + '"',
+        js,
+    )
+
+    # Remove trailing commas
+    js = re.sub(r",\s*(?=[}\]])", "", js)
+    js = re.sub(r",\s*,+", ",", js)
+
+    return js.strip()
+
+
+def find_variables(script_text: str) -> Dict[str, str]:
+    """Find all 'var|let|const specN =' declarations and extract their objects."""
+    specs: Dict[str, str] = {}
+    script_text = html.unescape(script_text)
+    pattern = re.compile(r"(?:var|let|const)\s+(spec\d+)\s*=\s*{", re.I)
+
+    for match in pattern.finditer(script_text):
+        var = match.group(1)
+        obj, _ = extract_js_object(script_text, match.end() - 1)
+        if obj:
+            specs[var] = obj
+        else:
+            print(f"[WARN] Could not extract object for {var}")
+    return specs
+
+
+def write_tsvs(specs: Dict[str, str], outdir: Path) -> List[Path]:
+    """Convert extracted json to tsv."""
+    outdir.mkdir(parents=True, exist_ok=True)
+    written: List[Path] = []
+
+    for var, js_obj in specs.items():
+        try:
+            data = json.loads(js_to_json(js_obj))
+            values = data.get("data", {}).get("values", [])
+            if not values:
+                print(f"[WARN] No data.values found in {var}")
+                continue
+
+            df = pd.DataFrame(values)
+            outpath = outdir / f"{var}_mqc.tsv"
+
+            with open(outpath, "w") as f:
+                f.write("# plot_type: linegraph\n")
+                f.write(f"# section_name: {var}\n")
+                f.write("# description: Extracted preview data\n")
+                df.to_csv(f, sep="\t", index=False)
+
+            written.append(outpath)
+            print(f"[INFO] Wrote {outpath} ({len(df)} rows × {len(df.columns)} cols)")
+        except Exception as e:
+            print(f"[ERROR] Failed to process {var}: {e}")
+
+    return written
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse command-line 
arguments.""" + parser = argparse.ArgumentParser( + description="Extract preview data from Baysor preview HTML reports." + ) + parser.add_argument( + "--preview-html", + required=True, + help="Path to Baysor preview HTML file", + ) + parser.add_argument( + "--prefix", + required=True, + help="Output directory prefix (sample ID)", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + input_path: Path = Path(args.preview_html) + outdir: Path = Path(args.prefix) + + text = input_path.read_text(encoding="utf-8", errors="ignore") + soup = BeautifulSoup(text, "html.parser") + + # get the script section + if " argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Get transcript coordinate bounds from a Parquet file." + ) + parser.add_argument( + "--transcripts", + required=True, + help="Path to transcripts parquet file", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + result = get_coordinates(args.transcripts) + print(",".join(str(v) for v in result)) diff --git a/bin/utility_parquet_to_csv.py b/bin/utility_parquet_to_csv.py new file mode 100755 index 00000000..bfa19c40 --- /dev/null +++ b/bin/utility_parquet_to_csv.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +""" +Convert a Parquet file to CSV format. + +Reads a Parquet file and writes it as CSV, optionally gzip-compressed. +""" + +import argparse +from pathlib import Path + +import pandas as pd + + +def convert_parquet( + transcripts: str, + extension: str = ".csv", + prefix: str = "", +) -> None: + """ + Convert a Parquet file to CSV or CSV.GZ format. + + Args: + transcripts: Filename of the input parquet file + extension: Output extension ('.csv' or '.gz' for gzip) + prefix: Output directory prefix + """ + df = pd.read_parquet(transcripts, engine="pyarrow") + + Path(prefix).mkdir(parents=True, exist_ok=True) + + if extension == ".gz": + output = transcripts.replace(".parquet", ".csv.gz") + df.to_csv(f"{prefix}/{output}", compression="gzip", index=False) + else: + output = transcripts.replace(".parquet", ".csv") + df.to_csv(f"{prefix}/{output}", index=False) + + return None + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Convert a Parquet file to CSV format." + ) + parser.add_argument( + "--transcripts", + required=True, + help="Input parquet filename", + ) + parser.add_argument( + "--extension", + default=".csv", + help="Output extension: '.csv' or '.gz' (default: .csv)", + ) + parser.add_argument( + "--prefix", + required=True, + help="Output directory prefix (sample ID)", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + convert_parquet( + transcripts=args.transcripts, + extension=args.extension, + prefix=args.prefix, + ) diff --git a/bin/utility_resize_tif.py b/bin/utility_resize_tif.py new file mode 100755 index 00000000..6cca640d --- /dev/null +++ b/bin/utility_resize_tif.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +Resize a segmentation TIFF mask to match transcript coordinates. + +This script rescales a segmentation mask image to match the coordinate +space of Xenium transcript data using microns-per-pixel metadata. 
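+
+Worked example (illustrative numbers): transcript bounds spanning
+2125 x 1700 um at a pixel size of 0.2125 um/px give a target mask of
+round(2125 / 0.2125) x round(1700 / 0.2125) = 10000 x 8000 px; the mask is
+then resized with nearest-neighbor interpolation so label values survive.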
+""" + +import argparse +import json +import os +from typing import Tuple + +import numpy as np +import pandas as pd +import tifffile +from skimage.transform import resize + + +def read_mask(mask_path: str) -> np.ndarray: + """Read the segmentation mask from a TIFF file.""" + print(f"Reading mask: {mask_path}") + mask = tifffile.imread(mask_path) + print(f"Mask shape: {mask.shape}, dtype: {mask.dtype}") + return mask + + +def read_transcript_bounds(transcript_path: str) -> Tuple[float, float, float, float]: + """Read transcript coordinates and return their bounding box.""" + print(f"Reading transcripts: {transcript_path}") + if transcript_path.endswith(".parquet"): + transcripts = pd.read_parquet(transcript_path, columns=["x_location", "y_location"]) + else: + transcripts = pd.read_csv(transcript_path) + + if "x_location" not in transcripts.columns or "y_location" not in transcripts.columns: + raise ValueError("Transcript file must contain 'x_location' and 'y_location' columns.") + + x_min, x_max = transcripts["x_location"].min(), transcripts["x_location"].max() + y_min, y_max = transcripts["y_location"].min(), transcripts["y_location"].max() + + print(f"Transcript bounds: X=({x_min:.2f}, {x_max:.2f}), Y=({y_min:.2f}, {y_max:.2f})") + return x_min, x_max, y_min, y_max + + +def read_microns_per_pixel(metadata_path: str) -> float: + """Extract microns_per_pixel or pixel_size from metadata JSON.""" + print(f"Reading metadata: {metadata_path}") + with open(metadata_path, "r") as f: + metadata = json.load(f) + + mpp = metadata.get("microns_per_pixel") or metadata.get("pixel_size") + if mpp is None: + raise KeyError("Metadata JSON must contain 'microns_per_pixel' or 'pixel_size'.") + + print(f"Microns per pixel: {mpp}") + return float(mpp) + + +def compute_target_size( + x_min: float, x_max: float, y_min: float, y_max: float, microns_per_pixel: float +) -> Tuple[int, int]: + """Compute new image size (in pixels) to cover given coordinates.""" + new_width = int(round((x_max - x_min) / microns_per_pixel)) + new_height = int(round((y_max - y_min) / microns_per_pixel)) + print(f"Target image size: {new_width} x {new_height} pixels") + return new_height, new_width + + +def resize_mask(mask: np.ndarray, new_shape: Tuple[int, int]) -> np.ndarray: + """Resize mask using nearest-neighbor interpolation (preserve labels).""" + print("Resizing mask...") + resized = resize( + mask, + new_shape, + order=0, # nearest neighbor to preserve segmentation labels + preserve_range=True, + anti_aliasing=False, + ).astype(mask.dtype) + print(f"Resized shape: {resized.shape}") + return resized + + +def main(mask_path: str, transcripts_path: str, metadata_path: str, output_path: str) -> None: + """Resize segmentation mask to match Xenium coordinate space.""" + # Validate input files + for path in [mask_path, transcripts_path, metadata_path]: + if not os.path.exists(path): + raise FileNotFoundError(f"File not found: {path}") + + # Load data + mask = read_mask(mask_path) + x_min, x_max, y_min, y_max = read_transcript_bounds(transcripts_path) + microns_per_pixel = read_microns_per_pixel(metadata_path) + + # Compute physical mask size + height, width = mask.shape + print(f"Original mask size: {width * microns_per_pixel:.2f} x {height * microns_per_pixel:.2f} um") + + # Compute target size + new_height, new_width = compute_target_size(x_min, x_max, y_min, y_max, microns_per_pixel) + + # Resize and save + resized_mask = resize_mask(mask, (new_height, new_width)) + tifffile.imwrite(output_path, resized_mask) + + print(f"Saved 
resized mask -> {output_path}") + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Resize a segmentation TIFF mask to match transcript coordinates." + ) + parser.add_argument("--mask", required=True, help="Path to segmentation mask TIFF") + parser.add_argument("--transcripts", required=True, help="Path to transcripts file") + parser.add_argument("--metadata", required=True, help="Path to metadata JSON") + parser.add_argument("--prefix", required=True, help="Output directory prefix") + parser.add_argument("--mask-filename", required=True, help="Original mask filename for output naming") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + os.makedirs(args.prefix, exist_ok=True) + output_mask: str = os.path.join(args.prefix, f"resized_{args.mask_filename}.tif") + + main( + mask_path=args.mask, + transcripts_path=args.transcripts, + metadata_path=args.metadata, + output_path=output_mask, + ) diff --git a/bin/utility_segger2xr.py b/bin/utility_segger2xr.py new file mode 100755 index 00000000..22889e82 --- /dev/null +++ b/bin/utility_segger2xr.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +""" +Convert Segger prediction output to XeniumRanger-compatible format. + +Reads Segger PREDICT output (transcripts.parquet with segger_cell_id), +produces Baysor-format segmentation CSV, refined transcripts parquet, +and GeoJSON cell boundary polygons for xeniumranger import-segmentation. +""" + +import argparse +import json +from pathlib import Path +from typing import List + +import pandas as pd +from scipy.spatial import ConvexHull + +# Expected columns in transcripts.parquet +REQUIRED_COLUMNS: List[str] = [ + "transcript_id", + "cell_id", + "overlaps_nucleus", + "feature_name", + "x_location", + "y_location", + "z_location", + "qv", +] + +# Column name for segger cell assignment (varies by segger version) +SEGGER_ID_CANDIDATES: List[str] = ["segger_cell_id", "segger_id"] + + +def refine_transcripts(parquet_path: str) -> pd.DataFrame: + """ + Read segger PREDICT output and extract cell assignments. + Supports both 'segger_cell_id' (newer) and 'segger_id' (older) column names. + """ + parquet_file = Path(parquet_path) + if not parquet_file.exists(): + raise FileNotFoundError(f"File not found: {parquet_path}") + + df = pd.read_parquet(parquet_file, engine="pyarrow") + + missing_cols = [col for col in REQUIRED_COLUMNS if col not in df.columns] + if missing_cols: + raise ValueError(f"Missing required columns: {missing_cols}") + + # Find segger cell assignment column + segger_col = None + for candidate in SEGGER_ID_CANDIDATES: + if candidate in df.columns: + segger_col = candidate + break + if segger_col is None: + raise ValueError( + f"No segger cell assignment column found. " + f"Expected one of {SEGGER_ID_CANDIDATES}, got columns: {list(df.columns)}" + ) + + # Replace cell_id with segger assignment + cell_id_index = df.columns.get_loc("cell_id") + df = df.drop(columns=["cell_id"]) + segger_series = df.pop(segger_col) + df.insert(cell_id_index, "cell_id", segger_series) + + return df + + +def build_cell_map(df: pd.DataFrame, min_transcripts: int = 3) -> dict: + """ + Build a mapping from raw segger cell IDs to non-numeric string IDs. + + Only includes cells that have: + - >= min_transcripts assigned transcripts + - At least one transcript with valid (non-NaN) x/y coordinates + + Cell IDs use "cell-N" format (hyphen + integer) as required by + xeniumranger's cell ID parser. 
Non-numeric to avoid polars Int64 inference. + """ + cell_ids = df["cell_id"].fillna("UNASSIGNED").astype(str) + is_unassigned = (cell_ids == "UNASSIGNED") | (cell_ids == "") | (cell_ids == "0") + assigned = cell_ids[~is_unassigned] + counts = assigned.value_counts() + enough_tx = set(counts[counts >= min_transcripts].index) + + # Exclude cells with all-NaN coordinates (no spatial info = useless) + has_coords = df.dropna(subset=["x_location", "y_location"]) + has_coords_ids = set(has_coords["cell_id"].fillna("UNASSIGNED").astype(str)) + valid_cells = sorted(enough_tx & has_coords_ids) + + return {cell: f"cell-{i + 1}" for i, cell in enumerate(valid_cells)} + + +def to_baysor_csv(df: pd.DataFrame, output_path: str, cell_map: dict) -> None: + """ + Convert transcript DataFrame to Baysor-compatible CSV format. + + xeniumranger 4.0 import-segmentation --transcript-assignment expects a + Baysor segmentation CSV with at minimum: transcript_id, cell, is_noise, + x, y columns. This function maps Xenium/Segger columns to Baysor format. + """ + baysor_df = pd.DataFrame() + baysor_df["transcript_id"] = df["transcript_id"] + baysor_df["x"] = df["x_location"] + baysor_df["y"] = df["y_location"] + baysor_df["z"] = df["z_location"] + baysor_df["gene"] = df["feature_name"] + + cell_ids = df["cell_id"].fillna("UNASSIGNED").astype(str) + is_unassigned = (cell_ids == "UNASSIGNED") | (cell_ids == "") | (cell_ids == "0") + baysor_df["cell"] = cell_ids.map(cell_map).fillna("") + baysor_df["is_noise"] = is_unassigned.astype(int) + + baysor_df.to_csv(output_path, index=False) + + n_assigned = (~is_unassigned).sum() + n_noise = is_unassigned.sum() + n_cells = len(cell_map) + print( + f"Baysor CSV: {n_assigned} assigned, {n_noise} noise, {n_cells} cells -> {output_path}" + ) + + +def _make_buffer_polygon(cx: float, cy: float, radius: float = 0.5) -> list: + """Create a small square polygon around a centroid as fallback.""" + return [ + [cx - radius, cy - radius], + [cx + radius, cy - radius], + [cx + radius, cy + radius], + [cx - radius, cy + radius], + [cx - radius, cy - radius], # close ring + ] + + +def generate_viz_polygons(df: pd.DataFrame, output_path: str, cell_map: dict) -> None: + """ + Generate a GeoJSON file with cell boundary polygons. + + Uses ConvexHull when possible; falls back to a small buffer polygon around + the centroid for cells with < 3 unique points or collinear points. + + Required by xeniumranger import-segmentation when using --transcript-assignment. + Each feature MUST have a top-level "id" field (xeniumranger reads item["id"]). + Cell IDs must match those in the Baysor CSV. 
+ """ + assigned = df[ + df["cell_id"].notna() + & (df["cell_id"].astype(str) != "UNASSIGNED") + & (df["cell_id"].astype(str) != "") + ].copy() + + features = [] + grouped = assigned.groupby("cell_id") + + for cell_id, group in grouped: + mapped_id = cell_map.get(str(cell_id)) + if mapped_id is None: + continue + + coords = group[["x_location", "y_location"]].dropna().values + + polygon_coords = None + if len(coords) >= 3: + try: + hull = ConvexHull(coords) + hull_points = coords[hull.vertices].tolist() + hull_points.append(hull_points[0]) # close polygon ring + polygon_coords = hull_points + except Exception: + pass + + # Fallback: buffer polygon around centroid + if polygon_coords is None: + cx, cy = coords.mean(axis=0).astype(float) + polygon_coords = _make_buffer_polygon(cx, cy) + + features.append( + { + "type": "Feature", + "id": mapped_id, + "geometry": { + "type": "Polygon", + "coordinates": [polygon_coords], + }, + "properties": {"cell_id": mapped_id}, + } + ) + + geojson = {"type": "FeatureCollection", "features": features} + + with open(output_path, "w") as f: + json.dump(geojson, f) + + print(f"Generated {len(features)} cell polygons in {output_path}") + + +def main(input_file: str, prefix: str, min_transcripts: int = 3) -> None: + """Run the full segger-to-xeniumranger conversion pipeline.""" + Path(prefix).mkdir(parents=True, exist_ok=True) + transcripts = refine_transcripts(input_file) + + # Build cell ID mapping, filtering cells with < min_transcripts + cell_map = build_cell_map(transcripts, min_transcripts=min_transcripts) + + # xeniumranger 4.0 expects Baysor-format CSV (not parquet) with is_noise column + to_baysor_csv(transcripts, f"{prefix}/segmentation.csv", cell_map) + + # Also save the refined parquet for downstream use + transcripts.to_parquet(f"{prefix}/transcripts.parquet", engine="pyarrow") + + # Generate cell boundary polygons (required companion to --transcript-assignment) + # Uses ConvexHull when possible; falls back to buffer polygon for edge cases + generate_viz_polygons(transcripts, f"{prefix}/segmentation_polygons.json", cell_map) + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Convert Segger prediction output to XeniumRanger-compatible format." + ) + parser.add_argument( + "--transcripts", + required=True, + help="Path to Segger output transcripts parquet file", + ) + parser.add_argument( + "--prefix", + required=True, + help="Output directory prefix (sample ID)", + ) + parser.add_argument( + "--min-transcripts", + type=int, + default=3, + help="Minimum transcripts per cell (default: 3)", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + main( + input_file=args.transcripts, + prefix=args.prefix, + min_transcripts=args.min_transcripts, + ) diff --git a/bin/utility_split_transcripts.py b/bin/utility_split_transcripts.py new file mode 100755 index 00000000..275fbab1 --- /dev/null +++ b/bin/utility_split_transcripts.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +""" +Split transcript coordinates into spatial tiles. + +Reads a Xenium transcripts.parquet file and computes quantile-based spatial +tiles, writing a splits.csv with tile boundaries. +""" + +import argparse +import os +from typing import List + +import pandas as pd + + +def compute_quantile_ranges(df: pd.DataFrame, col: str, n_bins: int) -> List: + """ + Compute the bin edges for `df[col]` such that each of the n_bins + has ~equal count of points. 
Returns a list of (min, max) tuples. + """ + _, bins = pd.qcut(df[col], q=n_bins, retbins=True, duplicates="drop") + + ranges = [(bins[i], bins[i + 1]) for i in range(len(bins) - 1)] + + return ranges + + +def make_tiles(df: pd.DataFrame, x_bins: int, y_bins: int) -> pd.DataFrame: + """ + Produce a DataFrame with one row per tile: + tile_id, x_min, x_max, y_min, y_max + """ + x_ranges = compute_quantile_ranges(df, "x_location", x_bins) + y_ranges = compute_quantile_ranges(df, "y_location", y_bins) + + tiles = [] + for ix, (x_min, x_max) in enumerate(x_ranges, start=1): + for iy, (y_min, y_max) in enumerate(y_ranges, start=1): + tiles.append( + { + "tile_id": f"{ix}_{iy}", + "x_min": x_min, + "x_max": x_max, + "y_min": y_min, + "y_max": y_max, + } + ) + + return pd.DataFrame(tiles) + + +def main( + transcripts: str, + x_bins: int = 10, + y_bins: int = 10, + prefix: str = "", +) -> None: + """Generate spatial tile splits from transcript coordinates.""" + # read parquet file + df = pd.read_parquet(transcripts, engine="fastparquet") + + # compute tiles + tiles_df = make_tiles(df, x_bins, y_bins) + + # save csv file + os.makedirs(prefix, exist_ok=True) + tiles_df.to_csv(f"{prefix}/splits.csv", index=False) + + return None + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Split transcript coordinates into spatial tiles." + ) + parser.add_argument( + "--transcripts", + required=True, + help="Path to transcripts parquet file", + ) + parser.add_argument( + "--x-bins", + type=int, + required=True, + help="Number of bins along X axis", + ) + parser.add_argument( + "--y-bins", + type=int, + required=True, + help="Number of bins along Y axis", + ) + parser.add_argument( + "--prefix", + required=True, + help="Output directory prefix", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + main( + transcripts=args.transcripts, + x_bins=args.x_bins, + y_bins=args.y_bins, + prefix=args.prefix, + ) diff --git a/bin/utility_upscale_mask.py b/bin/utility_upscale_mask.py new file mode 100755 index 00000000..6cc1694e --- /dev/null +++ b/bin/utility_upscale_mask.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +""" +Restore Cellpose masks to original image resolution. + +Uses nearest-neighbor interpolation to upscale segmentation masks back +to the original image dimensions recorded in scale_info.json (produced +by downscale_morphology.py). + +Output: {prefix}/upscaled_{mask_basename}.tif +""" + +import argparse +import json +from pathlib import Path + +import numpy as np +import tifffile +from PIL import Image + + +def upscale_mask(mask_path: str, scale_info_path: str, prefix: str) -> None: + """ + Read a downscaled mask and upscale it to original dimensions. + + Args: + mask_path: Path to downscaled segmentation mask TIFF. + scale_info_path: Path to scale_info.json from downscale_morphology. + prefix: Output directory. 
+ """ + with open(scale_info_path) as f: + info = json.load(f) + orig_h, orig_w = info["orig_h"], info["orig_w"] + + mask = tifffile.imread(mask_path) + print( + f"Mask: {mask.shape}, dtype={mask.dtype}, " + f"unique cells: {len(np.unique(mask)) - 1}" + ) + print(f"Upscaling to ({orig_h}, {orig_w})") + + pil_mask = Image.fromarray(mask) + pil_mask = pil_mask.resize((orig_w, orig_h), Image.NEAREST) + mask_up = np.array(pil_mask, dtype=mask.dtype) + + out_dir = Path(prefix) + out_dir.mkdir(parents=True, exist_ok=True) + base = Path(mask_path).stem + out_name = out_dir / f"upscaled_{base}.tif" + tifffile.imwrite(str(out_name), mask_up, compression="zlib") + print( + f"Done: {out_name}, unique cells: {len(np.unique(mask_up)) - 1}" + ) + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Upscale a Cellpose mask back to original resolution." + ) + parser.add_argument("--mask", required=True, help="Downscaled mask TIFF") + parser.add_argument("--scale-info", required=True, help="scale_info.json from downscale step") + parser.add_argument("--prefix", required=True, help="Output directory") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + upscale_mask( + mask_path=args.mask, + scale_info_path=args.scale_info, + prefix=args.prefix, + ) diff --git a/bin/xenium_patch_stitch_postprocess.py b/bin/xenium_patch_stitch_postprocess.py new file mode 100755 index 00000000..7144b1ac --- /dev/null +++ b/bin/xenium_patch_stitch_postprocess.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +""" +Post-process stitched per-patch segmentation outputs. + +Ensures every GeoJSON feature is a single Polygon: make_valid() and +sopa.solve_conflicts() can produce MultiPolygon, MultiLineString, or +GeometryCollection geometries that XeniumRanger rejects. Cells dropped +during cleanup are also reassigned to UNASSIGNED in the transcript CSV +so the two outputs stay consistent. +""" + +import argparse +import csv +import json + +import shapely +from shapely.geometry import mapping, shape + + +def clean_geojson(geojson_path: str) -> set: + """ + Force every feature to a single valid Polygon. + + Returns the set of cell ids whose features were dropped. + """ + with open(geojson_path) as f: + data = json.load(f) + + clean = [] + dropped_cells = set() + for feat in data["features"]: + geom = shape(feat["geometry"]) + if not geom.is_valid: + geom = shapely.make_valid(geom) + poly = None + if geom.geom_type == "Polygon": + poly = geom + elif geom.geom_type == "MultiPolygon": + poly = max(geom.geoms, key=lambda g: g.area) + elif geom.geom_type == "GeometryCollection": + polys = [g for g in geom.geoms if g.geom_type == "Polygon"] + if polys: + poly = max(polys, key=lambda g: g.area) + if poly is not None and not poly.is_empty: + feat["geometry"] = mapping(poly) + clean.append(feat) + else: + cell_id = feat.get("id") or feat.get("properties", {}).get("cell_id", "") + dropped_cells.add(str(cell_id)) + + print(f"GeoJSON: {len(clean)} kept, {len(dropped_cells)} dropped: {dropped_cells}") + data["features"] = clean + with open(geojson_path, "w") as f: + json.dump(data, f) + + return dropped_cells + + +def reassign_dropped(csv_path: str, dropped_cells: set) -> None: + """ + Reassign transcripts of dropped cells to UNASSIGNED in the CSV. 
+ """ + if not dropped_cells: + return + + with open(csv_path) as f: + reader = csv.DictReader(f) + fieldnames = reader.fieldnames + rows = list(reader) + + reassigned = 0 + for row in rows: + if row["cell"] in dropped_cells: + row["cell"] = "" + row["is_noise"] = "1" + reassigned += 1 + + with open(csv_path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + print(f"CSV: {reassigned} transcripts reassigned to UNASSIGNED") + + +def parse_args() -> argparse.Namespace: + """Parse command-line arguments.""" + parser = argparse.ArgumentParser( + description="Clean stitched GeoJSON polygons and reconcile transcript CSV." + ) + parser.add_argument("--geojson", required=True, help="Path to xr-cell-polygons.geojson") + parser.add_argument("--csv", required=True, help="Path to xr-transcript-metadata.csv") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + dropped = clean_geojson(args.geojson) + reassign_dropped(args.csv, dropped) diff --git a/bin/xenium_patch_stitch_transcripts.py b/bin/xenium_patch_stitch_transcripts.py new file mode 100755 index 00000000..d9fb8d41 --- /dev/null +++ b/bin/xenium_patch_stitch_transcripts.py @@ -0,0 +1,808 @@ +#!/usr/bin/env python3 +"""Stitch per-patch Baysor segmentation results into unified output. + +Standalone script that replaces the xenium_patch CLI package's stitch +functionality. Uses sopa's solve_conflicts() for overlap resolution. +""" + +from __future__ import annotations + +import argparse +import json +import os +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path + +import geopandas as gpd +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.csv as pa_csv +import shapely +from shapely.affinity import translate +from shapely.geometry import mapping, shape +from sopa.segmentation.resolve import solve_conflicts + +# --------------------------------------------------------------------------- +# Geometry helpers +# --------------------------------------------------------------------------- + + +def _ensure_polygon(geom) -> "shapely.Polygon | None": + """Extract a single Polygon from any geometry, or return None. + + XeniumRanger only accepts Polygon. make_valid() and solve_conflicts + can produce MultiPolygon, GeometryCollection, MultiLineString, etc. + """ + if geom is None or geom.is_empty: + return None + if geom.geom_type == "Polygon": + return geom + if geom.geom_type == "MultiPolygon": + return max(geom.geoms, key=lambda g: g.area) + if geom.geom_type == "GeometryCollection": + polys = [g for g in geom.geoms if g.geom_type == "Polygon"] + return max(polys, key=lambda g: g.area) if polys else None + # LineString, MultiLineString, Point, etc. 
— not a polygon + return None + + +# --------------------------------------------------------------------------- +# Inline types (from _types.py) +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class Bounds: + """Axis-aligned bounding box in either pixel or micron coordinates.""" + + x_min: float + x_max: float + y_min: float + y_max: float + + +@dataclass(frozen=True) +class PatchInfo: + """Metadata for a single patch in the grid.""" + + patch_id: str + row: int + col: int + global_bounds_px: Bounds + global_bounds_um: Bounds + core_bounds_px: Bounds + core_bounds_um: Bounds + + +@dataclass +class PatchGridMetadata: + """Full grid metadata, serializable to JSON.""" + + version: str + bundle_path: str + image_height_px: int + image_width_px: int + pixel_size_um: float + transcript_extent_um: Bounds + grid_rows: int + grid_cols: int + overlap_um: float + overlap_px: int + patches: list[PatchInfo] + grid_type: str = "uniform" + + +# --------------------------------------------------------------------------- +# Internal result containers +# --------------------------------------------------------------------------- + + +@dataclass +class _PatchGeoResult: + """Result of parallel GeoJSON processing for a single patch.""" + + features: list[dict] + cell_ids: list[str] + + +@dataclass +class _PatchCsvResult: + """Result of parallel CSV reading for a single patch.""" + + table: pa.Table + has_cell_col: bool + has_x_col: bool + has_y_col: bool + has_gene_col: bool = False + has_feature_name_col: bool = False + + +# --------------------------------------------------------------------------- +# Grid metadata I/O (from grid.py) +# --------------------------------------------------------------------------- + + +def _dict_to_bounds(d: dict) -> Bounds: + return Bounds(d["x_min"], d["x_max"], d["y_min"], d["y_max"]) + + +def load_grid_metadata(input_path: Path) -> PatchGridMetadata: + """Deserialize PatchGridMetadata from JSON. + + Args: + input_path: Path to JSON file to read. + + Returns: + Reconstructed PatchGridMetadata. + """ + with open(input_path) as f: + data = json.load(f) + + patches = [ + PatchInfo( + patch_id=p["patch_id"], + row=p["row"], + col=p["col"], + global_bounds_px=_dict_to_bounds(p["global_bounds_px"]), + global_bounds_um=_dict_to_bounds(p["global_bounds_um"]), + core_bounds_px=_dict_to_bounds(p["core_bounds_px"]), + core_bounds_um=_dict_to_bounds(p["core_bounds_um"]), + ) + for p in data["patches"] + ] + + return PatchGridMetadata( + version=data["version"], + bundle_path=data["bundle_path"], + image_height_px=data["image_height_px"], + image_width_px=data["image_width_px"], + pixel_size_um=data["pixel_size_um"], + transcript_extent_um=_dict_to_bounds(data["transcript_extent_um"]), + grid_rows=data["grid_rows"], + grid_cols=data["grid_cols"], + overlap_um=data["overlap_um"], + overlap_px=data["overlap_px"], + grid_type=data.get("grid_type", "uniform"), + patches=patches, + ) + + +# --------------------------------------------------------------------------- +# GeoJSON I/O (from polygon_io.py) +# --------------------------------------------------------------------------- + + +def _normalize_geometry_collection(geojson: dict) -> dict: + """Convert a GeometryCollection to a FeatureCollection. + + proseg-to-baysor produces a non-standard GeoJSON GeometryCollection where + each geometry object has a custom ``cell`` key (bare integer) instead of + using Feature wrappers. 
This normalises it to a standard FeatureCollection with ``id`` and
+    ``properties.cell_id`` on each feature; the raw ``cell`` value is kept
+    as a string (sequential global ``cell-{N}`` IDs are assigned later,
+    during stitching).
+
+    Args:
+        geojson: Parsed GeoJSON dict with type GeometryCollection.
+
+    Returns:
+        Standard FeatureCollection dict.
+    """
+    features = []
+    for geom in geojson.get("geometries", []):
+        cell_raw = geom.get("cell", "")
+        cell_id = str(cell_raw)
+        clean_geom = {k: v for k, v in geom.items() if k != "cell"}
+        feature = {
+            "type": "Feature",
+            "id": cell_id,
+            "geometry": clean_geom,
+            "properties": {"cell_id": cell_id},
+        }
+        features.append(feature)
+    return {"type": "FeatureCollection", "features": features}
+
+
+def read_geojson(geojson_path: Path) -> dict:
+    """Read a GeoJSON file and normalise to FeatureCollection.
+
+    Handles both standard FeatureCollections and the GeometryCollection
+    format produced by proseg-to-baysor.
+
+    Args:
+        geojson_path: Path to the GeoJSON file.
+
+    Returns:
+        Parsed GeoJSON dict (always a FeatureCollection).
+    """
+    with open(geojson_path) as f:
+        data = json.load(f)
+    if data.get("type") == "GeometryCollection":
+        return _normalize_geometry_collection(data)
+    return data
+
+
+def transform_polygons(geojson: dict, offset_x: float, offset_y: float) -> dict:
+    """Shift all polygon coordinates by (offset_x, offset_y).
+
+    Args:
+        geojson: Input FeatureCollection.
+        offset_x: Translation in x.
+        offset_y: Translation in y.
+
+    Returns:
+        New FeatureCollection with shifted geometries.
+    """
+    features = []
+    for feat in geojson.get("features", []):
+        geom = shape(feat["geometry"])
+        shifted = translate(geom, xoff=offset_x, yoff=offset_y)
+        new_feat = {**feat, "geometry": mapping(shifted)}
+        features.append(new_feat)
+    return {"type": "FeatureCollection", "features": features}
+
+
+def write_geojson(geojson: dict, output_path: Path) -> None:
+    """Write a GeoJSON FeatureCollection.
+
+    Args:
+        geojson: GeoJSON dict to write.
+        output_path: Destination path (parent dirs created automatically).
+    """
+    output_path.parent.mkdir(parents=True, exist_ok=True)
+    with open(output_path, "w") as f:
+        json.dump(geojson, f)
+
+
+# ---------------------------------------------------------------------------
+# Arrow utilities (from _arrow_utils.py)
+# ---------------------------------------------------------------------------
+
+
+def float_str_array(f64_array: pa.Array) -> pa.Array:
+    """Convert a float64 pyarrow array to string using Python's str(float) format.
+
+    pyarrow's built-in cast omits trailing '.0' for whole numbers. This
+    function ensures output matches str(float(...)) for CSV compatibility.
+
+    Args:
+        f64_array: Float64 pyarrow array to convert.
+
+    Returns:
+        String pyarrow array with Python-formatted float values.
+    """
+    return pa.array(
+        [str(v) if v is not None else None for v in f64_array.to_pylist()],
+        type=pa.string(),
+    )
+
+
+# ---------------------------------------------------------------------------
+# Parallel I/O
+# ---------------------------------------------------------------------------
+
+
+def _read_and_transform_geojson(
+    patch: PatchInfo,
+    patches_dir: Path,
+    geojson_filename: str,
+) -> _PatchGeoResult | None:
+    """Read and transform GeoJSON for a single patch (no core clipping).
+
+    Args:
+        patch: Patch metadata.
+        patches_dir: Root patches directory.
+        geojson_filename: GeoJSON filename within each patch directory.
+
+    Returns:
+        _PatchGeoResult with features and cell IDs, or None if no GeoJSON.
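+
+    The file is read from patches_dir / patch.patch_id / geojson_filename,
+    e.g. patches/0_1/segmentation_polygons.json (patch ID illustrative).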
+ """ + geojson_path = patches_dir / patch.patch_id / geojson_filename + if not geojson_path.exists(): + return None + + geojson = read_geojson(geojson_path) + + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + geojson = transform_polygons(geojson, offset_x, offset_y) + + features = geojson.get("features", []) + seen: set[str] = set() + cell_ids: list[str] = [] + for feat in features: + old_id = str(feat.get("id", feat.get("properties", {}).get("cell_id", ""))) + if old_id not in seen: + seen.add(old_id) + cell_ids.append(old_id) + + return _PatchGeoResult(features=features, cell_ids=cell_ids) + + +def _read_patch_csv( + patch: PatchInfo, + patches_dir: Path, + csv_filename: str, +) -> _PatchCsvResult | None: + """Read a patch CSV into a pyarrow Table. + + All columns are read as strings to preserve exact formatting. + + Args: + patch: Patch metadata. + patches_dir: Root patches directory. + csv_filename: CSV filename within each patch directory. + + Returns: + _PatchCsvResult with the table and column presence flags, or None. + """ + csv_path = patches_dir / patch.patch_id / csv_filename + if not csv_path.exists(): + return None + + with open(csv_path) as fh: + header_line = fh.readline().strip() + col_names = header_line.split(",") + all_string_types = {name: pa.string() for name in col_names} + + table = pa_csv.read_csv( + csv_path, + convert_options=pa_csv.ConvertOptions( + column_types=all_string_types, + strings_can_be_null=False, + ), + read_options=pa_csv.ReadOptions(use_threads=True), + ) + + return _PatchCsvResult( + table=table, + has_cell_col="cell" in table.column_names, + has_x_col="x" in table.column_names, + has_y_col="y" in table.column_names, + has_gene_col="gene" in table.column_names, + has_feature_name_col="feature_name" in table.column_names, + ) + + +# --------------------------------------------------------------------------- +# CSV processing +# --------------------------------------------------------------------------- + + +def _transform_patch_coords( + csv_result: _PatchCsvResult, + offset_x: float, + offset_y: float, +) -> pa.Table: + """Shift transcript coordinates from local patch space to global space. + + Args: + csv_result: The raw CSV table and column flags. + offset_x: X offset for coordinate transform (microns). + offset_y: Y offset for coordinate transform (microns). + + Returns: + Table with x, y columns shifted to global coordinates. + """ + table = csv_result.table + + if table.num_rows == 0: + return table + + if csv_result.has_x_col: + x_f64 = pc.add( + table.column("x").cast(pa.float64()), + pa.scalar(offset_x, type=pa.float64()), + ) + table = table.set_column( + table.schema.get_field_index("x"), + "x", + float_str_array(x_f64), + ) + if csv_result.has_y_col: + y_f64 = pc.add( + table.column("y").cast(pa.float64()), + pa.scalar(offset_y, type=pa.float64()), + ) + table = table.set_column( + table.schema.get_field_index("y"), + "y", + float_str_array(y_f64), + ) + + return table + + +# --------------------------------------------------------------------------- +# Sopa conflict resolution +# --------------------------------------------------------------------------- + + +def _stitch_sopa_resolve( + metadata: PatchGridMetadata, + geo_results: list[_PatchGeoResult | None], + csv_results: list[_PatchCsvResult | None], + all_geojson_features: list[dict], + all_tables: list[pa.Table], + threshold: float = 0.5, +) -> set[str]: + """Stitch per-patch segmentation using spatial containment assignment. + + 1. 
Collect ALL non-empty polygons from all patches (no transcript filtering). + 2. Resolve overlapping polygons via sopa's solve_conflicts(). + 3. Assign sequential global cell IDs (cell-1, cell-2, ...). + 4. Spatially assign transcripts to resolved polygons using STRtree. + 5. Noise transcripts (outside all polygons) kept only from their core patch. + + This approach works regardless of whether Baysor's CSV ``cell`` column + matches GeoJSON cell IDs -- all assignment is done by spatial containment. + + Args: + metadata: Grid metadata with patch list. + geo_results: Per-patch GeoJSON results (already in global coords). + csv_results: Per-patch CSV results. + all_geojson_features: Output list to append resolved GeoJSON features. + all_tables: Output list to append processed CSV tables. + threshold: Overlap threshold for sopa's solve_conflicts (0-1). + + Returns: + Set of global cell IDs created by merging overlapping cells. + """ + # --- Phase 1: Collect all polygons from all patches --- + all_polygons: list = [] + patch_indices_list: list[int] = [] + + for i, patch in enumerate(metadata.patches): + geo_result = geo_results[i] + if geo_result is None: + continue + + for feat in geo_result.features: + polygon = shape(feat["geometry"]) + if polygon.is_empty: + continue + if not polygon.is_valid: + polygon = shapely.make_valid(polygon) + # Ensure we have a single Polygon (xeniumranger rejects all else) + polygon = _ensure_polygon(polygon) + if polygon is None: + continue + + all_polygons.append(polygon) + patch_indices_list.append(i) + + if not all_polygons: + print("[stitch] No polygons found in any patch") + # Still transform and collect CSVs as noise-only + for i, patch in enumerate(metadata.patches): + csv_result = csv_results[i] + if csv_result is None: + continue + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + transformed = _transform_patch_coords(csv_result, offset_x, offset_y) + if transformed.num_rows > 0: + all_tables.append(transformed) + return set() + + # --- Phase 2: Resolve overlapping polygons via sopa --- + patch_idx_array = np.array(patch_indices_list, dtype=np.int64) + input_gdf = gpd.GeoDataFrame(geometry=all_polygons) + resolved_gdf, kept_indices = solve_conflicts( + input_gdf, + threshold=threshold, + patch_indices=patch_idx_array, + return_indices=True, + ) + + # --- Phase 3: Assign global cell IDs to resolved polygons --- + merged_cell_ids: set[str] = set() + kept_arr = np.asarray(kept_indices) + resolved_polys: list = [] + resolved_ids: list[str] = [] + + for rank, orig_idx in enumerate(kept_arr, start=1): + global_id = f"cell-{rank}" + geom = resolved_gdf.geometry.iloc[rank - 1] + + # Ensure single Polygon after solve_conflicts union + geom = _ensure_polygon(geom) + if geom is None: + continue + + if orig_idx < 0: + merged_cell_ids.add(global_id) + + resolved_polys.append(geom) + resolved_ids.append(global_id) + + all_geojson_features.append( + { + "type": "Feature", + "id": global_id, + "geometry": mapping(geom), + "properties": {"cell_id": global_id}, + } + ) + + print( + f"[stitch] Resolved {len(all_polygons)} input polygons to " + f"{len(resolved_polys)} cells ({len(merged_cell_ids)} merged)" + ) + + # --- Phase 4: Spatial transcript assignment via STRtree --- + poly_tree = shapely.STRtree(resolved_polys) + + for i, patch in enumerate(metadata.patches): + csv_result = csv_results[i] + if csv_result is None: + continue + + offset_x = patch.global_bounds_um.x_min + offset_y = patch.global_bounds_um.y_min + core = 
patch.core_bounds_um + + transformed = _transform_patch_coords(csv_result, offset_x, offset_y) + if transformed.num_rows == 0: + continue + + if not csv_result.has_x_col or not csv_result.has_y_col: + all_tables.append(transformed) + continue + + # Get global coordinates for spatial query + gx = transformed.column("x").cast(pa.float64()).to_numpy(zero_copy_only=False) + gy = transformed.column("y").cast(pa.float64()).to_numpy(zero_copy_only=False) + points = shapely.points(gx, gy) + + # Query STRtree: returns (input_indices, tree_indices) + point_hits, poly_hits = poly_tree.query(points, predicate="intersects") + + # Build point -> cell_id mapping (first hit wins) + point_to_cell: dict[int, str] = {} + for pt_idx, poly_idx in zip(point_hits, poly_hits): + if pt_idx not in point_to_cell: + point_to_cell[pt_idx] = resolved_ids[poly_idx] + + # Build cell and is_noise columns + n_rows = transformed.num_rows + cell_arr = [""] * n_rows + is_noise_arr = ["true"] * n_rows + for pt_idx, cell_id in point_to_cell.items(): + cell_arr[pt_idx] = cell_id + is_noise_arr[pt_idx] = "false" + + # Filter noise transcripts to core bounds only + # Assigned transcripts are kept from all patches (dedup later by transcript_id) + in_core = ( + (gx >= core.x_min) + & (gx < core.x_max) + & (gy >= core.y_min) + & (gy < core.y_max) + ) + is_assigned = np.array([c != "" for c in cell_arr]) + keep_mask = pa.array(is_assigned | in_core, type=pa.bool_()) + + filtered = transformed.filter(keep_mask) + cell_arr_filtered = [c for c, k in zip(cell_arr, (is_assigned | in_core)) if k] + is_noise_filtered = [ + n for n, k in zip(is_noise_arr, (is_assigned | in_core)) if k + ] + + if filtered.num_rows == 0: + continue + + # Set cell and is_noise columns + cell_idx = ( + filtered.schema.get_field_index("cell") + if "cell" in filtered.column_names + else None + ) + if cell_idx is not None: + filtered = filtered.set_column( + cell_idx, "cell", pa.array(cell_arr_filtered, type=pa.string()) + ) + else: + filtered = filtered.append_column( + "cell", pa.array(cell_arr_filtered, type=pa.string()) + ) + + noise_idx = ( + filtered.schema.get_field_index("is_noise") + if "is_noise" in filtered.column_names + else None + ) + if noise_idx is not None: + filtered = filtered.set_column( + noise_idx, + "is_noise", + pa.array(is_noise_filtered, type=pa.string()), + ) + else: + filtered = filtered.append_column( + "is_noise", pa.array(is_noise_filtered, type=pa.string()) + ) + + all_tables.append(filtered) + + return merged_cell_ids + + +# --------------------------------------------------------------------------- +# Main orchestrator +# --------------------------------------------------------------------------- + + +def stitch_transcript_assignments( + patches_dir: Path, + output_dir: Path, + csv_filename: str = "segmentation.csv", + geojson_filename: str = "segmentation_polygons.json", + max_workers: int | None = None, +) -> None: + """Stitch per-patch transcript assignments and polygons into unified output. + + For each patch, reads the transcript assignment CSV and polygon GeoJSON. + Cells are deduplicated using sopa's solve_conflicts() which resolves + overlapping cells at patch boundaries based on area overlap ratio. + + Processing is split into a parallel I/O phase (reading GeoJSON and CSV + files via thread pool) and a sequential phase (dedup, global cell ID + assignment, remapping, and concatenation). + + Args: + patches_dir: Directory containing patch subdirectories and patch_grid.json. 
+ output_dir: Output directory for stitched CSV and GeoJSON. + csv_filename: CSV filename within each patch directory. + geojson_filename: GeoJSON filename within each patch directory. + max_workers: Maximum number of threads for parallel I/O. + """ + patches_dir = Path(patches_dir) + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + metadata = load_grid_metadata(patches_dir / "patch_grid.json") + + n_patches = len(metadata.patches) + if max_workers is None: + max_workers = min(n_patches, os.cpu_count() or 1) + + # ---- Parallel phase: read GeoJSON and CSV files concurrently ---- + with ThreadPoolExecutor(max_workers=max_workers) as executor: + geo_futures = [ + executor.submit( + _read_and_transform_geojson, p, patches_dir, geojson_filename + ) + for p in metadata.patches + ] + csv_futures = [ + executor.submit(_read_patch_csv, p, patches_dir, csv_filename) + for p in metadata.patches + ] + geo_results = [f.result() for f in geo_futures] + csv_results = [f.result() for f in csv_futures] + + # ---- Sequential phase: assign global cell IDs, remap, concatenate ---- + all_tables: list[pa.Table] = [] + all_geojson_features: list[dict] = [] + + _stitch_sopa_resolve( + metadata, + geo_results, + csv_results, + all_geojson_features, + all_tables, + threshold=0.5, + ) + + # Concatenate all patch tables + if all_tables: + merged = pa.concat_tables(all_tables) + + # Deduplicate by transcript_id: prefer assigned over noise + if "transcript_id" in merged.column_names: + if "cell" in merged.column_names: + is_noise = pc.equal(merged.column("cell"), "").cast(pa.int8()) + row_order = pa.array(np.arange(merged.num_rows), type=pa.int64()) + sort_table = pa.table({"_noise": is_noise, "_row": row_order}) + sort_indices = pc.sort_indices( + sort_table, + sort_keys=[("_noise", "ascending"), ("_row", "ascending")], + ) + merged = merged.take(sort_indices) + + tid_np = merged.column("transcript_id").to_numpy(zero_copy_only=False) + _, first_indices = np.unique(tid_np, return_index=True) + first_indices.sort() + merged = merged.take(first_indices) + + # Log assignment stats + if "cell" in merged.column_names: + cell_vals = merged.column("cell").to_pylist() + n_assigned = sum(1 for c in cell_vals if c) + n_noise = sum(1 for c in cell_vals if not c) + print( + f"[stitch] Final: {merged.num_rows} transcripts, " + f"{n_assigned} assigned, {n_noise} noise" + ) + + # Cast is_noise to integer for xeniumranger compatibility + if "is_noise" in merged.column_names: + noise_col = merged.column("is_noise") + if noise_col.type == pa.string(): + lower = pc.utf8_lower(noise_col) + is_true = pc.or_(pc.equal(lower, "true"), pc.equal(lower, "1")) + idx = merged.column_names.index("is_noise") + merged = merged.set_column(idx, "is_noise", is_true.cast(pa.int8())) + + # Write CSV + if merged.num_rows > 0: + csv_out = output_dir / "xr-transcript-metadata.csv" + pa_csv.write_csv( + merged, + csv_out, + write_options=pa_csv.WriteOptions(quoting_style="needed"), + ) + + # Safety net: remove orphan polygons with zero transcripts + if all_geojson_features and all_tables: + csv_cell_ids: set[str] = set() + if "cell" in merged.column_names: + csv_cell_ids = set(c for c in merged.column("cell").to_pylist() if c) + all_geojson_features = [ + f + for f in all_geojson_features + if str(f.get("id", f.get("properties", {}).get("cell_id", ""))) + in csv_cell_ids + ] + + # Write merged GeoJSON + if all_geojson_features: + merged_geo = {"type": "FeatureCollection", "features": all_geojson_features} + 
write_geojson(merged_geo, output_dir / "xr-cell-polygons.geojson") + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Stitch per-patch Baysor segmentation results into unified output." + ) + parser.add_argument( + "--patches", + type=Path, + required=True, + help="Directory containing patch subdirectories and patch_grid.json", + ) + parser.add_argument( + "--output", + type=Path, + required=True, + help="Output directory for stitched CSV and GeoJSON", + ) + parser.add_argument( + "--csv-filename", + default="segmentation.csv", + help="CSV filename within each patch (default: segmentation.csv)", + ) + parser.add_argument( + "--geojson-filename", + default="segmentation_polygons.json", + help="GeoJSON filename within each patch (default: segmentation_polygons.json)", + ) + args = parser.parse_args() + + stitch_transcript_assignments( + patches_dir=args.patches, + output_dir=args.output, + csv_filename=args.csv_filename, + geojson_filename=args.geojson_filename, + ) + + +if __name__ == "__main__": + main() diff --git a/conf/base.config b/conf/base.config index 97f4f191..45b62604 100644 --- a/conf/base.config +++ b/conf/base.config @@ -10,56 +10,94 @@ process { - // TODO nf-core: Check the defaults for all processes - cpus = { check_max( 1 * task.attempt, 'cpus' ) } - memory = { check_max( 6.GB * task.attempt, 'memory' ) } - time = { check_max( 4.h * task.attempt, 'time' ) } + cpus = { 1 * task.attempt } + memory = { 6.GB * task.attempt } + time = { 4.h * task.attempt } - errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' } - maxRetries = 1 + // resourceLimits = [ cpus: 192, memory: 750.GB, time: 72.h ] + + // Retry signal-induced exits and "killed without exit code" cases: + // 130..145 = signal exits (SIGINT=130, SIGKILL=137, SIGTERM=143, etc.) + // 104 = ECONNRESET (transient network failures during stage-in/out) + // 2147483647 = Integer.MAX_VALUE, Nextflow's sentinel for tasks that died + // before writing .exitcode (Nextflow surfaces this as + // "terminated for an unknown reason -- Likely it has been + // terminated by the external system"). Common on AWS Batch + // spot capacity, kubernetes preemption, and grid-scheduler + // cancellations. See nextflow docs/aws.md for the AWS case. + errorStrategy = { task.exitStatus in ((130..145) + 104 + 2147483647) ? 'retry' : 'finish' } + maxRetries = 3 maxErrors = '-1' - // Process-specific resource requirements - // NOTE - Please try and re-use the labels below as much as possible. - // These labels are used and recognised by default in DSL2 files hosted on nf-core/modules. - // If possible, it would be nice to keep the same label naming convention when - // adding in your local modules too. - // TODO nf-core: Customise requirements for specific processes. 
- // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors + // ========================================================================= + // Standard nf-core CPU labels + // ========================================================================= + withLabel:process_single { - cpus = { check_max( 1 , 'cpus' ) } - memory = { check_max( 6.GB * task.attempt, 'memory' ) } - time = { check_max( 4.h * task.attempt, 'time' ) } + cpus = { 1 } + memory = { 6.GB * task.attempt } + time = { 4.h * task.attempt } } + withLabel:process_low { - cpus = { check_max( 2 * task.attempt, 'cpus' ) } - memory = { check_max( 12.GB * task.attempt, 'memory' ) } - time = { check_max( 4.h * task.attempt, 'time' ) } + cpus = { 2 * task.attempt } + memory = { 12.GB * task.attempt } + time = { 4.h * task.attempt } } + withLabel:process_medium { - cpus = { check_max( 6 * task.attempt, 'cpus' ) } - memory = { check_max( 36.GB * task.attempt, 'memory' ) } - time = { check_max( 8.h * task.attempt, 'time' ) } + cpus = { 6 * task.attempt } + memory = { 42.GB * task.attempt } + time = { 8.h * task.attempt } } + withLabel:process_high { - cpus = { check_max( 12 * task.attempt, 'cpus' ) } - memory = { check_max( 72.GB * task.attempt, 'memory' ) } - time = { check_max( 16.h * task.attempt, 'time' ) } + cpus = { 12 * task.attempt } + memory = { 72.GB * task.attempt } + time = { 16.h * task.attempt } + } + + withLabel:process_xl { + cpus = { 30 * task.attempt } + memory = { 240.GB * task.attempt } + time = { 24.h * task.attempt } } + withLabel:process_long { - time = { check_max( 20.h * task.attempt, 'time' ) } + time = { 20.h * task.attempt } } + withLabel:process_high_memory { - memory = { check_max( 200.GB * task.attempt, 'memory' ) } + memory = { 200.GB * task.attempt } } + withLabel:error_ignore { errorStrategy = 'ignore' } + withLabel:error_retry { errorStrategy = 'retry' maxRetries = 2 } - withName:CUSTOM_DUMPSOFTWAREVERSIONS { - cache = false + + // ========================================================================= + // GPU labels + // ========================================================================= + + // Multi-GPU processes (e.g., Segger train/predict) + withLabel:process_gpu { + ext.use_gpu = { params.use_gpu } + accelerator = { params.use_gpu ? 1 : null } + // containerOptions = { "--shm-size ${task.memory.toGiga().intValue()}g" } + } + + // Single-GPU processes (e.g., Cellpose, StarDist) + withLabel:process_gpu_single { + ext.use_gpu = { params.use_gpu } + accelerator = { params.use_gpu ? 1 : null } + cpus = { 12 * task.attempt } + memory = { 72.GB * task.attempt } + time = { 16.h * task.attempt } } + } diff --git a/conf/igenomes.config b/conf/igenomes.config deleted file mode 100644 index 7a1b3ac6..00000000 --- a/conf/igenomes.config +++ /dev/null @@ -1,432 +0,0 @@ -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Nextflow config file for iGenomes paths -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Defines reference genomes using iGenome paths. 
- Can be used by any config that customises the base path using: - $params.igenomes_base / --igenomes_base ----------------------------------------------------------------------------------------- -*/ - -params { - // illumina iGenomes reference file paths - genomes { - 'GRCh37' { - fasta = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/GRCh37-blacklist.bed" - } - 'GRCh38' { - fasta = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" - } - 'GRCm38' { - fasta = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "1.87e9" - blacklist = "${projectDir}/assets/blacklists/GRCm38-blacklist.bed" - } - 'TAIR10' { - fasta = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/README.txt" - mito_name = "Mt" - } - 'EB2' { - fasta = 
"${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/README.txt" - } - 'UMD3.1' { - fasta = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/README.txt" - mito_name = "MT" - } - 'WBcel235' { - fasta = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.bed" - mito_name = "MtDNA" - macs_gsize = "9e7" - } - 'CanFam3.1' { - fasta = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/README.txt" - mito_name = "MT" - } - 'GRCz10' { - fasta = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/STARIndex/" - bismark = 
"${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'BDGP6' { - fasta = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.bed" - mito_name = "M" - macs_gsize = "1.2e8" - } - 'EquCab2' { - fasta = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/README.txt" - mito_name = "MT" - } - 'EB1' { - fasta = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/README.txt" - } - 'Galgal4' { - fasta = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'Gm01' { - fasta = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BWAIndex/version0.6.0/" - bowtie2 = 
"${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/README.txt" - } - 'Mmul_1' { - fasta = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/README.txt" - mito_name = "MT" - } - 'IRGSP-1.0' { - fasta = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.bed" - mito_name = "Mt" - } - 'CHIMP2.1.4' { - fasta = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/README.txt" - mito_name = "MT" - } - 'Rnor_5.0' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.bed" - 
mito_name = "MT" - } - 'Rnor_6.0' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'R64-1-1' { - fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.bed" - mito_name = "MT" - macs_gsize = "1.2e7" - } - 'EF2' { - fasta = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "1.21e7" - } - 'Sbi1' { - fasta = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/README.txt" - } - 'Sscrofa10.2' { - fasta = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/STARIndex/" - bismark = 
"${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/README.txt" - mito_name = "MT" - } - 'AGPv3' { - fasta = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.bed" - mito_name = "Mt" - } - 'hg38' { - fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" - } - 'hg19' { - fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/hg19-blacklist.bed" - } - 'mm10' { - fasta = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "1.87e9" - blacklist = "${projectDir}/assets/blacklists/mm10-blacklist.bed" - } - 'bosTau8' { - fasta = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa" - bwa = 
"${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.bed" - mito_name = "chrM" - } - 'ce10' { - fasta = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "9e7" - } - 'canFam3' { - fasta = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/README.txt" - mito_name = "chrM" - } - 'danRer10' { - fasta = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "1.37e9" - } - 'dm6' { - fasta = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "1.2e8" - } - 'equCab2' { - 
fasta = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/README.txt" - mito_name = "chrM" - } - 'galGal4' { - fasta = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/README.txt" - mito_name = "chrM" - } - 'panTro4' { - fasta = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/README.txt" - mito_name = "chrM" - } - 'rn6' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.bed" - mito_name = "chrM" - } - 'sacCer3' { - fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BismarkIndex/" - readme = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Annotation/README.txt" - 
mito_name = "chrM" - macs_gsize = "1.2e7" - } - 'susScr3' { - fasta = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/README.txt" - mito_name = "chrM" - } - } -} diff --git a/conf/modules.config b/conf/modules.config index da58a5d8..bb5a8786 100644 --- a/conf/modules.config +++ b/conf/modules.config @@ -15,27 +15,344 @@ process { publishDir = [ path: { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }, mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + saveAs: { filename -> filename.equals('versions.yml') ? null : filename }, ] - withName: SAMPLESHEET_CHECK { + // ---------------------------- multiqc --------------------------------------------------- + + withName: 'MULTIQC|MULTIQC_PRE_XR_RUN|MULTIQC_POST_XR_RUN' { + errorStrategy = 'ignore' + } + + withName: MULTIQC { + ext.args = { params.multiqc_title ? "--title \"${params.multiqc_title}\"" : '' } + publishDir = [ + path: { "${params.outdir}/${params.mode}/multiqc" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename }, + ] + } + + withName: MULTIQC_PRE_XR_RUN { + ext.args = { "--title \"${params.multiqc_title ?: 'MultiQC Pre Xeniumranger import-segmentation Run'}\"" } + publishDir = [ + path: { "${params.outdir}/${params.mode}/multiqc/raw_bundle" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename }, + ] + } + + withName: MULTIQC_POST_XR_RUN { + ext.args = { "--title \"${params.multiqc_title ?: 'MultiQC Post Xeniumranger import-segmentation Run'}\"" } + publishDir = [ + path: { "${params.outdir}/${params.mode}/multiqc/redefined_bundle" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename }, + ] + } + + + // ---------------------------- xeniumranger --------------------------------------------------- + + // XeniumRanger: must use local scratch for large output bundles + withName:".*XENIUMRANGER.*" { + scratch = true + } + + // scratch=true is set in base.config via withName:".*XENIUMRANGER.*" + withName: XENIUMRANGER_RELABEL { + publishDir = [ + path: "${params.outdir}/${params.mode}/xeniumranger/relabel", + mode: params.publish_dir_mode, + ] + } + + withName: XENIUMRANGER_RESEGMENT { + publishDir = [ + path: "${params.outdir}/${params.mode}/xeniumranger/resegment", + mode: params.publish_dir_mode, + ] + } + + withName: XENIUMRANGER_IMPORT_SEGMENTATION { + publishDir = [ + path: "${params.outdir}/${params.mode}/xeniumranger/import_segementation", + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- proseg --------------------------------------------------- + + withName: PROSEG { publishDir = [ - path: { "${params.outdir}/pipeline_info" }, + path: "${params.outdir}/${params.mode}/proseg/preset", + mode: params.publish_dir_mode, + ] + } + + withName: PROSEG2BAYSOR { + publishDir = [ + path: "${params.outdir}/${params.mode}/proseg/proseg2baysor", + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- baysor --------------------------------------------------- + + withName: BAYSOR_RUN { + memory = { params.baysor_tiling ? 240.GB * task.attempt : 720.GB } + ext.args = "--min-molecules-per-cell ${params.baysor_tiling ? params.baysor_tiling_min_mols_per_cell : 30} --x-column x_location --y-column y_location --z-column z_location --gene-column feature_name" + ext.prior_column = params.baysor_prior == 'cells' ? 'cell_id' : null + ext.prior_confidence = params.baysor_prior != null ? params.baysor_prior_confidence : null + publishDir = [ + path: { "${params.outdir}/${params.mode}/baysor/run" }, + mode: params.publish_dir_mode, + ] + } + + withName: BAYSOR_SEGFREE { + memory = { 720.GB } + publishDir = [ + path: { "${params.outdir}/${params.mode}/baysor/segfree" }, + mode: params.publish_dir_mode, + ] + } + + withName: BAYSOR_CREATE_DATASET { + publishDir = [ + path: { "${params.outdir}/${params.mode}/baysor/create_dataset" }, + mode: params.publish_dir_mode, + ] + } + + withName: BAYSOR_PREPROCESS_TRANSCRIPTS { + publishDir = [ + path: { "${params.outdir}/${params.mode}/baysor/preprocess" }, + mode: params.publish_dir_mode, + ] + } + + withName: BAYSOR_PREVIEW { + memory = { 240.GB * task.attempt } + publishDir = [ + path: { "${params.outdir}/${params.mode}/baysor/preview" }, + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- xenium_patch (tiling) ------------------------------------ + + withName: 'XENIUM_PATCH_DIVIDE' { + ext.tile_width = params.baysor_tiling_micron + ext.overlap = params.baysor_tiling_overlap + ext.balanced = params.baysor_tiling_balanced + publishDir = [ + path: { "${params.outdir}/${meta.id}/xenium_patch" }, mode: params.publish_dir_mode, saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename } ] } - withName: FASTQC { - ext.args = '--quiet' + withName: 'XENIUM_PATCH_STITCH' { + ext.filter_method = params.patch_filter_method ?: null + ext.iqr_multiplier = params.patch_filter_iqr_multiplier + ext.z_threshold = params.patch_filter_z_threshold + ext.args = { "--min-transcripts-per-cell ${params.baysor_tiling_min_transcripts_per_cell}" } + publishDir = [ + path: { "${params.outdir}/${meta.id}/xenium_patch" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + // ---------------------------- segger --------------------------------------------------- + + withName: SEGGER_CREATE_DATASET { + publishDir = [ + path: { "${params.outdir}/${params.mode}/segger/create_dataset" }, + mode: params.publish_dir_mode, + ] + } + + withName: SEGGER_TRAIN { + publishDir = [ + path: { "${params.outdir}/${params.mode}/segger/train" }, + mode: params.publish_dir_mode, + ] + ext.args = { "--init_emb 8 --hidden_channels 32 --num_tx_tokens 10000 --out_channels 8 --heads 2 --num_mid_layers 2 --strategy auto --precision bf16-mixed" } + } + + withName: SEGGER_PREDICT { + publishDir = [ + path: { "${params.outdir}/${params.mode}/segger/predict" }, + mode: params.publish_dir_mode, + // Skip partitioned parquet dirs (Hive-style) that S3 copy can't handle + saveAs: { filename -> filename.contains('transcripts_df.parquet') ? null : filename }, + ] + } + + // ---------------------------- ficture ------------------------------------------ + + withName: FICTURE_PREPROCESS { + publishDir = [ + path: "${params.outdir}/${params.mode}/ficture/preprocess", + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- utility modules ----------------------------------- + + + withName: UNTAR { + publishDir = [ + path: { "${params.outdir}/${params.mode}/untar/" }, + mode: params.publish_dir_mode, + ] + } + + withName: RESOLIFT { + publishDir = [ + path: { "${params.outdir}/${params.mode}/resolift/" }, + mode: params.publish_dir_mode, + ] + } + + withName: PARQUET_TO_CSV { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/parquet_to_csv" }, + mode: params.publish_dir_mode, + ] + } + + withName: EXTRACT_PREVIEW_DATA { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/preview_data/" }, + mode: params.publish_dir_mode, + ] + } + + withName: GET_TRANSCRIPTS_COORDINATES { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/get_coordinates/" }, + mode: params.publish_dir_mode, + ] + } + + withName: RESIZE_TIF { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/resize_tif/" }, + mode: params.publish_dir_mode, + ] + } + + withName: SEGGER2XR { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/segger2xr/" }, + mode: params.publish_dir_mode, + ] + } + + withName: SPLIT_TRANSCRIPTS { + publishDir = [ + path: { "${params.outdir}/${params.mode}/utility/split_transcripts/" }, + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- spatialdata -------------------------------------- + + withName: SPATIALDATA_WRITE { + publishDir = [ + path: { "${params.outdir}/${params.mode}/spatialdata/write" }, + mode: params.publish_dir_mode, + ] } - withName: CUSTOM_DUMPSOFTWAREVERSIONS { + withName: SPATIALDATA_MERGE { publishDir = [ - path: { "${params.outdir}/pipeline_info" }, + path: { "${params.outdir}/${params.mode}/spatialdata/merge" }, mode: params.publish_dir_mode, - pattern: '*_versions.yml' ] } + withName: SPATIALDATA_META { + publishDir 
= [ + path: { "${params.outdir}/${params.mode}/spatialdata/meta" }, + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- cellpose ----------------------------------------- + + // GPU is auto-detected via task.accelerator in the official nf-core cellpose module + withName: CELLPOSE { + publishDir = [ + path: { "${params.outdir}/${params.mode}/cellpose" }, + mode: params.publish_dir_mode, + ] + ext.args = "--flow_threshold 0 --batch_size 1" + } + + withName: CELLPOSE_CELLS { + publishDir = [ + path: { "${params.outdir}/${params.mode}/cellpose_cells" }, + mode: params.publish_dir_mode, + ] + ext.args = "--flow_threshold 0 --batch_size 1" + } + + // ---------------------------- stardist ----------------------------------------- + + withName: '.*STARDIST.*' { + ext.args = {[ + params.stardist_prob_thresh != null ? "--prob_thresh ${params.stardist_prob_thresh}" : "", + params.stardist_nms_thresh != null ? "--nms_thresh ${params.stardist_nms_thresh}" : "", + params.stardist_n_tiles != null ? "--n_tiles ${params.stardist_n_tiles}" : "", + ].join(' ').trim()} + } + + withName: 'STARDIST_NUCLEI' { + publishDir = [ + path: { "${params.outdir}/${params.mode}/stardist_nuclei" }, + mode: params.publish_dir_mode, + ] + } + + // StarDist preprocessing/postprocessing utilities + withName: '.*EXTRACT_DAPI.*' { + publishDir = [ + path: { "${params.outdir}/${params.mode}/extract_dapi" }, + mode: params.publish_dir_mode, + ] + } + + withName: '.*CONVERT_MASK_UINT32.*|.*CONVERT_CELLS_MASK.*|.*CONVERT_NUCLEI_MASK.*' { + publishDir = [ + path: { "${params.outdir}/${params.mode}/convert_mask" }, + mode: params.publish_dir_mode, + ] + } + + // ---------------------------- opt ----------------------------------------- + + withName: OPT_FLIP { + publishDir = [ + path: { "${params.outdir}/${params.mode}/opt/flip" }, + mode: params.publish_dir_mode, + ] + } + + withName: OPT_TRACK { + publishDir = [ + path: { "${params.outdir}/${params.mode}/opt/track" }, + mode: params.publish_dir_mode, + ] + } + + withName: OPT_STAT { + publishDir = [ + path: { "${params.outdir}/${params.mode}/opt/stat" }, + mode: params.publish_dir_mode, + ] + } } diff --git a/conf/test.config b/conf/test.config index b82db0ac..bae5a73f 100644 --- a/conf/test.config +++ b/conf/test.config @@ -5,25 +5,31 @@ Defines input files and everything required to run a fast and simple pipeline test. 
Use as follows: - nextflow run nf-core/spatialxe -profile test, --outdir + nextflow run nf-core/spatialxe -profile test, --mode --outdir ---------------------------------------------------------------------------------------- */ +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + + withName: UNTAR { + ext.prefix = "test_run" + } + +} + params { config_profile_name = 'Test profile' config_profile_description = 'Minimal test dataset to check pipeline function' - // Limit resources so that this can run on GitHub Actions - max_cpus = 2 - max_memory = '6.GB' - max_time = '6.h' - // Input data - // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets - // TODO nf-core: Give any required params for the test so that command line flags are not needed - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv' - - // Genome references - genome = 'R64-1-1' + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'coordinate' } diff --git a/conf/test_coordinate_mode.config b/conf/test_coordinate_mode.config new file mode 100644 index 00000000..e8338a5c --- /dev/null +++ b/conf/test_coordinate_mode.config @@ -0,0 +1,31 @@ +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Nextflow config file for running minimal tests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Defines input files and everything required to run a fast and simple pipeline test. + + Use as follows: + nextflow run nf-core/spatialxe -profile test, --mode --outdir + +---------------------------------------------------------------------------------------- +*/ + +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} + +params { + config_profile_name = 'Test profile coordinate mode' + config_profile_description = 'Minimal test dataset to check pipeline function in the coordinate mode' + + // Input data + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'coordinate' +} diff --git a/conf/test_full.config b/conf/test_full.config index 6d7f1ca0..9e0980c4 100644 --- a/conf/test_full.config +++ b/conf/test_full.config @@ -14,11 +14,8 @@ params { config_profile_name = 'Full test profile' config_profile_description = 'Full test dataset to check pipeline function' - // Input data for full size test - // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA) - // TODO nf-core: Give any required params for the test so that command line flags are not needed - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_full_illumina_amplicon.csv' - - // Genome references - genome = 'R64-1-1' + // Input data + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'coordinate' } diff --git a/conf/test_image_mode.config b/conf/test_image_mode.config new file mode 100644 index 00000000..17e8124a --- /dev/null +++ b/conf/test_image_mode.config @@ -0,0 +1,31 @@ +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Nextflow config file for running minimal tests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Defines input files and everything required to run a fast and simple pipeline test. 
+ + Use as follows: + nextflow run nf-core/spatialxe -profile test, --mode --outdir + +---------------------------------------------------------------------------------------- +*/ + +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} + +params { + config_profile_name = 'Test profile image mode' + config_profile_description = 'Minimal test dataset to check pipeline function in the image mode' + + // Input data + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'image' +} diff --git a/conf/test_preview_mode.config b/conf/test_preview_mode.config new file mode 100644 index 00000000..144a2349 --- /dev/null +++ b/conf/test_preview_mode.config @@ -0,0 +1,31 @@ +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Nextflow config file for running minimal tests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Defines input files and everything required to run a fast and simple pipeline test. + + Use as follows: + nextflow run nf-core/spatialxe -profile test, --mode --outdir + +---------------------------------------------------------------------------------------- +*/ + +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} + +params { + config_profile_name = 'Test profile preview mode' + config_profile_description = 'Minimal test dataset to check pipeline function in the preview mode' + + // Input data + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'preview' +} diff --git a/conf/test_segfree_mode.config b/conf/test_segfree_mode.config new file mode 100644 index 00000000..5f4467b5 --- /dev/null +++ b/conf/test_segfree_mode.config @@ -0,0 +1,31 @@ +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Nextflow config file for running minimal tests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Defines input files and everything required to run a fast and simple pipeline test. 
+ + Use as follows: + nextflow run nf-core/spatialxe -profile test, --mode --outdir + +---------------------------------------------------------------------------------------- +*/ + +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} + +params { + config_profile_name = 'Test profile segfree mode' + config_profile_description = 'Minimal test dataset to check pipeline function in the segfree mode' + + // Input data + input = "${projectDir}/assets/samplesheet.csv" + outdir = 'results' + mode = 'segfree' +} diff --git a/docs/images/nf-core-spatialxe_logo_dark.png b/docs/images/nf-core-spatialxe_logo_dark.png index 7d00434c..927c3bfb 100644 Binary files a/docs/images/nf-core-spatialxe_logo_dark.png and b/docs/images/nf-core-spatialxe_logo_dark.png differ diff --git a/docs/images/nf-core-spatialxe_logo_light.png b/docs/images/nf-core-spatialxe_logo_light.png index be64834d..e933cdfa 100644 Binary files a/docs/images/nf-core-spatialxe_logo_light.png and b/docs/images/nf-core-spatialxe_logo_light.png differ diff --git a/docs/images/spatialxe-logo.png b/docs/images/spatialxe-logo.png new file mode 100644 index 00000000..07a6138f Binary files /dev/null and b/docs/images/spatialxe-logo.png differ diff --git a/docs/images/spatialxe-logo.svg b/docs/images/spatialxe-logo.svg new file mode 100644 index 00000000..6cbcf5ed --- /dev/null +++ b/docs/images/spatialxe-logo.svg @@ -0,0 +1,234 @@ + + diff --git a/docs/images/spatialxe-metromap.png b/docs/images/spatialxe-metromap.png index c48c05ea..87f17903 100644 Binary files a/docs/images/spatialxe-metromap.png and b/docs/images/spatialxe-metromap.png differ diff --git a/docs/images/spatialxe-metromap.svg b/docs/images/spatialxe-metromap.svg index c4f2ab69..bcd548a9 100644 --- a/docs/images/spatialxe-metromap.svg +++ b/docs/images/spatialxe-metromap.svg @@ -1,4 +1,3277 @@ - - - -
[docs/images/spatialxe-metromap.svg: inline SVG markup omitted. Old metromap text labels: inputs (Xenium bundle, gene panel; morphology/transcripts as tiff/csv/JSON), Xenium onboard analysis (XOA), Xenium Ranger bundle redefinition (relabel, import-segmentation), segmentation refinement (xeniumranger resegment, segger), image-based approach (Cellpose, BOMS), image-based and segmentation-free approach (Baysor), segmentation-free approach (Proseg, FICTURE), SpatialData integration, optional steps, and outputs (redefined Xenium bundle, spatialxe QC html reports, spatialxe meta JSON metadata, RO-crate output). New metromap text labels: the same Xenium bundle/gene panel inputs and XOA, default per-mode workflows (image: StarDist/Cellpose masks plus Baysor with prior segmentation; coordinate: Proseg/Segger/Baysor on parquet transcript molecules; segfree: Baysor segfree/Ficture; preview: Baysor preview report), XR relabel/import segmentation/resegment, SpatialData bundles, quality control, and MultiQC/QC report outputs.]
diff --git a/docs/output.md b/docs/output.md
index 6f44c7e1..5f4c0b9e 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -2,40 +2,180 @@
 ## Introduction
-This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline.
+This document describes the output produced by the pipeline. The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory.
 ## Pipeline overview
 The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps:
-- [FastQC](#fastqc) - Raw read QC
-- [MultiQC](#multiqc) - Aggregate report describing results and QC from the whole pipeline
-- [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution
+- Mode specific output:
+  - [image mode](#image-mode)
+  - [coordinate mode](#coordinate-mode)
+  - [segfree mode](#segfree-mode)
+  - [qc mode](#qc-mode) (or using `--run_qc`)
+  - [preview mode](#preview-mode)
+- [Additional functionality of spatialxe](#additional-functionality):
+  - [SpatialData](#spatialdata)
+  - [Xenium Ranger import segmentation](#xenium-ranger-import-segmentation)
+  - [MultiQC](#multiqc) - Aggregate report describing results and QC from the whole pipeline
+  - [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution
+
+## Image mode
+
+Output files
+
+- `image/`
+  - `xeniumranger/`
+    - `resegment/`
+      - `${meta.id}/` directory containing the output Xenium bundle
+  - `baysor/`
+    - `preprocess/`
+      - `*.csv` filtered transcripts CSV (for Baysor 0.7.1 Parquet.jl compatibility)
+    - `run/`
+      - `*segmentation.csv` results of segmentation
+      - `*.json` file with outlines of segmentation
+      - `segmentation_params.dump.toml` file with the full list of parameters used for the model
+      - `segmentation_log.log` output file with metadata of running the workflow
+      - `segmentation_counts.loom` loom file with metadata
+      - `segmentation_cell_stats.csv` statistics of segmented cells
+  - `cellpose_cells/`
+    - `*masks.tif` labelled mask output from Cellpose in TIF format
+    - `*flows.tif` cell flow output from Cellpose
+    - `*seg.npy` numpy array with cell segmentation data
+  - `stardist_nuclei/`
+    - `*.{tiff,tif}` labelled mask output from StarDist in TIF format
+  - `resolift/`
+    - `*.tiff` the upscaled TIFF file
-### FastQC
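As a quick sanity check of an image-mode run, the Baysor per-cell statistics can be inspected straight from the results directory. A minimal sketch, assuming the `image/baysor/run/` layout listed above and `./results` as the output directory (file names taken from the list; adjust to your run):

```bash
# Count segmented cells (all rows minus the CSV header) and preview the first records
stats=./results/image/baysor/run/segmentation_cell_stats.csv
echo "cells: $(( $(wc -l < "$stats") - 1 ))"
head -n 3 "$stats"
```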
+ +## Coordinate mode
Output files -- `fastqc/` - - `*_fastqc.html`: FastQC report containing quality metrics. - - `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. +- `coordinate/` + - `xenium_patch/` + - `patches/patch_grid.json` patch_grid.json metadata file + - `patches/patch_*/transcripts.parquet` per-patch transcripts.parquet files (one per patch) + - `output/xr-cell-polygons.geojson` stitched cell polygons + - `output/xr-transcript-metadata.csv` transcript metadata + - `proseg/` + - `preset/` + - `cell-polygons.geojson.gz` 2D polygons for each cell in GeoJSON format. These are flattened from 3D + - `expected-counts.csv.gz` cell-by-gene count matrix + - `cell-metadata.csv.gz` cell centroids, volume, and other information + - `transcript-metadata.csv.gz` transcript ids, genes, revised positions, assignment probability + - `gene-metadata.csv.gz` per-gene summary statistics + - `rates.csv.gz` cell-by-gene Poisson rate parameters + - `cell-polygons-layers.geojson.gz` a separate, non-overlapping cell polygon for each z-layer, preserving 3D segmentation + - `cell-hulls.geojson.gz` convex hulls around assigned transcripts + - `proseg2baysor/` + - `xr-cell-polygons.geojson` 2D polygons for each cell in GeoJSON format. These are flattened from 3D + - `xr-transcript-metadata.csv` transcript ids, genes, revised positions, assignment probability + - `segger/` + - `create_dataset/` + - `${meta.id}/` directory to save the processed Segger dataset (in PyTorch Geometric format) + - `train/` + - `${meta.id}/` directory to save the trained model and checkpoints + - `predict/` + - `${meta.id}/` directory to save the segmentation results, including cell boundaries and associations + - `baysor/` + - `run/` + - `*segmentation.csv` results of segmentation + - `*.json` file with outlines of segmentation + - `segmentation_params.dump.toml` file with full list of parameters used for the model + - `segmentation_log.log` output file with metadata of running the workflow + - `segmentation_counts.loom` loom file with metadata + - `segmentation_cell_stats.csv` statistics of segmented cells
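The Proseg outputs above are gzipped CSV/GeoJSON files and can be previewed without any special tooling. A minimal sketch, assuming the `coordinate/proseg/preset/` layout listed above and `./results` as the output directory:

```bash
# Peek at the cell-by-gene count matrix and the cell metadata written by Proseg
outdir=./results/coordinate/proseg/preset
zcat "$outdir/expected-counts.csv.gz" | head -n 2   # header plus the first cell row
zcat "$outdir/cell-metadata.csv.gz"   | head -n 5   # centroids, volume, ...
```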
-[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/). +## Segfree mode + +
+Output files
-![MultiQC - FastQC sequence counts plot](images/mqc_fastqc_counts.png)
+- `segfree/`
+  - `baysor/`
+    - `preprocess/`
+      - `*.csv` filtered transcripts CSV (for Baysor 0.7.1 Parquet.jl compatibility)
+    - `segfree/`
+      - `ncvs.loom` loom file with neighborhood results
+      - `ncvs_segfree_log.log` log file with summary statistics
+  - `ficture/`
+    - `preprocess/`
+      - `processed_transcripts.tsv.gz` transcript file used for FICTURE
+      - `coordinate_minmax.tsv` listing the min and max of the coordinates used for FICTURE
+      - `feature.clean.tsv.gz` file containing the (unique) names of genes that should be used for FICTURE
+    - `${meta.id}/results/` files containing the results of FICTURE
-![MultiQC - FastQC mean quality scores plot](images/mqc_fastqc_quality.png)
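The FICTURE preprocessing outputs above are plain (gzipped) TSVs, so they are easy to spot-check. A minimal sketch, assuming the `segfree/ficture/preprocess/` layout listed above and `./results` as the output directory:

```bash
# Coordinate bounds used by FICTURE, and the first few gene names it will keep
cat  ./results/segfree/ficture/preprocess/coordinate_minmax.tsv
zcat ./results/segfree/ficture/preprocess/feature.clean.tsv.gz | head -n 5
```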
+ +## QC mode + +
+Output files + +- `opt/` + - `flip/` + - `*.fa` the forward oriented fasta file + - `track/` + - `*.tsv` TSV file containing the gene and transcript information to which each probe aligns + - `stat/` + - `*.tsv` TSV file containing the summary stats +- `multiqc/` + - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + - `multiqc_plots/`: directory containing static images from the report in various formats. + +
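The OPT outputs above are tab-separated tables, so they can be skimmed directly. A minimal sketch, assuming the `opt/stat/` layout listed above relative to `./results` (adjust the prefix if your run nests outputs under the mode):

```bash
# Pretty-print the off-target probe summary stats (one TSV per probe set)
column -t -s $'\t' ./results/opt/stat/*.tsv | head
```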
-![MultiQC - FastQC adapter content plot](images/mqc_fastqc_adapter.png) +## Preview mode -> **NB:** The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality. +
+Output files + +- `preview/` + - `baysor/` + - `preview/` + - `preview.html` segmentation preview + +
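The preview report is a standalone HTML file and can be opened straight from the results directory. A minimal sketch, assuming the `preview/baysor/preview/` layout listed above:

```bash
# Open the Baysor segmentation preview in the default browser
xdg-open ./results/preview/baysor/preview/preview.html   # use `open` on macOS
```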
+
+## Additional Functionality
+
+### SpatialData
+
+The pipeline creates SpatialData objects (data bundles) at various stages (see the metromap in the [README](../README.md)).
+
+Output files + +- `spatialdata/` + - `write/${meta.id}/spatialdata/` spatialdata bundle of the raw data + - `meta/${meta.id}/spatialdata_spatialxe_final/` spatialdata bundle of the final data with metadata + - `sdata['raw_table'].uns['spatialdata_attrs']` provenance metadata + - `sdata['raw_table'].uns['experiment_xenium']` experimental metadata + - `sdata['raw_table'].uns['gene_panel']` gene panel metadata + +
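Since a SpatialData bundle is a Zarr store (a plain directory tree), its top-level element groups can be listed without loading it. A minimal sketch, assuming the `spatialdata/write/` layout listed above for a `--mode image` run and a hypothetical sample id `sample1`:

```bash
# Top-level Zarr groups of the bundle, e.g. images/ points/ shapes/ tables/
ls ./results/image/spatialdata/write/sample1/spatialdata/
```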
+
+### Xenium Ranger Import Segmentation
+
+This step is needed to import segmentations from different methods into the Xenium bundle and is called at different stages of the pipeline.
+
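For orientation only, the underlying 10x command has roughly the shape sketched below. The pipeline drives it for you; the flag list here is abridged and the paths are hypothetical, so treat this as a sketch and consult the 10x Xenium Ranger documentation for the authoritative options:

```bash
# Rough shape of an import-segmentation call (sketch, not the pipeline's exact invocation)
xeniumranger import-segmentation \
    --id=sample1_redefined \
    --xenium-bundle=/path/to/xenium/bundle \
    --cells=/path/to/cell_segmentation_mask.tif \
    --localcores=8 \
    --localmem=64
```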
+Output files
+
+- `xeniumranger/`
+  - `import_segementation/`
+    - `${meta.id}/` directory containing the output Xenium bundle
+
### MultiQC @@ -51,7 +191,7 @@ The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes d [MultiQC](http://multiqc.info) is a visualization tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory. -Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQC. The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. For more information about how to use MultiQC reports, see . +The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. For more information about how to use MultiQC reports, see . ### Pipeline information @@ -62,6 +202,7 @@ Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQ - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameter's are used when running the pipeline. - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`. + - Parameters used by the pipeline run: `params.json`. diff --git a/docs/usage.md b/docs/usage.md index 72e5980d..189ecd88 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -6,58 +6,150 @@ ## Introduction - - ## Samplesheet input -You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below. +You will need to create a samplesheet with information about the sample you would like to analyse before running the pipeline. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below. + +```csv title="samplesheet.csv" +sample,bundle,image +breast_cancer,/path/to/xenium/bundle,/path/to/morphology.ome.tif +``` + +| Column | Description | +| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `sample` | `Required`. Custom sample name. It is recommended to follow the same name from the output of the Xenium Onboard Analysis (XOA). Avoid using spaces in the sample name. | +| `bundle` | `Required`. Full path to the Xenium bundle, output of the Xenium Onboard Analysis. | +| `image` | `Optional`. Full path to morphology.ome.tif. If not provided, the morphology.ome.tif from the bundle is considered. | + +An [example samplesheet](../assets/example_samplesheet.csv) has been provided with the pipeline. + +#### Using the samplesheet ```bash --input '[path to samplesheet file]' ``` -### Multiple runs of the same sample +## Running the pipeline + +The typical command for running the pipeline is as follows: + +#### Image-based segmentation mode -The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate the raw reads before performing any downstream analysis. 
Below is an example for the same sample sequenced across 3 lanes: +This runs the default image mode:
+`CELLPOSE ➔ BAYSOR ➔ XR-IMPORT-SEGMENTATION ➔ SPATIALDATA ➔ QC`
-```console
-sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
-CONTROL_REP1,AEG588A1_S1_L003_R1_001.fastq.gz,AEG588A1_S1_L003_R2_001.fastq.gz
-CONTROL_REP1,AEG588A1_S1_L004_R1_001.fastq.gz,AEG588A1_S1_L004_R2_001.fastq.gz
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input ./samplesheet.csv \
+    --outdir ./results \
+    --mode image
+```
-### Full samplesheet
+#### Coordinate-based (transcripts-based) segmentation mode
+
+This runs the default coordinate mode:
+`PROSEG ➔ PROSEG2BAYSOR ➔ XR-IMPORT-SEGMENTATION ➔ SPATIALDATA ➔ QC`
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input ./samplesheet.csv \
+    --outdir ./results \
+    --mode coordinate
+```
-The pipeline will auto-detect whether a sample is single- or paired-end using the information provided in the samplesheet. The samplesheet can have as many columns as you desire, however, there is a strict requirement for the first 3 columns to match those defined in the table below.
+### Segmentation free mode
-A final samplesheet file consisting of both single- and paired-end data may look something like the one below. This is for 6 samples, where `TREATMENT_REP3` has been sequenced twice.
+`BAYSOR_SEGFREE`
-```console
-sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
-CONTROL_REP2,AEG588A2_S2_L002_R1_001.fastq.gz,AEG588A2_S2_L002_R2_001.fastq.gz
-CONTROL_REP3,AEG588A3_S3_L002_R1_001.fastq.gz,AEG588A3_S3_L002_R2_001.fastq.gz
-TREATMENT_REP1,AEG588A4_S4_L003_R1_001.fastq.gz,
-TREATMENT_REP2,AEG588A5_S5_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L004_R1_001.fastq.gz,
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode segfree
+```
-| Column | Description |
-| --------- | ----------- |
-| `sample` | Custom sample name. This entry will be identical for multiple sequencing libraries/runs from the same sample. Spaces in sample names are automatically converted to underscores (`_`). |
-| `fastq_1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". |
-| `fastq_2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". |
+### Preview mode
-An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline.
+`BAYSOR_PREVIEW`
-## Running the pipeline
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode preview
+```
-The typical command for running the pipeline is as follows:
+### Quality control
+It is possible to run the quality control with `--run_qc` to couple it with another mode like so:
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input ./samplesheet.csv \
+    --outdir ./results \
+    --mode image \
+    --run_qc
+```
+
+It is also possible to run just the QC with:
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input samplesheet.csv \
+    --outdir <OUTDIR> \
+    --mode qc
+```
+
+- QC methods:
+  - [MultiQC Xenium Extra Plugin](https://github.com/MultiQC/xenium-extra)
+  - [OPT](https://github.com/JEFworks-Lab/off-target-probe-tracker)
+
+### Image-based Segmentation mode (--mode image):
+ +- cellpose +- baysor +- xeniumranger +- stardist + +### Coordinate-based (transcripts-based) Segmentation methods (--mode coordinate):
+ +- proseg +- baysor +- segger + +### Segmentation free methods (--mode segfree):
+
+- baysor
+- ficture
+
+#### Run Segmentation with the methods mentioned above:
+
+e.g. to run Proseg segmentation, use the `coordinate` mode and the `proseg` segmentation method (`--method`):
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input ./samplesheet.csv \
+    --outdir ./results \
+    --mode coordinate \
+    --method proseg
+```
+
+e.g. to run Cellpose segmentation, use the `image` mode and the `cellpose` segmentation method (`--method`):
+
+```bash
+nextflow run nf-core/spatialxe \
+    -profile <docker/singularity/.../institute> \
+    --input ./samplesheet.csv \
+    --outdir ./results \
+    --mode image \
+    --method cellpose
+```

This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.

Note that the pipeline will create the following files in your working directory:

```bash
work    # Directory containing the nextflow working files
# Other nextflow hidden files, eg. history of pipeline runs and old logs.
```

+If you wish to repeatedly use the same parameters for multiple runs, rather than specifying each flag in the command, you can specify these in a params file.
+
+Pipeline settings can be provided in a `yaml` or `json` file via `-params-file <file>`.
+
+> [!WARNING]
+> Do not use `-c <file>` to specify parameters as this will result in errors. Custom config files specified with `-c` must only be used for [tuning process resource specifications](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources), other infrastructural tweaks (such as output directories), or module arguments (args).
+
+The above pipeline run specified with a params file in yaml format:
+
+```bash
+nextflow run nf-core/spatialxe -profile docker -params-file params.yaml
+```
+
+with:
+
+```yaml title="params.yaml"
+input: './samplesheet.csv'
+outdir: './results/'
+<...>
+```
+
+You can also generate such `YAML`/`JSON` files via [nf-core/launch](https://nf-co.re/launch).

### Updating the pipeline

When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:

```bash
nextflow pull nf-core/spatialxe
```

### Reproducibility

-It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
+It is a good idea to specify the pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
-First, go to the [nf-core/spatialxe releases page](https://github.com/nf-core/spatialxe/releases) and find the latest pipeline version - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. Of course, you can switch to another version by changing the number after the `-r` flag.
+First, go to the [nf-core/spatialxe releases page](https://github.com/nf-core/spatialxe/releases) and find the latest pipeline version - numeric only (eg. `1.0.0`).
Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.0.0`. Of course, you can switch to another version by changing the number after the `-r` flag.

This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. For example, at the bottom of the MultiQC reports.

+To further assist in reproducibility, you can share and reuse [parameter files](#running-the-pipeline) to repeat pipeline runs with the same settings without having to write out a command with every single parameter.
+
+> [!TIP]
+> If you wish to share such profiles (such as uploading them as supplementary material for academic publications), make sure to NOT include cluster specific paths to files, nor institutional specific profiles.
+
## Core Nextflow arguments

-> **NB:** These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen).
+> [!NOTE]
+> These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen)

### `-profile`

Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.

-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below.
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Apptainer, Conda) - see below.

+> [!IMPORTANT]
> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.

-The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to see if your system is available in these configs please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
+The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to check if your system is supported, please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).

Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important! They are loaded in sequence, so later profiles can overwrite earlier profiles.

-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer enviroment.
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer environment.
- `test` - A profile with a complete configuration for automated testing @@ -118,9 +240,13 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof - `shifter` - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/) - `charliecloud` - - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/) + - A generic configuration profile to be used with [Charliecloud](https://charliecloud.io/) +- `apptainer` + - A generic configuration profile to be used with [Apptainer](https://apptainer.org/) +- `wave` + - A generic configuration profile to enable [Wave](https://seqera.io/wave/) containers. Use together with one of the above (requires Nextflow ` 24.03.0-edge` or later). - `conda` - - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud. + - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter, Charliecloud, or Apptainer. ### `-resume` @@ -136,104 +262,21 @@ Specify the path to a specific config file (this is a core Nextflow command). Se ### Resource requests -Whilst the default requirements set within the pipeline will hopefully work for most people and with most input data, you may find that you want to customise the compute resources that the pipeline requests. Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with any of the error codes specified [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L18) it will automatically be resubmitted with higher requests (2 x original, then 3 x original). If it still fails after the third attempt then the pipeline execution is stopped. - -For example, if the nf-core/rnaseq pipeline is failing after multiple re-submissions of the `STAR_ALIGN` process due to an exit code of `137` this would indicate that there is an out of memory issue: - -```console -[62/149eb0] NOTE: Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1) -Error executing process > 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)' - -Caused by: - Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) +Whilst the default requirements set within the pipeline will hopefully work for most people and with most input data, you may find that you want to customise the compute resources that the pipeline requests. Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the pipeline steps, if the job exits with any of the error codes specified [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L18) it will automatically be resubmitted with higher resources request (2 x original, then 3 x original). If it still fails after the third attempt then the pipeline execution is stopped. -Command executed: - STAR \ - --genomeDir star \ - --readFilesIn WT_REP1_trimmed.fq.gz \ - --runThreadN 2 \ - --outFileNamePrefix WT_REP1. 
\ - +To change the resource requests, please see the [max resources](https://nf-co.re/docs/usage/configuration#max-resources) and [tuning workflow resources](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources) section of the nf-core website. -Command exit status: - 137 +### Custom Containers -Command output: - (empty) +In some cases, you may wish to change the container or conda environment used by a pipeline steps for a particular tool. By default, nf-core pipelines use containers and software from the [biocontainers](https://biocontainers.pro/) or [bioconda](https://bioconda.github.io/) projects. However, in some cases the pipeline specified version maybe out of date. -Command error: - .command.sh: line 9: 30 Killed STAR --genomeDir star --readFilesIn WT_REP1_trimmed.fq.gz --runThreadN 2 --outFileNamePrefix WT_REP1. -Work dir: - /home/pipelinetest/work/9d/172ca5881234073e8d76f2a19c88fb +To use a different container from the default container or conda environment specified in a pipeline, please see the [updating tool versions](https://nf-co.re/docs/usage/configuration#updating-tool-versions) section of the nf-core website. -Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run` -``` - -#### For beginners - -A first step to bypass this error, you could try to increase the amount of CPUs, memory, and time for the whole pipeline. Therefor you can try to increase the resource for the parameters `--max_cpus`, `--max_memory`, and `--max_time`. Based on the error above, you have to increase the amount of memory. Therefore you can go to the [parameter documentation of rnaseq](https://nf-co.re/rnaseq/3.9/parameters) and scroll down to the `show hidden parameter` button to get the default value for `--max_memory`. In this case 128GB, you than can try to run your pipeline again with `--max_memory 200GB -resume` to skip all process, that were already calculated. If you can not increase the resource of the complete pipeline, you can try to adapt the resource for a single process as mentioned below. - -#### Advanced option on process level - -To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN). -We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/star/align/main.nf`. -If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9). -The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements. -The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB. 
-Providing you haven't set any other standard nf-core parameters to **cap** the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline then we can try and bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB.
-The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections.
-
-```nextflow
-process {
-    withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' {
-        memory = 100.GB
-    }
-}
-```
-
-> **NB:** We specify the full process name i.e. `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN` in the config file because this takes priority over the short name (`STAR_ALIGN`) and allows existing configuration using the full process name to be correctly overridden.
->
-> If you get a warning suggesting that the process selector isn't recognised check that the process name has been specified correctly.
-
-### Updating containers (advanced users)
-
-The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release the nf-core/viralrecon everytime a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`.
+### Custom Tool Arguments

-1. Check the default version used by the pipeline in the module file for [Pangolin](https://github.com/nf-core/viralrecon/blob/a85d5969f9025409e3618d6c280ef15ce417df65/modules/nf-core/software/pangolin/main.nf#L14-L19)
-2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags)
-3. Create the custom config accordingly:
+A pipeline might not always support every possible argument or option of a particular tool used in the pipeline. Fortunately, nf-core pipelines provide some freedom to users to insert additional parameters that the pipeline does not include by default.

-   - For Docker:
-
-     ```nextflow
-     process {
-         withName: PANGOLIN {
-             container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0'
-         }
-     }
-     ```
-
-   - For Singularity:
-
-     ```nextflow
-     process {
-         withName: PANGOLIN {
-             container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0'
-         }
-     }
-     ```
-
-   - For Conda:
-
-     ```nextflow
-     process {
-         withName: PANGOLIN {
-             conda = 'bioconda::pangolin=3.0.5'
-         }
-     }
-     ```
-
-> **NB:** If you wish to periodically update individual tool-specific results (e.g. Pangolin) generated by the pipeline then you must ensure to keep the `work/` directory otherwise the `-resume` ability of the pipeline will be compromised and it will restart from scratch.
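As a minimal sketch of the new Custom Tool Arguments mechanism (assuming the pipeline's modules follow the standard nf-core `task.ext.args` pattern; the `CELLPOSE` selector and the flag shown are illustrative placeholders, not options this pipeline necessarily exposes):

```nextflow
// custom.config: supply extra command-line options to a single tool
process {
    withName: 'CELLPOSE' {
        // appended to the tool's command line by the module via task.ext.args
        ext.args = '--flow_threshold 0.4'
    }
}
```

Supplied to a run with `-c custom.config`, the extra options reach the tool without any change to the pipeline code.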
+To learn how to provide additional arguments to a particular tool of the pipeline, please see the [customising tool arguments](https://nf-co.re/docs/usage/configuration#customising-tool-arguments) section of the nf-core website. ### nf-core/configs @@ -243,14 +286,6 @@ See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs). -## Azure Resource Requests - -To be used with the `azurebatch` profile by specifying the `-profile azurebatch`. -We recommend providing a compute `params.vm_type` of `Standard_D16_v3` VMs by default but these options can be changed if required. - -Note that the choice of VM size depends on your quota and the overall workload during the analysis. -For a thorough list, please refer the [Azure Sizes for virtual machines in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes). - ## Running in the background Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished. diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy deleted file mode 100755 index 33cd4f6e..00000000 --- a/lib/NfcoreSchema.groovy +++ /dev/null @@ -1,528 +0,0 @@ -// -// This file holds several functions used to perform JSON parameter validation, help and summary rendering for the nf-core pipeline template. -// - -import org.everit.json.schema.Schema -import org.everit.json.schema.loader.SchemaLoader -import org.everit.json.schema.ValidationException -import org.json.JSONObject -import org.json.JSONTokener -import org.json.JSONArray -import groovy.json.JsonSlurper -import groovy.json.JsonBuilder - -class NfcoreSchema { - - // - // Resolve Schema path relative to main workflow directory - // - public static String getSchemaPath(workflow, schema_filename='nextflow_schema.json') { - return "${workflow.projectDir}/${schema_filename}" - } - - // - // Function to loop over all parameters defined in schema and check - // whether the given parameters adhere to the specifications - // - /* groovylint-disable-next-line UnusedPrivateMethodParameter */ - public static void validateParameters(workflow, params, log, schema_filename='nextflow_schema.json') { - def has_error = false - //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// - // Check for nextflow core params and unexpected params - def json = new File(getSchemaPath(workflow, schema_filename=schema_filename)).text - def Map schemaParams = (Map) new JsonSlurper().parseText(json).get('definitions') - def nf_params = [ - // Options for base `nextflow` command - 'bg', - 'c', - 'C', - 'config', - 'd', - 'D', - 'dockerize', - 'h', - 'log', - 'q', - 'quiet', - 'syslog', - 'v', - - // Options for `nextflow run` command - 'ansi', - 'ansi-log', - 'bg', - 'bucket-dir', - 'c', - 'cache', - 'config', - 'dsl2', - 'dump-channels', - 'dump-hashes', - 'E', - 'entry', - 'latest', - 'lib', - 'main-script', - 'N', - 'name', - 'offline', - 'params-file', - 'pi', - 'plugins', - 'poll-interval', - 'pool-size', - 'profile', - 'ps', - 'qs', - 'queue-size', - 'r', - 'resume', - 'revision', - 'stdin', - 'stub', - 'stub-run', - 'test', - 'w', - 'with-charliecloud', - 'with-conda', - 'with-dag', - 'with-docker', - 'with-mpi', - 'with-notification', - 'with-podman', - 'with-report', - 'with-singularity', - 'with-timeline', - 'with-tower', - 'with-trace', - 'with-weblog', - 
'without-docker', - 'without-podman', - 'work-dir' - ] - def unexpectedParams = [] - - // Collect expected parameters from the schema - def expectedParams = [] - def enums = [:] - for (group in schemaParams) { - for (p in group.value['properties']) { - expectedParams.push(p.key) - if (group.value['properties'][p.key].containsKey('enum')) { - enums[p.key] = group.value['properties'][p.key]['enum'] - } - } - } - - for (specifiedParam in params.keySet()) { - // nextflow params - if (nf_params.contains(specifiedParam)) { - log.error "ERROR: You used a core Nextflow option with two hyphens: '--${specifiedParam}'. Please resubmit with '-${specifiedParam}'" - has_error = true - } - // unexpected params - def params_ignore = params.schema_ignore_params.split(',') + 'schema_ignore_params' - def expectedParamsLowerCase = expectedParams.collect{ it.replace("-", "").toLowerCase() } - def specifiedParamLowerCase = specifiedParam.replace("-", "").toLowerCase() - def isCamelCaseBug = (specifiedParam.contains("-") && !expectedParams.contains(specifiedParam) && expectedParamsLowerCase.contains(specifiedParamLowerCase)) - if (!expectedParams.contains(specifiedParam) && !params_ignore.contains(specifiedParam) && !isCamelCaseBug) { - // Temporarily remove camelCase/camel-case params #1035 - def unexpectedParamsLowerCase = unexpectedParams.collect{ it.replace("-", "").toLowerCase()} - if (!unexpectedParamsLowerCase.contains(specifiedParamLowerCase)){ - unexpectedParams.push(specifiedParam) - } - } - } - - //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// - // Validate parameters against the schema - InputStream input_stream = new File(getSchemaPath(workflow, schema_filename=schema_filename)).newInputStream() - JSONObject raw_schema = new JSONObject(new JSONTokener(input_stream)) - - // Remove anything that's in params.schema_ignore_params - raw_schema = removeIgnoredParams(raw_schema, params) - - Schema schema = SchemaLoader.load(raw_schema) - - // Clean the parameters - def cleanedParams = cleanParameters(params) - - // Convert to JSONObject - def jsonParams = new JsonBuilder(cleanedParams) - JSONObject params_json = new JSONObject(jsonParams.toString()) - - // Validate - try { - schema.validate(params_json) - } catch (ValidationException e) { - println '' - log.error 'ERROR: Validation of pipeline parameters failed!' 
- JSONObject exceptionJSON = e.toJSON() - printExceptions(exceptionJSON, params_json, log, enums) - println '' - has_error = true - } - - // Check for unexpected parameters - if (unexpectedParams.size() > 0) { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - println '' - def warn_msg = 'Found unexpected parameters:' - for (unexpectedParam in unexpectedParams) { - warn_msg = warn_msg + "\n* --${unexpectedParam}: ${params[unexpectedParam].toString()}" - } - log.warn warn_msg - log.info "- ${colors.dim}Ignore this warning: params.schema_ignore_params = \"${unexpectedParams.join(',')}\" ${colors.reset}" - println '' - } - - if (has_error) { - System.exit(1) - } - } - - // - // Beautify parameters for --help - // - public static String paramsHelp(workflow, params, command, schema_filename='nextflow_schema.json') { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - Integer num_hidden = 0 - String output = '' - output += 'Typical pipeline command:\n\n' - output += " ${colors.cyan}${command}${colors.reset}\n\n" - Map params_map = paramsLoad(getSchemaPath(workflow, schema_filename=schema_filename)) - Integer max_chars = paramsMaxChars(params_map) + 1 - Integer desc_indent = max_chars + 14 - Integer dec_linewidth = 160 - desc_indent - for (group in params_map.keySet()) { - Integer num_params = 0 - String group_output = colors.underlined + colors.bold + group + colors.reset + '\n' - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (group_params.get(param).hidden && !params.show_hidden_params) { - num_hidden += 1 - continue; - } - def type = '[' + group_params.get(param).type + ']' - def description = group_params.get(param).description - def defaultValue = group_params.get(param).default != null ? " [default: " + group_params.get(param).default.toString() + "]" : '' - def description_default = description + colors.dim + defaultValue + colors.reset - // Wrap long description texts - // Loosely based on https://dzone.com/articles/groovy-plain-text-word-wrap - if (description_default.length() > dec_linewidth){ - List olines = [] - String oline = "" // " " * indent - description_default.split(" ").each() { wrd -> - if ((oline.size() + wrd.size()) <= dec_linewidth) { - oline += wrd + " " - } else { - olines += oline - oline = wrd + " " - } - } - olines += oline - description_default = olines.join("\n" + " " * desc_indent) - } - group_output += " --" + param.padRight(max_chars) + colors.dim + type.padRight(10) + colors.reset + description_default + '\n' - num_params += 1 - } - group_output += '\n' - if (num_params > 0){ - output += group_output - } - } - if (num_hidden > 0){ - output += colors.dim + "!! 
Hiding $num_hidden params, use --show_hidden_params to show them !!\n" + colors.reset - } - output += NfcoreTemplate.dashedLine(params.monochrome_logs) - return output - } - - // - // Groovy Map summarising parameters/workflow options used by the pipeline - // - public static LinkedHashMap paramsSummaryMap(workflow, params, schema_filename='nextflow_schema.json') { - // Get a selection of core Nextflow workflow options - def Map workflow_summary = [:] - if (workflow.revision) { - workflow_summary['revision'] = workflow.revision - } - workflow_summary['runName'] = workflow.runName - if (workflow.containerEngine) { - workflow_summary['containerEngine'] = workflow.containerEngine - } - if (workflow.container) { - workflow_summary['container'] = workflow.container - } - workflow_summary['launchDir'] = workflow.launchDir - workflow_summary['workDir'] = workflow.workDir - workflow_summary['projectDir'] = workflow.projectDir - workflow_summary['userName'] = workflow.userName - workflow_summary['profile'] = workflow.profile - workflow_summary['configFiles'] = workflow.configFiles.join(', ') - - // Get pipeline parameters defined in JSON Schema - def Map params_summary = [:] - def params_map = paramsLoad(getSchemaPath(workflow, schema_filename=schema_filename)) - for (group in params_map.keySet()) { - def sub_params = new LinkedHashMap() - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (params.containsKey(param)) { - def params_value = params.get(param) - def schema_value = group_params.get(param).default - def param_type = group_params.get(param).type - if (schema_value != null) { - if (param_type == 'string') { - if (schema_value.contains('$projectDir') || schema_value.contains('${projectDir}')) { - def sub_string = schema_value.replace('\$projectDir', '') - sub_string = sub_string.replace('\${projectDir}', '') - if (params_value.contains(sub_string)) { - schema_value = params_value - } - } - if (schema_value.contains('$params.outdir') || schema_value.contains('${params.outdir}')) { - def sub_string = schema_value.replace('\$params.outdir', '') - sub_string = sub_string.replace('\${params.outdir}', '') - if ("${params.outdir}${sub_string}" == params_value) { - schema_value = params_value - } - } - } - } - - // We have a default in the schema, and this isn't it - if (schema_value != null && params_value != schema_value) { - sub_params.put(param, params_value) - } - // No default in the schema, and this isn't empty - else if (schema_value == null && params_value != "" && params_value != null && params_value != false) { - sub_params.put(param, params_value) - } - } - } - params_summary.put(group, sub_params) - } - return [ 'Core Nextflow options' : workflow_summary ] << params_summary - } - - // - // Beautify parameters for summary and return as string - // - public static String paramsSummaryLog(workflow, params) { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - String output = '' - def params_map = paramsSummaryMap(workflow, params) - def max_chars = paramsMaxChars(params_map) - for (group in params_map.keySet()) { - def group_params = params_map.get(group) // This gets the parameters of that particular group - if (group_params) { - output += colors.bold + group + colors.reset + '\n' - for (param in group_params.keySet()) { - output += " " + colors.blue + param.padRight(max_chars) + ": " + colors.green + group_params.get(param) + colors.reset + '\n' - } - output += '\n' - } - } - output 
+= "!! Only displaying parameters that differ from the pipeline defaults !!\n" - output += NfcoreTemplate.dashedLine(params.monochrome_logs) - return output - } - - // - // Loop over nested exceptions and print the causingException - // - private static void printExceptions(ex_json, params_json, log, enums, limit=5) { - def causingExceptions = ex_json['causingExceptions'] - if (causingExceptions.length() == 0) { - def m = ex_json['message'] =~ /required key \[([^\]]+)\] not found/ - // Missing required param - if (m.matches()) { - log.error "* Missing required parameter: --${m[0][1]}" - } - // Other base-level error - else if (ex_json['pointerToViolation'] == '#') { - log.error "* ${ex_json['message']}" - } - // Error with specific param - else { - def param = ex_json['pointerToViolation'] - ~/^#\// - def param_val = params_json[param].toString() - if (enums.containsKey(param)) { - def error_msg = "* --${param}: '${param_val}' is not a valid choice (Available choices" - if (enums[param].size() > limit) { - log.error "${error_msg} (${limit} of ${enums[param].size()}): ${enums[param][0..limit-1].join(', ')}, ... )" - } else { - log.error "${error_msg}: ${enums[param].join(', ')})" - } - } else { - log.error "* --${param}: ${ex_json['message']} (${param_val})" - } - } - } - for (ex in causingExceptions) { - printExceptions(ex, params_json, log, enums) - } - } - - // - // Remove an element from a JSONArray - // - private static JSONArray removeElement(json_array, element) { - def list = [] - int len = json_array.length() - for (int i=0;i - if(raw_schema.keySet().contains('definitions')){ - raw_schema.definitions.each { definition -> - for (key in definition.keySet()){ - if (definition[key].get("properties").keySet().contains(ignore_param)){ - // Remove the param to ignore - definition[key].get("properties").remove(ignore_param) - // If the param was required, change this - if (definition[key].has("required")) { - def cleaned_required = removeElement(definition[key].required, ignore_param) - definition[key].put("required", cleaned_required) - } - } - } - } - } - if(raw_schema.keySet().contains('properties') && raw_schema.get('properties').keySet().contains(ignore_param)) { - raw_schema.get("properties").remove(ignore_param) - } - if(raw_schema.keySet().contains('required') && raw_schema.required.contains(ignore_param)) { - def cleaned_required = removeElement(raw_schema.required, ignore_param) - raw_schema.put("required", cleaned_required) - } - } - return raw_schema - } - - // - // Clean and check parameters relative to Nextflow native classes - // - private static Map cleanParameters(params) { - def new_params = params.getClass().newInstance(params) - for (p in params) { - // remove anything evaluating to false - if (!p['value']) { - new_params.remove(p.key) - } - // Cast MemoryUnit to String - if (p['value'].getClass() == nextflow.util.MemoryUnit) { - new_params.replace(p.key, p['value'].toString()) - } - // Cast Duration to String - if (p['value'].getClass() == nextflow.util.Duration) { - new_params.replace(p.key, p['value'].toString().replaceFirst(/d(?!\S)/, "day")) - } - // Cast LinkedHashMap to String - if (p['value'].getClass() == LinkedHashMap) { - new_params.replace(p.key, p['value'].toString()) - } - } - return new_params - } - - // - // This function tries to read a JSON params file - // - private static LinkedHashMap paramsLoad(String json_schema) { - def params_map = new LinkedHashMap() - try { - params_map = paramsRead(json_schema) - } catch (Exception e) { - println "Could not 
read parameters settings from JSON. $e" - params_map = new LinkedHashMap() - } - return params_map - } - - // - // Method to actually read in JSON file using Groovy. - // Group (as Key), values are all parameters - // - Parameter1 as Key, Description as Value - // - Parameter2 as Key, Description as Value - // .... - // Group - // - - private static LinkedHashMap paramsRead(String json_schema) throws Exception { - def json = new File(json_schema).text - def Map schema_definitions = (Map) new JsonSlurper().parseText(json).get('definitions') - def Map schema_properties = (Map) new JsonSlurper().parseText(json).get('properties') - /* Tree looks like this in nf-core schema - * definitions <- this is what the first get('definitions') gets us - group 1 - title - description - properties - parameter 1 - type - description - parameter 2 - type - description - group 2 - title - description - properties - parameter 1 - type - description - * properties <- parameters can also be ungrouped, outside of definitions - parameter 1 - type - description - */ - - // Grouped params - def params_map = new LinkedHashMap() - schema_definitions.each { key, val -> - def Map group = schema_definitions."$key".properties // Gets the property object of the group - def title = schema_definitions."$key".title - def sub_params = new LinkedHashMap() - group.each { innerkey, value -> - sub_params.put(innerkey, value) - } - params_map.put(title, sub_params) - } - - // Ungrouped params - def ungrouped_params = new LinkedHashMap() - schema_properties.each { innerkey, value -> - ungrouped_params.put(innerkey, value) - } - params_map.put("Other parameters", ungrouped_params) - - return params_map - } - - // - // Get maximum number of characters across all parameter names - // - private static Integer paramsMaxChars(params_map) { - Integer max_chars = 0 - for (group in params_map.keySet()) { - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (param.size() > max_chars) { - max_chars = param.size() - } - } - } - return max_chars - } -} diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy deleted file mode 100755 index 25a0a74a..00000000 --- a/lib/NfcoreTemplate.groovy +++ /dev/null @@ -1,336 +0,0 @@ -// -// This file holds several functions used within the nf-core pipeline template. -// - -import org.yaml.snakeyaml.Yaml - -class NfcoreTemplate { - - // - // Check AWS Batch related parameters have been specified correctly - // - public static void awsBatch(workflow, params) { - if (workflow.profile.contains('awsbatch')) { - // Check params.awsqueue and params.awsregion have been set if running on AWSBatch - assert (params.awsqueue && params.awsregion) : "Specify correct --awsqueue and --awsregion parameters on AWSBatch!" - // Check outdir paths to be S3 buckets if running on AWSBatch - assert params.outdir.startsWith('s3:') : "Outdir not on S3 - specify S3 Bucket to run on AWSBatch!" - } - } - - // - // Warn if a -profile or Nextflow config has not been provided to run the pipeline - // - public static void checkConfigProvided(workflow, log) { - if (workflow.profile == 'standard' && workflow.configFiles.size() <= 1) { - log.warn "[$workflow.manifest.name] You are attempting to run the pipeline without any custom configuration!\n\n" + - "This will be dependent on your local compute environment but can be achieved via one or more of the following:\n" + - " (1) Using an existing pipeline profile e.g. 
`-profile docker` or `-profile singularity`\n" + - " (2) Using an existing nf-core/configs for your Institution e.g. `-profile crick` or `-profile uppmax`\n" + - " (3) Using your own local custom config e.g. `-c /path/to/your/custom.config`\n\n" + - "Please refer to the quick start section and usage docs for the pipeline.\n " - } - } - - // - // Generate version string - // - public static String version(workflow) { - String version_string = "" - - if (workflow.manifest.version) { - def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : '' - version_string += "${prefix_v}${workflow.manifest.version}" - } - - if (workflow.commitId) { - def git_shortsha = workflow.commitId.substring(0, 7) - version_string += "-g${git_shortsha}" - } - - return version_string - } - - // - // Construct and send completion email - // - public static void email(workflow, params, summary_params, projectDir, log, multiqc_report=[]) { - - // Set up the e-mail variables - def subject = "[$workflow.manifest.name] Successful: $workflow.runName" - if (!workflow.success) { - subject = "[$workflow.manifest.name] FAILED: $workflow.runName" - } - - def summary = [:] - for (group in summary_params.keySet()) { - summary << summary_params[group] - } - - def misc_fields = [:] - misc_fields['Date Started'] = workflow.start - misc_fields['Date Completed'] = workflow.complete - misc_fields['Pipeline script file path'] = workflow.scriptFile - misc_fields['Pipeline script hash ID'] = workflow.scriptId - if (workflow.repository) misc_fields['Pipeline repository Git URL'] = workflow.repository - if (workflow.commitId) misc_fields['Pipeline repository Git Commit'] = workflow.commitId - if (workflow.revision) misc_fields['Pipeline Git branch/tag'] = workflow.revision - misc_fields['Nextflow Version'] = workflow.nextflow.version - misc_fields['Nextflow Build'] = workflow.nextflow.build - misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp - - def email_fields = [:] - email_fields['version'] = NfcoreTemplate.version(workflow) - email_fields['runName'] = workflow.runName - email_fields['success'] = workflow.success - email_fields['dateComplete'] = workflow.complete - email_fields['duration'] = workflow.duration - email_fields['exitStatus'] = workflow.exitStatus - email_fields['errorMessage'] = (workflow.errorMessage ?: 'None') - email_fields['errorReport'] = (workflow.errorReport ?: 'None') - email_fields['commandLine'] = workflow.commandLine - email_fields['projectDir'] = workflow.projectDir - email_fields['summary'] = summary << misc_fields - - // On success try attach the multiqc report - def mqc_report = null - try { - if (workflow.success) { - mqc_report = multiqc_report.getVal() - if (mqc_report.getClass() == ArrayList && mqc_report.size() >= 1) { - if (mqc_report.size() > 1) { - log.warn "[$workflow.manifest.name] Found multiple reports from process 'MULTIQC', will use only one" - } - mqc_report = mqc_report[0] - } - } - } catch (all) { - if (multiqc_report) { - log.warn "[$workflow.manifest.name] Could not attach MultiQC report to summary email" - } - } - - // Check if we are only sending emails on failure - def email_address = params.email - if (!params.email && params.email_on_fail && !workflow.success) { - email_address = params.email_on_fail - } - - // Render the TXT template - def engine = new groovy.text.GStringTemplateEngine() - def tf = new File("$projectDir/assets/email_template.txt") - def txt_template = engine.createTemplate(tf).make(email_fields) - def email_txt = txt_template.toString() - - // 
Render the HTML template - def hf = new File("$projectDir/assets/email_template.html") - def html_template = engine.createTemplate(hf).make(email_fields) - def email_html = html_template.toString() - - // Render the sendmail template - def max_multiqc_email_size = params.max_multiqc_email_size as nextflow.util.MemoryUnit - def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, projectDir: "$projectDir", mqcFile: mqc_report, mqcMaxSize: max_multiqc_email_size.toBytes() ] - def sf = new File("$projectDir/assets/sendmail_template.txt") - def sendmail_template = engine.createTemplate(sf).make(smail_fields) - def sendmail_html = sendmail_template.toString() - - // Send the HTML e-mail - Map colors = logColours(params.monochrome_logs) - if (email_address) { - try { - if (params.plaintext_email) { throw GroovyException('Send plaintext e-mail, not HTML') } - // Try to send HTML e-mail using sendmail - [ 'sendmail', '-t' ].execute() << sendmail_html - log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Sent summary e-mail to $email_address (sendmail)-" - } catch (all) { - // Catch failures and try with plaintext - def mail_cmd = [ 'mail', '-s', subject, '--content-type=text/html', email_address ] - if ( mqc_report.size() <= max_multiqc_email_size.toBytes() ) { - mail_cmd += [ '-A', mqc_report ] - } - mail_cmd.execute() << email_html - log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Sent summary e-mail to $email_address (mail)-" - } - } - - // Write summary e-mail HTML to a file - def output_d = new File("${params.outdir}/pipeline_info/") - if (!output_d.exists()) { - output_d.mkdirs() - } - def output_hf = new File(output_d, "pipeline_report.html") - output_hf.withWriter { w -> w << email_html } - def output_tf = new File(output_d, "pipeline_report.txt") - output_tf.withWriter { w -> w << email_txt } - } - - // - // Construct and send a notification to a web server as JSON - // e.g. 
Microsoft Teams and Slack - // - public static void IM_notification(workflow, params, summary_params, projectDir, log) { - def hook_url = params.hook_url - - def summary = [:] - for (group in summary_params.keySet()) { - summary << summary_params[group] - } - - def misc_fields = [:] - misc_fields['start'] = workflow.start - misc_fields['complete'] = workflow.complete - misc_fields['scriptfile'] = workflow.scriptFile - misc_fields['scriptid'] = workflow.scriptId - if (workflow.repository) misc_fields['repository'] = workflow.repository - if (workflow.commitId) misc_fields['commitid'] = workflow.commitId - if (workflow.revision) misc_fields['revision'] = workflow.revision - misc_fields['nxf_version'] = workflow.nextflow.version - misc_fields['nxf_build'] = workflow.nextflow.build - misc_fields['nxf_timestamp'] = workflow.nextflow.timestamp - - def msg_fields = [:] - msg_fields['version'] = NfcoreTemplate.version(workflow) - msg_fields['runName'] = workflow.runName - msg_fields['success'] = workflow.success - msg_fields['dateComplete'] = workflow.complete - msg_fields['duration'] = workflow.duration - msg_fields['exitStatus'] = workflow.exitStatus - msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None') - msg_fields['errorReport'] = (workflow.errorReport ?: 'None') - msg_fields['commandLine'] = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "") - msg_fields['projectDir'] = workflow.projectDir - msg_fields['summary'] = summary << misc_fields - - // Render the JSON template - def engine = new groovy.text.GStringTemplateEngine() - // Different JSON depending on the service provider - // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format - def json_path = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json" - def hf = new File("$projectDir/assets/${json_path}") - def json_template = engine.createTemplate(hf).make(msg_fields) - def json_message = json_template.toString() - - // POST - def post = new URL(hook_url).openConnection(); - post.setRequestMethod("POST") - post.setDoOutput(true) - post.setRequestProperty("Content-Type", "application/json") - post.getOutputStream().write(json_message.getBytes("UTF-8")); - def postRC = post.getResponseCode(); - if (! postRC.equals(200)) { - log.warn(post.getErrorStream().getText()); - } - } - - // - // Print pipeline summary on completion - // - public static void summary(workflow, params, log) { - Map colors = logColours(params.monochrome_logs) - if (workflow.success) { - if (workflow.stats.ignoredCount == 0) { - log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-" - } else { - log.info "-${colors.purple}[$workflow.manifest.name]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-" - } - } else { - log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-" - } - } - - // - // ANSII Colours used for terminal logging - // - public static Map logColours(Boolean monochrome_logs) { - Map colorcodes = [:] - - // Reset / Meta - colorcodes['reset'] = monochrome_logs ? '' : "\033[0m" - colorcodes['bold'] = monochrome_logs ? '' : "\033[1m" - colorcodes['dim'] = monochrome_logs ? '' : "\033[2m" - colorcodes['underlined'] = monochrome_logs ? '' : "\033[4m" - colorcodes['blink'] = monochrome_logs ? '' : "\033[5m" - colorcodes['reverse'] = monochrome_logs ? '' : "\033[7m" - colorcodes['hidden'] = monochrome_logs ? 
'' : "\033[8m" - - // Regular Colors - colorcodes['black'] = monochrome_logs ? '' : "\033[0;30m" - colorcodes['red'] = monochrome_logs ? '' : "\033[0;31m" - colorcodes['green'] = monochrome_logs ? '' : "\033[0;32m" - colorcodes['yellow'] = monochrome_logs ? '' : "\033[0;33m" - colorcodes['blue'] = monochrome_logs ? '' : "\033[0;34m" - colorcodes['purple'] = monochrome_logs ? '' : "\033[0;35m" - colorcodes['cyan'] = monochrome_logs ? '' : "\033[0;36m" - colorcodes['white'] = monochrome_logs ? '' : "\033[0;37m" - - // Bold - colorcodes['bblack'] = monochrome_logs ? '' : "\033[1;30m" - colorcodes['bred'] = monochrome_logs ? '' : "\033[1;31m" - colorcodes['bgreen'] = monochrome_logs ? '' : "\033[1;32m" - colorcodes['byellow'] = monochrome_logs ? '' : "\033[1;33m" - colorcodes['bblue'] = monochrome_logs ? '' : "\033[1;34m" - colorcodes['bpurple'] = monochrome_logs ? '' : "\033[1;35m" - colorcodes['bcyan'] = monochrome_logs ? '' : "\033[1;36m" - colorcodes['bwhite'] = monochrome_logs ? '' : "\033[1;37m" - - // Underline - colorcodes['ublack'] = monochrome_logs ? '' : "\033[4;30m" - colorcodes['ured'] = monochrome_logs ? '' : "\033[4;31m" - colorcodes['ugreen'] = monochrome_logs ? '' : "\033[4;32m" - colorcodes['uyellow'] = monochrome_logs ? '' : "\033[4;33m" - colorcodes['ublue'] = monochrome_logs ? '' : "\033[4;34m" - colorcodes['upurple'] = monochrome_logs ? '' : "\033[4;35m" - colorcodes['ucyan'] = monochrome_logs ? '' : "\033[4;36m" - colorcodes['uwhite'] = monochrome_logs ? '' : "\033[4;37m" - - // High Intensity - colorcodes['iblack'] = monochrome_logs ? '' : "\033[0;90m" - colorcodes['ired'] = monochrome_logs ? '' : "\033[0;91m" - colorcodes['igreen'] = monochrome_logs ? '' : "\033[0;92m" - colorcodes['iyellow'] = monochrome_logs ? '' : "\033[0;93m" - colorcodes['iblue'] = monochrome_logs ? '' : "\033[0;94m" - colorcodes['ipurple'] = monochrome_logs ? '' : "\033[0;95m" - colorcodes['icyan'] = monochrome_logs ? '' : "\033[0;96m" - colorcodes['iwhite'] = monochrome_logs ? '' : "\033[0;97m" - - // Bold High Intensity - colorcodes['biblack'] = monochrome_logs ? '' : "\033[1;90m" - colorcodes['bired'] = monochrome_logs ? '' : "\033[1;91m" - colorcodes['bigreen'] = monochrome_logs ? '' : "\033[1;92m" - colorcodes['biyellow'] = monochrome_logs ? '' : "\033[1;93m" - colorcodes['biblue'] = monochrome_logs ? '' : "\033[1;94m" - colorcodes['bipurple'] = monochrome_logs ? '' : "\033[1;95m" - colorcodes['bicyan'] = monochrome_logs ? '' : "\033[1;96m" - colorcodes['biwhite'] = monochrome_logs ? 
'' : "\033[1;97m" - - return colorcodes - } - - // - // Does what is says on the tin - // - public static String dashedLine(monochrome_logs) { - Map colors = logColours(monochrome_logs) - return "-${colors.dim}----------------------------------------------------${colors.reset}-" - } - - // - // nf-core logo - // - public static String logo(workflow, monochrome_logs) { - Map colors = logColours(monochrome_logs) - String workflow_version = NfcoreTemplate.version(workflow) - String.format( - """\n - ${dashedLine(monochrome_logs)} - ${colors.green},--.${colors.black}/${colors.green},-.${colors.reset} - ${colors.blue} ___ __ __ __ ___ ${colors.green}/,-._.--~\'${colors.reset} - ${colors.blue} |\\ | |__ __ / ` / \\ |__) |__ ${colors.yellow}} {${colors.reset} - ${colors.blue} | \\| | \\__, \\__/ | \\ |___ ${colors.green}\\`-._,-`-,${colors.reset} - ${colors.green}`._,._,\'${colors.reset} - ${colors.purple} ${workflow.manifest.name} ${workflow_version}${colors.reset} - ${dashedLine(monochrome_logs)} - """.stripIndent() - ) - } -} diff --git a/lib/Utils.groovy b/lib/Utils.groovy deleted file mode 100644 index 8d030f4e..00000000 --- a/lib/Utils.groovy +++ /dev/null @@ -1,47 +0,0 @@ -// -// This file holds several Groovy functions that could be useful for any Nextflow pipeline -// - -import org.yaml.snakeyaml.Yaml - -class Utils { - - // - // When running with -profile conda, warn if channels have not been set-up appropriately - // - public static void checkCondaChannels(log) { - Yaml parser = new Yaml() - def channels = [] - try { - def config = parser.load("conda config --show channels".execute().text) - channels = config.channels - } catch(NullPointerException | IOException e) { - log.warn "Could not verify conda channel configuration." - return - } - - // Check that all channels are present - // This channel list is ordered by required channel priority. 
- def required_channels_in_order = ['conda-forge', 'bioconda', 'defaults'] - def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean - - // Check that they are in the right order - def channel_priority_violation = false - def n = required_channels_in_order.size() - for (int i = 0; i < n - 1; i++) { - channel_priority_violation |= !(channels.indexOf(required_channels_in_order[i]) < channels.indexOf(required_channels_in_order[i+1])) - } - - if (channels_missing | channel_priority_violation) { - log.warn "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" + - " There is a problem with your Conda configuration!\n\n" + - " You will need to set-up the conda-forge and bioconda channels correctly.\n" + - " Please refer to https://bioconda.github.io/\n" + - " The observed channel order is \n" + - " ${channels}\n" + - " but the following channel order is required:\n" + - " ${required_channels_in_order}\n" + - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" - } - } -} diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy deleted file mode 100755 index b55ff286..00000000 --- a/lib/WorkflowMain.groovy +++ /dev/null @@ -1,99 +0,0 @@ -// -// This file holds several functions specific to the main.nf workflow in the nf-core/spatialxe pipeline -// - -class WorkflowMain { - - // - // Citation string for pipeline - // - public static String citation(workflow) { - return "If you use ${workflow.manifest.name} for your analysis please cite:\n\n" + - // TODO nf-core: Add Zenodo DOI for pipeline after first release - //"* The pipeline\n" + - //" https://doi.org/10.5281/zenodo.XXXXXXX\n\n" + - "* The nf-core framework\n" + - " https://doi.org/10.1038/s41587-020-0439-x\n\n" + - "* Software dependencies\n" + - " https://github.com/${workflow.manifest.name}/blob/master/CITATIONS.md" - } - - // - // Generate help string - // - public static String help(workflow, params, log) { - def command = "nextflow run ${workflow.manifest.name} --input samplesheet.csv --genome GRCh37 -profile docker" - def help_string = '' - help_string += NfcoreTemplate.logo(workflow, params.monochrome_logs) - help_string += NfcoreSchema.paramsHelp(workflow, params, command) - help_string += '\n' + citation(workflow) + '\n' - help_string += NfcoreTemplate.dashedLine(params.monochrome_logs) - return help_string - } - - // - // Generate parameter summary log string - // - public static String paramsSummaryLog(workflow, params, log) { - def summary_log = '' - summary_log += NfcoreTemplate.logo(workflow, params.monochrome_logs) - summary_log += NfcoreSchema.paramsSummaryLog(workflow, params) - summary_log += '\n' + citation(workflow) + '\n' - summary_log += NfcoreTemplate.dashedLine(params.monochrome_logs) - return summary_log - } - - // - // Validate parameters and print summary to screen - // - public static void initialise(workflow, params, log) { - // Print help to screen if required - if (params.help) { - log.info help(workflow, params, log) - System.exit(0) - } - - // Print workflow version and exit on --version - if (params.version) { - String workflow_version = NfcoreTemplate.version(workflow) - log.info "${workflow.manifest.name} ${workflow_version}" - System.exit(0) - } - - // Print parameter summary log to screen - log.info paramsSummaryLog(workflow, params, log) - - // Validate workflow parameters via the JSON schema - if (params.validate_params) { - NfcoreSchema.validateParameters(workflow, params, log) - } - - // Check that a 
-profile or Nextflow config has been provided to run the pipeline - NfcoreTemplate.checkConfigProvided(workflow, log) - - // Check that conda channels are set-up correctly - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - Utils.checkCondaChannels(log) - } - - // Check AWS batch settings - NfcoreTemplate.awsBatch(workflow, params) - - // Check input has been provided - if (!params.input) { - log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'" - System.exit(1) - } - } - // - // Get attribute from genome config file e.g. fasta - // - public static Object getGenomeAttribute(params, attribute) { - if (params.genomes && params.genome && params.genomes.containsKey(params.genome)) { - if (params.genomes[ params.genome ].containsKey(attribute)) { - return params.genomes[ params.genome ][ attribute ] - } - } - return null - } -} diff --git a/lib/WorkflowSpatialxe.groovy b/lib/WorkflowSpatialxe.groovy deleted file mode 100755 index 576a098f..00000000 --- a/lib/WorkflowSpatialxe.groovy +++ /dev/null @@ -1,77 +0,0 @@ -// -// This file holds several functions specific to the workflow/spatialxe.nf in the nf-core/spatialxe pipeline -// - -import groovy.text.SimpleTemplateEngine - -class WorkflowSpatialxe { - - // - // Check and validate parameters - // - public static void initialise(params, log) { - genomeExistsError(params, log) - - - if (!params.fasta) { - log.error "Genome fasta file not specified with e.g. '--fasta genome.fa' or via a detectable config file." - System.exit(1) - } - } - - // - // Get workflow summary for MultiQC - // - public static String paramsSummaryMultiqc(workflow, summary) { - String summary_section = '' - for (group in summary.keySet()) { - def group_params = summary.get(group) // This gets the parameters of that particular group - if (group_params) { - summary_section += "
    <p style=\"font-size:110%\"><b>$group</b></p>\n"
-                summary_section += "    <dl class=\"dl-horizontal\">\n"
-                for (param in group_params.keySet()) {
-                    summary_section += "        <dt>$param</dt><dd><samp>${group_params.get(param) ?: '<span style=\"color:#999999;\">N/A</span>'}</samp></dd>\n"
-                }
-                summary_section += "    </dl>\n"
-            }
-        }
-
-        String yaml_file_text = "id: '${workflow.manifest.name.replace('/','-')}-summary'\n"
-        yaml_file_text += "description: ' - this information is collected when the pipeline is started.'\n"
-        yaml_file_text += "section_name: '${workflow.manifest.name} Workflow Summary'\n"
-        yaml_file_text += "section_href: 'https://github.com/${workflow.manifest.name}'\n"
-        yaml_file_text += "plot_type: 'html'\n"
-        yaml_file_text += "data: |\n"
-        yaml_file_text += "${summary_section}"
-        return yaml_file_text
-    }
-
-    public static String methodsDescriptionText(run_workflow, mqc_methods_yaml) {
-        // Convert to a named map so can be used as with familar NXF ${workflow} variable syntax in the MultiQC YML file
-        def meta = [:]
-        meta.workflow = run_workflow.toMap()
-        meta["manifest_map"] = run_workflow.manifest.toMap()
-
-        meta["doi_text"] = meta.manifest_map.doi ? "(doi: ${meta.manifest_map.doi})" : ""
-        meta["nodoi_text"] = meta.manifest_map.doi ? "" : "<li>If available, make sure to update the text to include the Zenodo DOI of version of the pipeline used.</li>"
-
-        def methods_text = mqc_methods_yaml.text
-
-        def engine = new SimpleTemplateEngine()
-        def description_html = engine.createTemplate(methods_text).make(meta)
-
-        return description_html
-    }//
-    // Exit pipeline if incorrect --genome key provided
-    //
-    private static void genomeExistsError(params, log) {
-        if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) {
-            log.error "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" +
-                "  Genome '${params.genome}' not found in any config files provided to the pipeline.\n" +
-                "  Currently, the available genome keys are:\n" +
-                "  ${params.genomes.keySet().join(", ")}\n" +
-                "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-            System.exit(1)
-        }
-    }
-}
diff --git a/lib/nfcore_external_java_deps.jar b/lib/nfcore_external_java_deps.jar
deleted file mode 100644
index 805c8bb5..00000000
Binary files a/lib/nfcore_external_java_deps.jar and /dev/null differ
diff --git a/main.nf b/main.nf
index ee5e0f48..14193a1c 100644
--- a/main.nf
+++ b/main.nf
@@ -4,57 +4,139 @@
    nf-core/spatialxe
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Github : https://github.com/nf-core/spatialxe
-    Website: https://nf-co.re/spatialxe
    Slack : https://nfcore.slack.com/channels/spatialxe
----------------------------------------------------------------------------------------
*/

-nextflow.enable.dsl = 2
-
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    GENOME PARAMETER VALUES
+    IMPORT FUNCTIONS / MODULES / SUBWORKFLOWS / WORKFLOWS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/

-params.fasta = WorkflowMain.getGenomeAttribute(params, 'fasta')
+include { SPATIALXE } from './workflows/spatialxe.nf'
+include { PIPELINE_INITIALISATION } from './subworkflows/local/utils_nfcore_spatialxe_pipeline'
+include { PIPELINE_COMPLETION } from './subworkflows/local/utils_nfcore_spatialxe_pipeline'

/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    VALIDATE & PRINT PARAMETER SUMMARY
+    NAMED WORKFLOWS FOR PIPELINE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/

-WorkflowMain.initialise(workflow, params, log)
-
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    NAMED WORKFLOW FOR PIPELINE
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
-
-include { SPATIALXE } from './workflows/spatialxe'
-
//
-// WORKFLOW: Run main nf-core/spatialxe analysis pipeline
+// WORKFLOW: Run main analysis pipeline depending on type of input
//
workflow NFCORE_SPATIALXE {
-    SPATIALXE ()
-}
+    take:
+    samplesheet // channel: samplesheet read in from --input
+
+    main:
+
+    //
+    // WORKFLOW: Run pipeline
+    //
+    SPATIALXE (
+        samplesheet,
+        params.alignment_csv,
+        params.baysor_config,
+        params.baysor_prior,
+        params.baysor_scale,
+        params.baysor_tiling,
+        params.baysor_tiling_scale,
+        params.buffer_samples,
+        params.buffer_size,
+        params.cell_segmentation_only,
+        params.cellpose_downscale,
+        params.cellpose_model,
+        params.expansion_distance,
+        params.features,
+        params.gene_panel,
+        params.gene_synonyms,
+        params.max_x,
+        params.max_y,
+        params.method,
+        params.min_qv,
+        params.min_x,
+        params.min_y,
+        params.mode,
+        params.multiqc_config,
+        params.multiqc_logo,
+        params.multiqc_methods_description,
params.nucleus_segmentation_only, + params.offtarget_probe_tracking, + params.outdir, + params.probes_fasta, + params.qupath_polygons, + params.reference_annotations, + params.relabel_genes, + params.run_qc, + params.segger_model, + params.segmentation_mask, + params.sharpen_tiff, + params.stardist_nuclei_model, + params.tiling, + params.xeniumranger_only, + ) + emit: + multiqc_report = SPATIALXE.out.multiqc_report // channel: /path/to/multiqc_report.html +} /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - RUN ALL WORKFLOWS + RUN MAIN WORKFLOW ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -// -// WORKFLOW: Execute a single named workflow for the pipeline -// See: https://github.com/nf-core/rnaseq/issues/619 -// workflow { - NFCORE_SPATIALXE () + + main: + // + // SUBWORKFLOW: Run initialisation tasks + // + PIPELINE_INITIALISATION ( + params.version, + params.validate_params, + params.monochrome_logs, + args, + params.outdir, + params.input, + params.help, + params.help_full, + params.show_hidden, + params.gene_panel, + params.gene_synonyms, + params.image_seg_methods, + params.method, + params.mode, + params.nucleus_segmentation_only, + params.offtarget_probe_tracking, + params.probes_fasta, + params.reference_annotations, + params.relabel_genes, + params.segmentation_mask, + params.transcript_seg_methods, + ) + + // + // WORKFLOW: Run main workflow + // + NFCORE_SPATIALXE ( + PIPELINE_INITIALISATION.out.samplesheet + ) + // + // SUBWORKFLOW: Run completion tasks + // + PIPELINE_COMPLETION ( + params.email, + params.email_on_fail, + params.plaintext_email, + params.outdir, + params.monochrome_logs, + params.hook_url, + NFCORE_SPATIALXE.out.multiqc_report + ) } /* diff --git a/modules.json b/modules.json index 5654423a..38be2769 100644 --- a/modules.json +++ b/modules.json @@ -7,33 +7,87 @@ "nf-core": { "cellpose": { "branch": "master", - "git_sha": "666652151335353eef2fcd58880bcef5bc2928e1", - "installed_by": ["modules"] + "git_sha": "0780b963d3ab087e861a4b74e9d0e404115e5352", + "installed_by": ["modules"], + "patch": "modules/nf-core/cellpose/cellpose.diff" }, - "custom/dumpsoftwareversions": { + "multiqc": { "branch": "master", - "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c", - "installed_by": ["modules"] + "git_sha": "cb9f4bec379866ca560f7f79d9a9a06bea8c9b51", + "installed_by": ["modules"], + "patch": "modules/nf-core/multiqc/multiqc.diff" }, - "multiqc": { + "opt/flip": { "branch": "master", - "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c", - "installed_by": ["modules"] + "git_sha": "7d3e5c9d3d44", + "installed_by": ["modules"], + "patch": "modules/nf-core/opt/flip/opt-flip.diff" + }, + "opt/stat": { + "branch": "master", + "git_sha": "7d3e5c9d3d44", + "installed_by": ["modules"], + "patch": "modules/nf-core/opt/stat/opt-stat.diff" + }, + "opt/track": { + "branch": "master", + "git_sha": "7d3e5c9d3d44", + "installed_by": ["modules"], + "patch": "modules/nf-core/opt/track/opt-track.diff" + }, + "stardist": { + "branch": "master", + "git_sha": "4e783502ab661bed13f15189401b73c93966831f", + "installed_by": ["modules"], + "patch": "modules/nf-core/stardist/stardist.diff" + }, + "untar": { + "branch": "master", + "git_sha": "447f7bc0fa41dfc2400c8cad4c0291880dc060cf", + "installed_by": ["modules"], + "patch": "modules/nf-core/untar/untar.diff" + }, + "unzip": { + "branch": "master", + "git_sha": "4dd9d8439a429c7ee566e0e2347f76ddeef27e66", + "installed_by": ["modules"], + "patch": 
"modules/nf-core/unzip/unzip.diff" }, "xeniumranger/import-segmentation": { "branch": "master", - "git_sha": "b5e1891a88491d8731b5e68e22bd907726caec4a", - "installed_by": ["modules"] + "git_sha": "39365e944e936511e33b993cdd978e0f12adac9a", + "installed_by": ["modules"], + "patch": "modules/nf-core/xeniumranger/import-segmentation/xeniumranger-import-segmentation.diff" }, "xeniumranger/relabel": { "branch": "master", - "git_sha": "b5e1891a88491d8731b5e68e22bd907726caec4a", + "git_sha": "39365e944e936511e33b993cdd978e0f12adac9a", "installed_by": ["modules"] }, "xeniumranger/resegment": { "branch": "master", - "git_sha": "b5e1891a88491d8731b5e68e22bd907726caec4a", - "installed_by": ["modules"] + "git_sha": "39365e944e936511e33b993cdd978e0f12adac9a", + "installed_by": ["modules"], + "patch": "modules/nf-core/xeniumranger/resegment/xeniumranger-resegment.diff" + } + } + }, + "subworkflows": { + "nf-core": { + "utils_nextflow_pipeline": { + "branch": "master", + "git_sha": "05954dab2ff481bcb999f24455da29a5828af08d", + "installed_by": ["subworkflows"] + }, + "utils_nfcore_pipeline": { + "branch": "master", + "git_sha": "05954dab2ff481bcb999f24455da29a5828af08d", + "installed_by": ["subworkflows"] + }, + "utils_nfschema_plugin": { + "branch": "master", + "git_sha": "4b406a74dc0449c0401ed87d5bfff4252fd277fd", + "installed_by": ["subworkflows"] } } } diff --git a/modules/local/baysor/create_dataset/main.nf b/modules/local/baysor/create_dataset/main.nf new file mode 100644 index 00000000..98046161 --- /dev/null +++ b/modules/local/baysor/create_dataset/main.nf @@ -0,0 +1,45 @@ +process BAYSOR_CREATE_DATASET { + tag "${meta.id}" + label 'process_medium' + + container "khersameesh24/baysor:0.7.1" + + input: + tuple val(meta), path(transcripts) + val sample_fraction + + output: + tuple val(meta), path("${prefix}/sampled_transcripts.csv"), emit: sampled_transcripts + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("BAYSOR_CREATE_DATASET module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + baysor_create_dataset.py \\ + --transcripts ${transcripts} \\ + --sample-fraction ${sample_fraction} \\ + --prefix ${prefix} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("BAYSOR_CREATE_DATASET module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/sampled_transcripts.csv" + """ +} diff --git a/modules/local/baysor/create_dataset/meta.yml b/modules/local/baysor/create_dataset/meta.yml new file mode 100644 index 00000000..2b40e712 --- /dev/null +++ b/modules/local/baysor/create_dataset/meta.yml @@ -0,0 +1,82 @@ +name: "baysor_create_dataset" +description: Subsample a transcripts CSV to create a smaller Baysor input dataset. +keywords: + - xenium + - baysor + - dataset + - transcripts + - subsample +tools: + - "python": + description: | + Python programming language interpreter. 
+      homepage: "https://www.python.org/"
+      documentation: "https://docs.python.org/3/"
+      tool_dev_url: "https://github.com/python/cpython"
+      doi: "no DOI available"
+      licence: ["PSF-2.0"]
+      identifier: ""
+  - "baysor":
+      description: |
+        Bayesian Segmentation of Spatial Transcriptomics Data.
+      homepage: "https://kharchenkolab.github.io/Baysor/dev/"
+      documentation: "https://kharchenkolab.github.io/Baysor/dev/"
+      tool_dev_url: "https://github.com/kharchenkolab/Baysor"
+      doi: "10.1038/s41587-021-01044-w"
+      licence: ["MIT"]
+      identifier: ""
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. [ id:'test' ]
+    - transcripts:
+        type: file
+        description: Transcripts CSV file to subsample for Baysor dataset creation.
+        pattern: "*.csv"
+        ontologies: []
+  - sample_fraction:
+      type: float
+      description: Fraction of transcripts to retain in the subsampled dataset (0-1).
+
+output:
+  sampled_transcripts:
+    - - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. [ id:'test' ]
+      - "${prefix}/sampled_transcripts.csv":
+          type: file
+          description: Subsampled transcripts CSV used as Baysor input dataset.
+          pattern: "*/sampled_transcripts.csv"
+          ontologies: []
+  versions_python:
+    - - ${task.process}:
+          type: string
+          description: The process the versions were collected from
+      - python:
+          type: string
+          description: The tool name
+      - "python3 --version | sed 's/Python //'":
+          type: eval
+          description: The expression to obtain the version of the tool
+
+topics:
+  versions:
+    - - ${task.process}:
+          type: string
+          description: The process the versions were collected from
+      - python:
+          type: string
+          description: The tool name
+      - "python3 --version | sed 's/Python //'":
+          type: eval
+          description: The expression to obtain the version of the tool
+
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/modules/local/baysor/create_dataset/tests/main.nf.test b/modules/local/baysor/create_dataset/tests/main.nf.test
new file mode 100644
index 00000000..41b2742a
--- /dev/null
+++ b/modules/local/baysor/create_dataset/tests/main.nf.test
@@ -0,0 +1,60 @@
+nextflow_process {
+
+    name "Test Process BAYSOR_CREATE_DATASET"
+    script "../main.nf"
+    process "BAYSOR_CREATE_DATASET"
+    config "./nextflow.config"
+
+    tag "modules"
+    tag "modules_local"
+    tag "baysor"
+    tag "baysor/create_dataset"
+
+    test("baysor create dataset - spatial_gene_expression.csv") {
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file(params.modules_testdata_base_path + "spatial_omics/xenium/homo_sapiens/spatial_gene_expression.csv", checkIfExists: true)
+                ])
+                input[1] = 0.3
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert file(process.out.sampled_transcripts[0][1]).exists() },
+                { assert file(process.out.sampled_transcripts[0][1]).name == "sampled_transcripts.csv" },
+                { assert snapshot(process.out.versions_python).match("versions") }
+            )
+        }
+    }
+
+    test("baysor create dataset stub") {
+
+        options "-stub"
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file(params.modules_testdata_base_path + "spatial_omics/xenium/homo_sapiens/spatial_gene_expression.csv", checkIfExists: true)
+                ])
+                input[1] = 0.3
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out.versions_python).match("versions_stub") }
+            )
+        }
+    }
+}
diff --git a/modules/local/baysor/create_dataset/tests/main.nf.test.snap
diff --git a/modules/local/baysor/create_dataset/tests/main.nf.test.snap b/modules/local/baysor/create_dataset/tests/main.nf.test.snap
new file mode 100644
index 00000000..6a0b446e
--- /dev/null
+++ b/modules/local/baysor/create_dataset/tests/main.nf.test.snap
@@ -0,0 +1,34 @@
+{
+    "versions_stub": {
+        "content": [
+            [
+                [
+                    "BAYSOR_CREATE_DATASET",
+                    "python",
+                    "3.11.2"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T03:57:19.078934192"
+    },
+    "versions": {
+        "content": [
+            [
+                [
+                    "BAYSOR_CREATE_DATASET",
+                    "python",
+                    "3.11.2"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T05:05:40.700569871"
+    }
+}
\ No newline at end of file
diff --git a/modules/local/baysor/create_dataset/tests/nextflow.config b/modules/local/baysor/create_dataset/tests/nextflow.config
new file mode 100644
index 00000000..f8b3a30a
--- /dev/null
+++ b/modules/local/baysor/create_dataset/tests/nextflow.config
@@ -0,0 +1,9 @@
+process {
+
+    resourceLimits = [
+        cpus: 4,
+        memory: '8.GB',
+        time: '2.h',
+    ]
+
+}
diff --git a/modules/local/baysor/preprocess/main.nf b/modules/local/baysor/preprocess/main.nf
new file mode 100644
index 00000000..cfe6fe3b
--- /dev/null
+++ b/modules/local/baysor/preprocess/main.nf
@@ -0,0 +1,55 @@
+process BAYSOR_PREPROCESS_TRANSCRIPTS {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/94/9409ce399922a5746bea1b7df5668c3d1d79b9af49a15950d9818c4fe45ac749/data' :
+        'community.wave.seqera.io/library/pandas_procs_pyarrow:d8f882b65dfea451' }"
+
+    input:
+    tuple val(meta), path(transcripts)
+    val min_qv
+    val max_x
+    val min_x
+    val max_y
+    val min_y
+
+    output:
+    tuple val(meta), path("${prefix}/filtered_transcripts.csv"), emit: transcripts_file
+    tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_PREPROCESS_TRANSCRIPTS module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    baysor_preprocess_transcripts.py \\
+        --transcripts ${transcripts} \\
+        --prefix ${prefix} \\
+        --min-qv ${min_qv} \\
+        --min-x ${min_x} \\
+        --max-x ${max_x} \\
+        --min-y ${min_y} \\
+        --max-y ${max_y}
+    """
+
+    stub:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_PREPROCESS_TRANSCRIPTS module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    mkdir -p ${prefix}
+    touch ${prefix}/filtered_transcripts.csv
+    """
+}
diff --git a/modules/local/baysor/preprocess/meta.yml b/modules/local/baysor/preprocess/meta.yml
new file mode 100644
index 00000000..a6fc27d0
--- /dev/null
+++ b/modules/local/baysor/preprocess/meta.yml
@@ -0,0 +1,70 @@
+name: "baysor_preprocess"
+description: Filter the transcripts.parquet file based on the specified thresholds
+keywords:
+  - baysor
+  - transcripts
+  - filter_transcripts
+tools:
+  - "baysor":
+    description: "Baysor is a tool that segments cells using spatial gene expression maps. Optionally, segmentation masks can be given as additional input."
+    homepage: "https://kharchenkolab.github.io/Baysor/dev/"
+    documentation: "https://kharchenkolab.github.io/Baysor/dev/"
+    tool_dev_url: "https://github.com/kharchenkolab/Baysor"
+    doi: "10.1038/s41587-021-01044-w"
+    licence: ["MIT"]
+    identifier: ""
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. [ id:'test' ]
+    - transcripts:
+        type: file
+        description: transcripts.parquet file from the Xenium bundle
+        pattern: "*.parquet"
+
+  - min_qv:
+      type: float
+      description: Minimum Q-Score a transcript must have to pass filtering (default - 20.0)
+  - max_x:
+      type: float
+      description: Only keep transcripts whose x-coordinate is less than the specified limit; if no limit is specified, the default value (24000.0) retains all transcripts, since a Xenium slide is <24000 microns in x and y
+  - min_x:
+      type: float
+      description: Only keep transcripts whose x-coordinate is greater than the specified limit; if no limit is specified, the default minimum value is 0.0
+  - max_y:
+      type: float
+      description: Only keep transcripts whose y-coordinate is less than the specified limit; if no limit is specified, the default value (24000.0) retains all transcripts, since a Xenium slide is <24000 microns in x and y
+  - min_y:
+      type: float
+      description: Only keep transcripts whose y-coordinate is greater than the specified limit; if no limit is specified, the default minimum value is 0.0
+
+output:
+  - - transcripts_file:
+      - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. [ id:'test' ]
+      - "*.csv":
+          type: file
+          description: filtered transcripts CSV (for Baysor 0.7.1 Parquet.jl compatibility)
+          pattern: "filtered_transcripts.csv"
+
+  - - versions_python:
+        type: tuple
+        description: Python version emitted via topic channel
+        pattern: "topic: versions"
+
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/modules/local/baysor/preprocess/tests/main.nf.test b/modules/local/baysor/preprocess/tests/main.nf.test
new file mode 100644
index 00000000..f3112c25
--- /dev/null
+++ b/modules/local/baysor/preprocess/tests/main.nf.test
@@ -0,0 +1,68 @@
+nextflow_process {
+
+    name "Test Process BAYSOR PREPROCESS TRANSCRIPTS"
+    script "../main.nf"
+    process "BAYSOR_PREPROCESS_TRANSCRIPTS"
+    config "./nextflow.config"
+
+    tag "modules"
+    tag "modules_local"
+    tag "baysor"
+    tag "baysor/preprocess"
+
+    test("baysor preprocess transcripts - transcripts.parquet") {
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true)
+                ])
+                input[1] = 20
+                input[2] = 24000.0
+                input[3] = 0.0
+                input[4] = 24000.0
+                input[5] = 0.0
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert file(process.out.transcripts_file[0][1]).exists() },
+                { assert file(process.out.transcripts_file[0][1]).name == "filtered_transcripts.csv" },
+                { assert snapshot(process.out.versions_python).match("versions") }
+            )
+        }
+    }
+
+    test("baysor preprocess transcripts stub") {
+
+        options "-stub"
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true)
+                ])
+                input[1] = 20
+                input[2] = 24000.0
+                input[3] = 0.0
+                input[4] = 24000.0
+                input[5] = 0.0
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out.versions_python).match("versions_stub") }
+            )
+        }
+    }
+}
diff --git a/modules/local/baysor/preprocess/tests/main.nf.test.snap b/modules/local/baysor/preprocess/tests/main.nf.test.snap
new file mode 100644
index 00000000..1baeceb1
--- /dev/null
+++ b/modules/local/baysor/preprocess/tests/main.nf.test.snap
@@ -0,0 +1,34 @@
+{
+    "versions_stub": {
+        "content": [
+            [
+                [
+                    "BAYSOR_PREPROCESS_TRANSCRIPTS",
+                    "python",
+                    "3.14.4"
+                ]
+            ]
+        ],
+        "timestamp": "2026-04-29T18:32:45.74436808",
+        "meta": {
+            "nf-test": "0.9.5",
+            "nextflow": "25.10.4"
+        }
+    },
+    "versions": {
+        "content": [
+            [
+                [
+                    "BAYSOR_PREPROCESS_TRANSCRIPTS",
+                    "python",
+                    "3.14.4"
+                ]
+            ]
+        ],
+        "timestamp": "2026-04-29T18:32:39.447114547",
+        "meta": {
+            "nf-test": "0.9.5",
+            "nextflow": "25.10.4"
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/local/baysor/preprocess/tests/nextflow.config b/modules/local/baysor/preprocess/tests/nextflow.config
new file mode 100644
index 00000000..f8b3a30a
--- /dev/null
+++ b/modules/local/baysor/preprocess/tests/nextflow.config
@@ -0,0 +1,9 @@
+process {
+
+    resourceLimits = [
+        cpus: 4,
+        memory: '8.GB',
+        time: '2.h',
+    ]
+
+}
diff --git a/modules/local/baysor/preview/main.nf b/modules/local/baysor/preview/main.nf
new file mode 100644
index 00000000..b47bf43c
--- /dev/null
+++ b/modules/local/baysor/preview/main.nf
@@ -0,0 +1,50 @@
+process BAYSOR_PREVIEW {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    container "khersameesh24/baysor:0.7.1"
+
+    input:
+    tuple val(meta), path(transcripts), path(config)
+
+    output:
+    tuple val(meta), path("${prefix}/preview.html"), emit: preview_html
+    tuple val("${task.process}"), val('baysor'), eval("baysor --version 2>&1 | grep -oP '\\d+\\.\\d+\\.\\d+' || echo unknown"), topic: versions, emit: versions_baysor
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_PREVIEW module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    def args = task.ext.args ?: ''
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    export JULIA_NUM_THREADS=${task.cpus}
+
+    mkdir -p ${prefix}
+
+    baysor preview \\
+        ${transcripts} \\
+        --config ${config} \\
+        --output ${prefix}/preview.html \\
+        ${args}
+    """
+
+    stub:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_PREVIEW module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    mkdir -p ${prefix}
+    touch ${prefix}/preview.html
+    """
+}
diff --git a/modules/local/baysor/preview/meta.yml b/modules/local/baysor/preview/meta.yml
new file mode 100644
index 00000000..60596066
--- /dev/null
+++ b/modules/local/baysor/preview/meta.yml
@@ -0,0 +1,56 @@
+name: "baysor_preview"
+description: Preview run to visualize the dataset before segmentation.
+keywords:
+  - exploratory-data-analysis
+tools:
+  - "baysor":
+    description: "Baysor is a tool that segments cells using spatial gene expression maps. Optionally, segmentation masks can be given as additional input."
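+    # A minimal wiring sketch (hypothetical channel names, not part of this module):
+    # assuming ch_transcripts emits [ meta, transcripts.csv ] and ch_config emits
+    # [ meta, config.toml ], join on meta and invoke the process:
+    #   BAYSOR_PREVIEW( ch_transcripts.join(ch_config) )
+    #   BAYSOR_PREVIEW.out.preview_html.view()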
+ homepage: "https://kharchenkolab.github.io/Baysor/dev/" + documentation: "https://kharchenkolab.github.io/Baysor/dev/" + tool_dev_url: "https://github.com/kharchenkolab/Baysor" + doi: "https://doi.org/10.1038/s41587-021-01044-w" + licence: ["MIT license"] + identifier: + +## baysor_preview requires a transcript map of the data and a configuration file with argument values +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts_csv: + type: file + description: CSV file + pattern: "*.csv" + + - config_toml: + type: file + description: TOML file with config arguments + pattern: "*.toml" + +## segmentation results +output: + - preview_html: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "preview.html": + type: file + description: segmentation preview + pattern: "preview.html" + + - preview_log: + type: file + description: | + Log file with summary statistics of preview + pattern: "preview_preview_log.log" + +authors: + - "@sebgoti8" + - "@khersameesh24" +maintainers: + - "@sebgoti8" + - "@khersameesh24" diff --git a/modules/local/baysor/preview/tests/main.nf.test b/modules/local/baysor/preview/tests/main.nf.test new file mode 100644 index 00000000..d3f522d7 --- /dev/null +++ b/modules/local/baysor/preview/tests/main.nf.test @@ -0,0 +1,60 @@ +nextflow_process { + + name "Test Process BAYSOR PREVIEW" + script "../main.nf" + process "BAYSOR_PREVIEW" + + tag "modules" + tag "modules_local" + tag "baysor" + tag "baysor/preview" + tag "preview" + + test("baysor preview - transcripts.parquet") { + + when { + process { + """ + input[0] = channel.of([ + [id: "test_run_baysor"], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true), + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true) + ]) + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert file(process.out.preview_html[0][1]).exists() }, + { assert file(process.out.preview_html[0][1]).name == "preview.html" }, + { assert snapshot(process.out.versions_baysor).match("versions") } + ) + } + } + + test("baysor preview stub") { + + options "-stub" + + when { + process { + """ + input[0] = channel.of([ + [id: "test_run_baysor"], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true), + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true) + ]) + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out.versions_baysor).match("versions_stub") } + ) + } + } +} diff --git a/modules/local/baysor/preview/tests/main.nf.test.snap b/modules/local/baysor/preview/tests/main.nf.test.snap new file mode 100644 index 00000000..08ec9503 --- /dev/null +++ b/modules/local/baysor/preview/tests/main.nf.test.snap @@ -0,0 +1,34 @@ +{ + "versions_stub": { + "content": [ + [ + [ + "BAYSOR_PREVIEW", + "baysor", + "0.7.1" + ] + ] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-21T03:58:47.202659888" + }, + "versions": { + "content": [ + [ + [ + "BAYSOR_PREVIEW", + "baysor", + "0.7.1" + ] + ] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-21T03:58:35.006511807" + } +} \ No newline at end of file diff --git 
a/modules/local/baysor/preview/tests/nextflow.config b/modules/local/baysor/preview/tests/nextflow.config new file mode 100644 index 00000000..f8b3a30a --- /dev/null +++ b/modules/local/baysor/preview/tests/nextflow.config @@ -0,0 +1,9 @@ +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} diff --git a/modules/local/baysor/run/main.nf b/modules/local/baysor/run/main.nf new file mode 100644 index 00000000..b6bdcbaa --- /dev/null +++ b/modules/local/baysor/run/main.nf @@ -0,0 +1,67 @@ +process BAYSOR_RUN { + tag "${meta.id}" + label 'process_high' + + container "khersameesh24/baysor:0.7.1" + + input: + tuple val(meta), path(transcripts), path(prior_segmentation), path(config), val(scale) + + output: + tuple val(meta), path("${prefix}/segmentation.csv"), path("${prefix}/segmentation_polygons_2d.json"), emit: segmentation + tuple val("${task.process}"), val('baysor'), eval("baysor --version 2>&1 | grep -oP '\\d+\\.\\d+\\.\\d+' || echo unknown"), topic: versions, emit: versions_baysor + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("BAYSOR_RUN module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + def args = task.ext.args ?: '' + // Column-based prior (e.g. :cell_id) takes precedence over file-based prior + def prior_col = task.ext.prior_column ? ":${task.ext.prior_column}" : '' + def prior_seg = prior_col ?: (prior_segmentation ? prior_segmentation : '') + def confidence = task.ext.prior_confidence != null ? "--prior-segmentation-confidence=${task.ext.prior_confidence}" : '' + def scaling_factor = scale ? "--scale=${scale}" : '' + def config_arg = config ? "--config=${config}" : '' + prefix = task.ext.prefix ?: "${meta.id}" + + // Build command parts, filtering out empty strings + def cmd_parts = [ + "baysor run", + "${transcripts}", + prior_seg, + scaling_factor, + confidence, + "--output=\"${prefix}/segmentation.csv\"", + config_arg, + "--plot", + "--polygon-format=GeometryCollectionLegacy", + args + ].findAll { it } + + """ + export JULIA_NUM_THREADS=${task.cpus} + + mkdir -p ${prefix} + + ${cmd_parts.join(' \\\n ')} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("BAYSOR_RUN module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/segmentation.csv" + touch "${prefix}/segmentation_polygons_2d.json" + """ +} diff --git a/modules/local/baysor/run/meta.yml b/modules/local/baysor/run/meta.yml new file mode 100644 index 00000000..455bade3 --- /dev/null +++ b/modules/local/baysor/run/meta.yml @@ -0,0 +1,88 @@ +name: "baysor_run" +description: Bayesian segmentation of spatial transcriptomics data. +keywords: + - segmentation + - spatial transcriptomics + - cell clustering + - imaging +tools: + - "baysor": + description: "Baysor is a tool that segments cells using spatial gene expression maps. Optionally, segmentation masks can be given as additional input." 
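+    # How the module resolves a prior segmentation (see main.nf): a column-based prior
+    # set via task.ext.prior_column (passed to baysor as `:<column>`, e.g. `:cell_id`)
+    # takes precedence over the file-based prior input, and task.ext.prior_confidence
+    # maps to --prior-segmentation-confidence. A hypothetical process-scope config sketch:
+    #   withName: 'BAYSOR_RUN' {
+    #       ext.prior_column     = 'cell_id'
+    #       ext.prior_confidence = 0.5
+    #   }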
+ homepage: "https://kharchenkolab.github.io/Baysor/dev/" + documentation: "https://kharchenkolab.github.io/Baysor/dev/" + tool_dev_url: "https://github.com/kharchenkolab/Baysor" + doi: "https://doi.org/10.1038/s41587-021-01044-w" + licence: ["MIT license"] + identifier: + +## Baysor requires a transcript map of the data and a configuration file with argument values +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts_csv: + type: file + description: CSV file + pattern: "*.csv" + + - config_toml: + type: file + description: TOML file with config arguments + pattern: "*.toml" + +## segmentation results +output: + - segmentation: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*segmentation.csv": + type: file + description: results of segmentation + pattern: "segmentation.csv" + + - polygons: + type: file + description: | + File with outlines of segmentation + pattern: "*.json" + + - params: + type: file + description: | + File with full list of parameters used for the model + pattern: "segmentation_params.dump.toml" + + - log: + type: file + description: | + Output file with metadata of running the workflow + pattern: "segmentation_log.log" + + - loom: + type: file + description: | + Loom file with metadata + pattern: "segmentation_counts.loom" + + - stats: + type: file + description: | + Statistics of segmented cells + pattern: "segmentation_cell_stats.csv" + + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@sebgoti8" + - "@khersameesh24" +maintainers: + - "@sebgoti8" + - "@khersameesh24" diff --git a/modules/local/baysor/run/tests/main.nf.test b/modules/local/baysor/run/tests/main.nf.test new file mode 100644 index 00000000..37cda127 --- /dev/null +++ b/modules/local/baysor/run/tests/main.nf.test @@ -0,0 +1,67 @@ +nextflow_process { + + name "Test Process BAYSOR RUN" + script "../main.nf" + process "BAYSOR_RUN" + + tag "modules" + tag "modules_local" + tag "baysor" + tag "baysor/run" + tag "segmentation" + tag "cell_segmentation" + + test("baysor run - transcripts.parquet") { + + when { + process { + """ + input[0] = channel.of([ + [id: "test_run_baysor"], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true), + [], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true), + 30 + ]) + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert file(process.out.segmentation[0][1]).exists() }, + { assert file(process.out.segmentation[0][1]).name == "segmentation.csv" }, + { assert file(process.out.segmentation[0][2]).exists() }, + { assert file(process.out.segmentation[0][2]).name == "segmentation_polygons_2d.json" }, + { assert snapshot(process.out.versions_baysor).match("versions") } + ) + } + } + + test("baysor run stub") { + + options "-stub" + + when { + process { + """ + input[0] = channel.of([ + [id: "test_run_baysor"], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true), + [], + file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true), + 30 + ]) + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out.versions_baysor).match("versions_stub") } + ) + } + } +} diff --git 
diff --git a/modules/local/baysor/run/tests/main.nf.test.snap b/modules/local/baysor/run/tests/main.nf.test.snap
new file mode 100644
index 00000000..73d72688
--- /dev/null
+++ b/modules/local/baysor/run/tests/main.nf.test.snap
@@ -0,0 +1,34 @@
+{
+    "versions_stub": {
+        "content": [
+            [
+                [
+                    "BAYSOR_RUN",
+                    "baysor",
+                    "0.7.1"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T03:59:56.882483481"
+    },
+    "versions": {
+        "content": [
+            [
+                [
+                    "BAYSOR_RUN",
+                    "baysor",
+                    "0.7.1"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T03:59:44.964721046"
+    }
+}
\ No newline at end of file
diff --git a/modules/local/baysor/segfree/main.nf b/modules/local/baysor/segfree/main.nf
new file mode 100644
index 00000000..eb41fa87
--- /dev/null
+++ b/modules/local/baysor/segfree/main.nf
@@ -0,0 +1,50 @@
+process BAYSOR_SEGFREE {
+    tag "${meta.id}"
+    label 'process_high'
+
+    container "khersameesh24/baysor:0.7.1"
+
+    input:
+    tuple val(meta), path(transcripts), path(config)
+
+    output:
+    tuple val(meta), path("${prefix}/ncvs.loom"), emit: ncvs
+    tuple val("${task.process}"), val('baysor'), eval("baysor --version 2>&1 | grep -oP '\\d+\\.\\d+\\.\\d+' || echo unknown"), topic: versions, emit: versions_baysor
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_SEGFREE module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    def args = task.ext.args ?: ''
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    export JULIA_NUM_THREADS=${task.cpus}
+
+    mkdir -p ${prefix}
+
+    baysor segfree \\
+        ${transcripts} \\
+        --config ${config} \\
+        --output=${prefix}/ncvs.loom \\
+        ${args}
+    """
+
+    stub:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("BAYSOR_SEGFREE module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    mkdir -p ${prefix}
+    touch "${prefix}/ncvs.loom"
+    """
+}
diff --git a/modules/local/baysor/segfree/meta.yml b/modules/local/baysor/segfree/meta.yml
new file mode 100644
index 00000000..9161c1ce
--- /dev/null
+++ b/modules/local/baysor/segfree/meta.yml
@@ -0,0 +1,55 @@
+name: "baysor_segfree"
+description: Extract neighborhood composition vectors (NCVs) from a dataset.
+keywords:
+  - neighborhood
+tools:
+  - "baysor":
+    description: "Baysor is a tool that segments cells using spatial gene expression maps. Optionally, segmentation masks can be given as additional input."
+    homepage: "https://kharchenkolab.github.io/Baysor/dev/"
+    documentation: "https://kharchenkolab.github.io/Baysor/dev/"
+    tool_dev_url: "https://github.com/kharchenkolab/Baysor"
+    doi: "10.1038/s41587-021-01044-w"
+    licence: ["MIT"]
+    identifier: ""
+
+## baysor_segfree requires a transcript map of the data and a configuration file with argument values
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. [ id:'test' ]
+    - transcripts_csv:
+        type: file
+        description: Transcripts CSV file
+        pattern: "*.csv"
+    - config_toml:
+        type: file
+        description: TOML file with config arguments
+        pattern: "*.toml"
+
+## neighborhood composition vectors
+output:
+  - ncvs:
+      - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. [ id:'test' ]
+      - "ncvs.loom":
+          type: file
+          description: Loom file with the neighborhood composition vectors
+          pattern: "ncvs.loom"
+
+authors:
+  - "@sebgoti8"
+  - "@khersameesh24"
+maintainers:
+  - "@sebgoti8"
+  - "@khersameesh24"
diff --git a/modules/local/baysor/segfree/tests/main.nf.test b/modules/local/baysor/segfree/tests/main.nf.test
new file mode 100644
index 00000000..1fb7d2bc
--- /dev/null
+++ b/modules/local/baysor/segfree/tests/main.nf.test
@@ -0,0 +1,60 @@
+nextflow_process {
+
+    name "Test Process BAYSOR SEGFREE"
+    script "../main.nf"
+    process "BAYSOR_SEGFREE"
+
+    tag "modules"
+    tag "modules_local"
+    tag "baysor"
+    tag "baysor/segfree"
+    tag "segmentation-free"
+
+    test("baysor segfree - transcripts.parquet") {
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true),
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true)
+                ])
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert file(process.out.ncvs[0][1]).exists() },
+                { assert file(process.out.ncvs[0][1]).name == "ncvs.loom" },
+                { assert snapshot(process.out.versions_baysor).match("versions") }
+            )
+        }
+    }
+
+    test("baysor segfree stub") {
+
+        options "-stub"
+
+        when {
+            process {
+                """
+                input[0] = channel.of([
+                    [id: "test_run_baysor"],
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/transcripts.parquet", checkIfExists: true),
+                    file("https://raw.githubusercontent.com/khersameesh24/test-datasets/baysor/config/xenium.toml", checkIfExists: true)
+                ])
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out.versions_baysor).match("versions_stub") }
+            )
+        }
+    }
+}
diff --git a/modules/local/baysor/segfree/tests/main.nf.test.snap b/modules/local/baysor/segfree/tests/main.nf.test.snap
new file mode 100644
index 00000000..075e13ea
--- /dev/null
+++ b/modules/local/baysor/segfree/tests/main.nf.test.snap
@@ -0,0 +1,34 @@
+{
+    "versions_stub": {
+        "content": [
+            [
+                [
+                    "BAYSOR_SEGFREE",
+                    "baysor",
+                    "0.7.1"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T04:00:54.745153684"
+    },
+    "versions": {
+        "content": [
+            [
+                [
+                    "BAYSOR_SEGFREE",
+                    "baysor",
+                    "0.7.1"
+                ]
+            ]
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T04:00:42.542466144"
+    }
+}
\ No newline at end of file
diff --git a/modules/local/ficture/model/main.nf b/modules/local/ficture/model/main.nf
new file mode 100644
index 00000000..34c2dbc0
--- /dev/null
+++ b/modules/local/ficture/model/main.nf
@@ -0,0 +1,43 @@
+process FICTURE {
+    tag "$meta.id"
+    label 'process_high'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/08/08f94799a8abd47d274654c49ed5ae225811b8a64bc9788739f4c5d23fa08230/data' :
+        'community.wave.seqera.io/library/pip_ficture:ad8a1265a51b53cf' }"
+
+    input:
+    tuple val(meta), path(transcripts)
+    path(coordinate_minmax)
+    path(features)
+
+    output:
+    tuple val(meta), path("results/**"), emit: results
+    tuple val("${task.process}"), val('ficture'), eval("pip show ficture | sed -n 's/^Version: //p'"), topic: versions, emit: versions_ficture
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def features_list = features ? "--in-feature ${features}" : ""
+
+    """
+    ficture run_together \\
+        --in-tsv ${transcripts} \\
+        --in-minmax ${coordinate_minmax} \\
+        ${features_list} \\
+        --out-dir results \\
+        --train-width 12,18 \\
+        --n-factor 6,12 \\
+        --n-jobs ${task.cpus} \\
+        --plot-each-factor \\
+        --all \\
+        ${args}
+    """
+
+    stub:
+    """
+    mkdir -p results/
+    """
+}
diff --git a/modules/local/ficture/model/meta.yml b/modules/local/ficture/model/meta.yml
new file mode 100644
index 00000000..5b2abcc1
--- /dev/null
+++ b/modules/local/ficture/model/meta.yml
@@ -0,0 +1,47 @@
+name: ficture
+description: FICTURE is a software tool that performs segmentation-free, submicron-resolution analysis of spatial transcriptomics data.
+keywords:
+  - spatial
+  - segmentation free
+  - imaging
+tools:
+  - ficture:
+      description: |
+        FICTURE is a software tool that performs segmentation-free, submicron-resolution
+        analysis of spatial transcriptomics data. This tool executes ficture itself.
+      homepage: "https://seqscope.github.io/ficture/"
+      documentation: "https://seqscope.github.io/ficture/"
+      tool_dev_url: "https://github.com/seqscope/ficture"
+      licence:
+        - "Attribution-NonCommercial 4.0 International"
+      identifier: ""
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing run information
+          e.g. [id:'sample']
+    - transcripts:
+        type: file
+        description: File containing the molecule- or pixel-level information; the required columns are X, Y, gene, and Count (other columns are ignored; a minimal example is sketched after this input list).
+  - - coordinate_minmax:
+        type: file
+        description: File listing the min and max of the coordinates, in micrometers (primarily for visualizing very large tissue regions, where the data is not read all at once but the image dimensions must be known).
+  - - features:
+        type: file
+        description: File with the (unique) names of genes to use in the analysis. The only required column is gene (including the header), and gene names must match the gene column in the transcripts file. Negative control probes, or any genes to exclude, can be filtered out here.
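+# A minimal sketch of the molecule-level input described above (the column names X, Y,
+# gene, Count are required; the gene symbols shown are hypothetical; coordinates in micrometers):
+#   X       Y       gene    Count
+#   1200.5  340.2   ACTB    2
+#   1201.1  341.0   GAPDH   1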
+output:
+  - results:
+      type: file
+      description: Files containing the results of FICTURE
+      pattern: "results/**"
+  - versions:
+      type: tuple
+      description: Software versions emitted via the versions topic channel
+      pattern: "topic: versions"
+authors:
+  - "@khersameesh24"
+  - "@heylf"
+maintainers:
+  - "@khersameesh24"
+  - "@heylf"
diff --git a/modules/local/ficture/preprocess/main.nf b/modules/local/ficture/preprocess/main.nf
new file mode 100644
index 00000000..7ec3c081
--- /dev/null
+++ b/modules/local/ficture/preprocess/main.nf
@@ -0,0 +1,39 @@
+process FICTURE_PREPROCESS {
+    tag "$meta.id"
+    label 'process_high'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/08/08f94799a8abd47d274654c49ed5ae225811b8a64bc9788739f4c5d23fa08230/data' :
+        'community.wave.seqera.io/library/pip_ficture:ad8a1265a51b53cf' }"
+
+    input:
+    tuple val(meta), path(transcripts)
+    path(features)
+
+    output:
+    tuple val(meta), path("*processed_transcripts.tsv.gz"), emit: transcripts
+    path("*coordinate_minmax.tsv")                        , emit: coordinate_minmax
+    path("*feature.clean.tsv.gz")                         , optional:true, emit: features
+    tuple val("${task.process}"), val('ficture'), eval("pip show ficture | sed -n 's/^Version: //p'"), topic: versions, emit: versions_ficture
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def features_arg = features ? "--features ${features}" : ""
+
+    """
+    ficture_preprocess.py \\
+        --transcripts ${transcripts} \\
+        ${features_arg} \\
+        --negative-control-regex '${params.negative_control_regex}' \\
+        ${args}
+    """
+
+    stub:
+    """
+    touch processed_transcripts.tsv.gz
+    touch coordinate_minmax.tsv
+    """
+}
diff --git a/modules/local/ficture/preprocess/meta.yml b/modules/local/ficture/preprocess/meta.yml
new file mode 100644
index 00000000..89366c32
--- /dev/null
+++ b/modules/local/ficture/preprocess/meta.yml
@@ -0,0 +1,53 @@
+name: ficture_preprocess
+description: FICTURE is a software tool that performs segmentation-free, submicron-resolution analysis of spatial transcriptomics data.
+keywords:
+  - spatial
+  - segmentation free
+  - imaging
+  - preprocessing
+tools:
+  - ficture:
+      description: |
+        FICTURE is a software tool that performs segmentation-free, submicron-resolution
+        analysis of spatial transcriptomics data. This tool executes ficture itself.
+      homepage: "https://seqscope.github.io/ficture/"
+      documentation: "https://seqscope.github.io/ficture/"
+      tool_dev_url: "https://github.com/seqscope/ficture"
+      licence:
+        - "Attribution-NonCommercial 4.0 International"
+      identifier: ""
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing run information
+          e.g. [id:'sample']
+    - transcripts:
+        type: file
+        description: File containing the molecule- or pixel-level information; the required columns are X, Y, gene, and Count (other columns are ignored).
+  - - features:
+        type: file
+        description: A text file with the (unique) names of genes to use in the analysis; negative control probes, or any genes to exclude, can be filtered out here. One gene per line, no header (an example is sketched below).
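+# An example features file for the input described above (hypothetical gene symbols;
+# one gene per line, no header):
+#   ACTB
+#   GAPDH
+#   EPCAM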
+output:
+  - transcripts:
+      type: file
+      description: Transcript file used for FICTURE
+      pattern: "processed_transcripts.tsv.gz"
+  - coordinate_minmax:
+      type: file
+      description: File listing the min and max of the coordinates, used for FICTURE
+      pattern: "coordinate_minmax.tsv"
+  - features:
+      type: file
+      description: File with the (unique) names of genes to be used by FICTURE
+      pattern: "feature.clean.tsv.gz"
+  - versions:
+      type: tuple
+      description: Software versions emitted via the versions topic channel
+      pattern: "topic: versions"
+authors:
+  - "@khersameesh24"
+  - "@heylf"
+maintainers:
+  - "@khersameesh24"
+  - "@heylf"
diff --git a/modules/local/parquet_to_csv/main.nf b/modules/local/parquet_to_csv/main.nf
new file mode 100644
index 00000000..abfb246e
--- /dev/null
+++ b/modules/local/parquet_to_csv/main.nf
@@ -0,0 +1,44 @@
+/*
+ * PARQUET_TO_CSV: Convert parquet file to CSV for tools with old Parquet readers.
+ *
+ * Input:
+ *  - meta: Sample metadata map
+ *  - parquet: Parquet file to convert
+ *
+ * Output:
+ *  - csv: Converted CSV file
+ *  - versions: Software versions
+ */
+process PARQUET_TO_CSV {
+    tag "$meta.id"
+    label 'process_low'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/f9/f9c8f3a2de4e2aa94500011f7d7d09276e9b6f2d79ee8737c9098fe22d4649bc/data' :
+        'community.wave.seqera.io/library/sopa_procps-ng_pyarrow:c9ce8cd2ede79d72' }"
+
+    input:
+    tuple val(meta), path(parquet)
+
+    output:
+    tuple val(meta), path("transcripts.csv"), emit: csv
+    tuple val("${task.process}"), val('pyarrow'), eval("python3 -c 'import pyarrow; print(pyarrow.__version__)'"), topic: versions, emit: versions_pyarrow
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    """
+    python3 -c "
+import pyarrow.parquet as pq
+import pyarrow.csv as pa_csv
+t = pq.read_table('${parquet}')
+pa_csv.write_csv(t, 'transcripts.csv')
+"
+    """
+
+    stub:
+    """
+    touch transcripts.csv
+    """
+}
diff --git a/modules/local/parquet_to_csv/meta.yml b/modules/local/parquet_to_csv/meta.yml
new file mode 100644
index 00000000..c3ab2506
--- /dev/null
+++ b/modules/local/parquet_to_csv/meta.yml
@@ -0,0 +1,70 @@
+name: "parquet_to_csv"
+description: Convert parquet file to CSV for tools with old Parquet readers.
+keywords:
+  - xenium
+  - parquet
+  - csv
+  - transcripts
+tools:
+  - "pyarrow":
+    description: |
+      Python library providing a Pythonic API for Apache Arrow,
+      including fast Parquet and CSV I/O.
+    homepage: "https://arrow.apache.org/docs/python/"
+    documentation: "https://arrow.apache.org/docs/python/"
+    tool_dev_url: "https://github.com/apache/arrow"
+    doi: "no DOI available"
+    licence: ["Apache-2.0"]
+    identifier: ""
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. [ id:'test' ]
+    - parquet:
+        type: file
+        description: Parquet file to convert.
+        pattern: "*.parquet"
+        ontologies: []
+
+output:
+  csv:
+    - - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. [ id:'test' ]
+      - "transcripts.csv":
+          type: file
+          description: Converted CSV file.
+ pattern: "transcripts.csv" + ontologies: [] + versions_pyarrow: + - - ${task.process}: + type: string + description: The process the versions were collected from + - pyarrow: + type: string + description: The tool name + - "python3 -c 'import pyarrow; print(pyarrow.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - pyarrow: + type: string + description: The tool name + - "python3 -c 'import pyarrow; print(pyarrow.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/proseg/main.nf b/modules/local/proseg/main.nf deleted file mode 100644 index d67abdd7..00000000 --- a/modules/local/proseg/main.nf +++ /dev/null @@ -1,82 +0,0 @@ -process PROSEG { - tag "$meta.id" - label 'process_high' - - container "nf-core/proseg:1.1.8" - - input: - tuple val(meta), path(transcripts) - - output: - tuple val(meta), path("cell-polygons.geojson.gz"), emit: cell_polygons_2d - path("expected-counts.csv.gz"), emit: expected_counts - path("cell-metadata.csv.gz"), emit: cell_metadata - path("transcript-metadata.csv.gz"), emit: transcript_metadata - path("gene-metadata.csv.gz"), emit: gene_metadata - path("rates.csv.gz"), emit: rates - path("cell-polygons-layers.geojson.gz"), emit: cell_polygons_layers - path("cell-hulls.geojson.gz"), emit: cell_hulls - path("versions.yml"), emit: versions - - when: - task.ext.when == null || task.ext.when - - script: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead." - } - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" - def platform = preset ? "${params.preset}" : "" - - // check for preset values - if (!(platform in ['xenium', 'cosmx', 'merscope'])) { - error "${platform} is an invalid platform (preset) type. Please specify xenium, cosmx, or merscope" - } - - """ - proseg \\ - --${preset} \\ - ${transcripts} \\ - --nthreads ${task.cpus} \\ - --output-expected-counts expected-counts.csv.gz \\ - --output-cell-metadata cell-metadata.csv.gz \\ - --output-transcript-metadata transcript-metadata.csv.gz \\ - --output-gene-metadata gene-metadata.csv.gz \\ - --output-rates rates.csv.gz \\ - --output-cell-polygons cell-polygons.geojson.gz \\ - --output-cell-polygon-layers cell-polygons-layers.geojson.gz \\ - --output-cell-hulls cell-hulls.geojson.gz \\ - ${args} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - proseg: \$(proseg --version | sed 's/proseg //') - END_VERSIONS - """ - - stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead." 
- } - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" - - """ - touch expected-counts.csv.gz - touch cell-metadata.csv.gz - touch transcript-metadata.csv.gz - touch gene-metadata.csv.gz - touch rates.csv.gz - touch cell-polygons.geojson.gz - touch cell-polygons-layers.geojson.gz - touch cell-hulls.geojson.gz - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - proseg: \$(proseg --version | sed 's/proseg //') - END_VERSIONS - """ -} diff --git a/modules/local/proseg/preprocess/main.nf b/modules/local/proseg/preprocess/main.nf deleted file mode 100644 index dd979c4f..00000000 --- a/modules/local/proseg/preprocess/main.nf +++ /dev/null @@ -1,53 +0,0 @@ -process PROSEG2BAYSOR { - tag "$meta.id" - label 'process_high' - - container "nf-core/proseg:1.1.8" - - input: - path(transcript_metadata) - path(cell_polygons) - - output: - path("xr-transcript-metadata.csv"), emit: xr_metadata - path("xr-cell-polygons.geojson"), emit: xr_polygons - - script: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "PROSEG2BAYSOR (preprocess) module does not support Conda. Please use Docker / Singularity / Podman instead." - } - - """ - proseg-to-baysor \ - ${transcript_metadata} \ - ${cell_polygons} \ - --output-transcript-metadata xr-transcript-metadata.csv \ - --output-cell-polygons xr-cell-polygons.geojson - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - proseg: \$(proseg --version | sed 's/proseg //') - END_VERSIONS - - """ - - stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead." - } - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" - - """ - touch xr-transcript-metadata.csv - touch xr-cell-polygons.geojson - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - proseg: \$(proseg --version | sed 's/proseg //') - END_VERSIONS - """ -} - diff --git a/modules/local/proseg/preset/main.nf b/modules/local/proseg/preset/main.nf new file mode 100644 index 00000000..d734c830 --- /dev/null +++ b/modules/local/proseg/preset/main.nf @@ -0,0 +1,71 @@ +process PROSEG { + tag "${meta.id}" + label 'process_high' + + container "ghcr.io/dcjones/proseg:v3.1.0" + + input: + tuple val(meta), path(transcripts) + + output: + tuple val(meta), path("${prefix}/cell-polygons.geojson.gz"), path("${prefix}/transcript-metadata.csv.gz"), emit: seg_outs + tuple val(meta), path("${prefix}/proseg-output.zarr"), emit: zarr + tuple val("${task.process}"), val('proseg'), eval("proseg --version | sed 's/proseg //'"), topic: versions, emit: versions_proseg + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + // check for platform values + if (!(params.format in ['xenium', 'cosmx', 'merscope'])) { + error("${params.format} is an invalid platform type. 
Please specify xenium, cosmx, or merscope")
+    }
+
+    """
+    mkdir -p ${prefix}
+
+    proseg \\
+        --${params.format} \\
+        ${transcripts} \\
+        --nthreads ${task.cpus} \\
+        --output-expected-counts ${prefix}/expected-counts.csv.gz \\
+        --output-cell-metadata ${prefix}/cell-metadata.csv.gz \\
+        --output-transcript-metadata ${prefix}/transcript-metadata.csv.gz \\
+        --output-gene-metadata ${prefix}/gene-metadata.csv.gz \\
+        --output-rates ${prefix}/rates.csv.gz \\
+        --output-cell-polygons ${prefix}/cell-polygons.geojson.gz \\
+        --output-cell-polygon-layers ${prefix}/cell-polygons-layers.geojson.gz \\
+        --output-union-cell-polygons ${prefix}/union-cell-polygons.geojson.gz \\
+        --output-spatialdata ${prefix}/proseg-output.zarr \\
+        ${args}
+    """
+
+    stub:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    mkdir -p ${prefix}/
+    touch "${prefix}/expected-counts.csv.gz"
+    touch "${prefix}/cell-metadata.csv.gz"
+    touch "${prefix}/transcript-metadata.csv.gz"
+    touch "${prefix}/gene-metadata.csv.gz"
+    touch "${prefix}/rates.csv.gz"
+    touch "${prefix}/cell-polygons.geojson.gz"
+    touch "${prefix}/cell-polygons-layers.geojson.gz"
+    touch "${prefix}/union-cell-polygons.geojson.gz"
+    mkdir -p "${prefix}/proseg-output.zarr"
+    """
+}
diff --git a/modules/local/proseg/meta.yml b/modules/local/proseg/preset/meta.yml
similarity index 100%
rename from modules/local/proseg/meta.yml
rename to modules/local/proseg/preset/meta.yml
diff --git a/modules/local/proseg/preset/tests/main.nf.test b/modules/local/proseg/preset/tests/main.nf.test
new file mode 100644
index 00000000..baa48c10
--- /dev/null
+++ b/modules/local/proseg/preset/tests/main.nf.test
@@ -0,0 +1,61 @@
+nextflow_process {
+
+    name "Test Process PROSEG"
+    script "../main.nf"
+    process "PROSEG"
+
+    tag "modules"
+    tag "modules_local"
+    tag "proseg"
+    tag "proseg/preset"
+    tag "segmentation"
+    tag "cell_segmentation"
+
+    test("proseg - transcripts.csv") {
+
+        when {
+            process {
+                """
+                input[0] = [
+                    [id: "test_run_proseg"],
+                    file(params.modules_testdata_base_path + "spatial_omics/xenium/homo_sapiens/spatial_gene_expression.csv", checkIfExists: true)
+                ]
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert file(process.out.seg_outs[0][1]).exists() },
+                { assert file(process.out.seg_outs[0][1]).name == "cell-polygons.geojson.gz" },
+                { assert file(process.out.seg_outs[0][2]).exists() },
+                { assert file(process.out.seg_outs[0][2]).name == "transcript-metadata.csv.gz" },
+                { assert snapshot(process.out.versions_proseg).match("versions") }
+            )
+        }
+    }
+
+    test("proseg stub") {
+
+        options "-stub"
+
+        when {
+            process {
+                """
+                input[0] = [
+                    [id: "test_run_proseg"],
+                    file(params.modules_testdata_base_path + "spatial_omics/xenium/homo_sapiens/spatial_gene_expression.csv", checkIfExists: true)
+                ]
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out.versions_proseg).match("versions_stub") }
+            )
+        }
+    }
+}
diff --git a/modules/local/proseg/preset/tests/main.nf.test.snap b/modules/local/proseg/preset/tests/main.nf.test.snap
new file mode 100644
index 00000000..944325aa
--- /dev/null
+++ b/modules/local/proseg/preset/tests/main.nf.test.snap
@@ -0,0 +1,34 @@
+{
+    "versions_stub": {
+        "content": [
+            [
+                [
+                    "PROSEG",
+                    "proseg",
+                    "3.1.0"
+                ]
] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-21T04:01:19.610456233" + }, + "versions": { + "content": [ + [ + [ + "PROSEG", + "proseg", + "3.1.0" + ] + ] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-21T04:01:12.114004958" + } +} \ No newline at end of file diff --git a/modules/local/proseg/preset/tests/nextflow.config b/modules/local/proseg/preset/tests/nextflow.config new file mode 100644 index 00000000..f8b3a30a --- /dev/null +++ b/modules/local/proseg/preset/tests/nextflow.config @@ -0,0 +1,9 @@ +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} diff --git a/modules/local/proseg/proseg2baysor/main.nf b/modules/local/proseg/proseg2baysor/main.nf new file mode 100644 index 00000000..1a0c8b38 --- /dev/null +++ b/modules/local/proseg/proseg2baysor/main.nf @@ -0,0 +1,47 @@ +process PROSEG2BAYSOR { + tag "$meta.id" + label 'process_high' + + container "ghcr.io/dcjones/proseg:v3.1.0" + + input: + tuple val(meta), path(zarr_dir) + + output: + tuple val(meta), path("${prefix}/cell-polygons.geojson") , emit: xr_polygons + tuple val(meta), path("${prefix}/transcript-metadata.csv"), emit: xr_metadata + tuple val("${task.process}"), val('proseg'), eval("proseg --version | sed 's/proseg //'"), topic: versions, emit: versions_proseg + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "PROSEG2BAYSOR module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + + proseg-to-baysor \\ + ${zarr_dir} \\ + --output-transcript-metadata ${prefix}/transcript-metadata.csv \\ + --output-cell-polygons ${prefix}/cell-polygons.geojson \\ + ${args} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "PROSEG2BAYSOR module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/transcript-metadata.csv" + touch "${prefix}/cell-polygons.geojson" + """ +} diff --git a/modules/local/proseg/proseg2baysor/meta.yml b/modules/local/proseg/proseg2baysor/meta.yml new file mode 100644 index 00000000..e423e97b --- /dev/null +++ b/modules/local/proseg/proseg2baysor/meta.yml @@ -0,0 +1,56 @@ +name: "proseg2baysor" +description: Probabilistic cell segmentation for in situ spatial transcriptomics +keywords: + - segmentation + - cell segmentation + - spatialomics + - probabilistic segmentation + - in situ spatial transcriptomics +tools: + - "proseg": + description: "Proseg (probabilistic segmentation) is a cell segmentation method for in situ spatial transcriptomics. Xenium, CosMx, and MERSCOPE platforms are currently supported." + homepage: "https://github.com/dcjones/proseg/tree/main" + documentation: "https://github.com/dcjones/proseg/blob/main/README.md" + tool_dev_url: "https://github.com/dcjones/proseg" + doi: "" + licence: ["GNU Public License"] + +input: + - - meta: + type: map + description: | + Groovy Map containing run information + e.g. 
`[ id:'run_id']`
+    - zarr_dir:
+        type: directory
+        description: |
+          SpatialData Zarr store produced by the proseg run (via --output-spatialdata)
+        pattern: "*.zarr"
+output:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing run information
+          e.g. `[ id:'run_id']`
+    - xr_polygons:
+        type: file
+        description: 2D polygons for each cell in GeoJSON format. These are flattened from 3D
+        pattern: "cell-polygons.geojson"
+  - - xr_metadata:
+        type: file
+        description: Transcript ids, genes, revised positions, assignment probability
+        pattern: "transcript-metadata.csv"
+  - - versions_proseg:
+        type: tuple
+        description: Proseg version emitted via the versions topic channel
+        pattern: "topic: versions"
+
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/modules/local/proseg/proseg2baysor/tests/main.nf.test b/modules/local/proseg/proseg2baysor/tests/main.nf.test
new file mode 100644
index 00000000..039217e0
--- /dev/null
+++ b/modules/local/proseg/proseg2baysor/tests/main.nf.test
@@ -0,0 +1,72 @@
+nextflow_process {
+
+    name "Test Process PROSEG2BAYSOR"
+    script "../main.nf"
+    process "PROSEG2BAYSOR"
+
+    tag "modules"
+    tag "modules_local"
+    tag "proseg"
+    tag "proseg/proseg2baysor"
+    tag "segmentation"
+    tag "cell_segmentation"
+
+
+    setup {
+        run("PROSEG") {
+            script "modules/local/proseg/preset/main.nf"
+            process {
+                """
+                input[0] = [
+                    [id: "test_run_proseg"],
+                    file(params.modules_testdata_base_path + "spatial_omics/xenium/homo_sapiens/spatial_gene_expression.csv", checkIfExists: true)
+                ]
+                """
+            }
+        }
+    }
+
+    test("proseg2baysor - proseg-output.zarr") {
+
+        when {
+            process {
+                """
+                input[0] = PROSEG.out.zarr
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out).match() }
+            )
+        }
+
+    }
+
+    test("proseg2baysor stub") {
+
+        options "-stub"
+
+        when {
+            process {
+                """
+                input[0] = PROSEG.out.zarr
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert process.success },
+                { assert snapshot(process.out).match() }
+            )
+        }
+
+    }
+
+}
diff --git a/modules/local/proseg/proseg2baysor/tests/main.nf.test.snap b/modules/local/proseg/proseg2baysor/tests/main.nf.test.snap
new file mode 100644
index 00000000..7dff8302
--- /dev/null
+++ b/modules/local/proseg/proseg2baysor/tests/main.nf.test.snap
@@ -0,0 +1,60 @@
+{
+    "proseg2baysor - proseg-output.zarr": {
+        "content": [
+            {
+                "0": [
+
+                ],
+                "1": [
+
+                ],
+                "2": [
+
+                ],
+                "versions_proseg": [
+
+                ],
+                "xr_metadata": [
+
+                ],
+                "xr_polygons": [
+
+                ]
+            }
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T03:50:54.118409704"
+    },
+    "proseg2baysor stub": {
+        "content": [
+            {
+                "0": [
+
+                ],
+                "1": [
+
+                ],
+                "2": [
+
+                ],
+                "versions_proseg": [
+
+                ],
+                "xr_metadata": [
+
+                ],
+                "xr_polygons": [
+
+                ]
+            }
+        ],
+        "meta": {
+            "nf-test": "0.9.3",
+            "nextflow": "25.10.2"
+        },
+        "timestamp": "2026-03-21T03:51:01.546798675"
+    }
+}
\ No newline at end of file
diff --git a/modules/local/proseg/proseg2baysor/tests/nextflow.config b/modules/local/proseg/proseg2baysor/tests/nextflow.config
new file mode 100644
index 00000000..f8b3a30a
--- /dev/null
+++ b/modules/local/proseg/proseg2baysor/tests/nextflow.config
@@ -0,0 +1,9 @@
+process {
+
+    resourceLimits =
[ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} diff --git a/modules/local/proseg/tests/main.nf.test b/modules/local/proseg/tests/main.nf.test deleted file mode 100644 index 9b8f8c35..00000000 --- a/modules/local/proseg/tests/main.nf.test +++ /dev/null @@ -1,77 +0,0 @@ -nextflow_process { - - name "Test Process PROSEG" - script "../main.nf" - process "PROSEG" - - tag "modules" - tag "modules_nfcore" - tag "proseg" - tag "segmentation" - tag "cell_segmentation" - - - setup { - run("UNZIP") { - script "modules/nf-core/unzip/main.nf" - process { - """ - input[0] = [[], file('https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe/Xenium_Prime_Mouse_Ileum_tiny_outs.zip', checkIfExists: true)] - """ - } - } - } - - test("proseg - transcripts.csv") { - - when { - process { - """ - input[0] = Channel.of([ - [id: "test_run_proseg"], - ]).combine(UNZIP.out.unzipped_archive.map { it[1] } + "/transcripts.csv") - """ - } - } - - then { - assertAll( - { assert process.success }, - { assert snapshot(process.out).match() }, - { assert file(process.out.expected_counts.get(0).get(1).find { file(it).name == 'expected-counts.csv.gz' }).exists() }, - { assert file(process.out.cell_metadata.get(0).get(1).find { file(it).name == 'cell-metadata.csv.gz' }).exists() }, - { assert file(process.out.transcript_metadata.get(0).get(1).find { file(it).name == 'transcript-metadata.csv.gz' }).exists() }, - { assert file(process.out.gene_metadata.get(0).get(1).find { file(it).name == 'gene-metadata.csv.gz' }).exists() }, - { assert file(process.out.rates.get(0).get(1).find { file(it).name == 'rates.csv.gz' }).exists() }, - { assert file(process.out.cell_polygons.get(0).get(1).find { file(it).name == 'cell-polygons.geojson.gz' }).exists() }, - { assert file(process.out.cell_polygons_layers.get(0).get(1).find { file(it).name == 'cell-polygons-layers.geojson.gz' }).exists() }, - { assert file(process.out.cell-hulls.get(0).get(1).find { file(it).name == 'cell-hulls.geojson.gz' }).exists() }, - ) - } - - } - - test("proseg stub") { - - options "-stub" - - when { - process { - """ - input[0] = Channel.of([ - [id: "test_run_proseg"], - ]).combine(UNZIP.out.unzipped_archive.map { it[1] } + "/transcripts.csv") - """ - } - } - - then { - assertAll( - { assert process.success }, - { assert snapshot(process.out).match() } - ) - } - - } - -} diff --git a/modules/local/proseg/tests/tags.yml b/modules/local/proseg/tests/tags.yml deleted file mode 100644 index 8fec433a..00000000 --- a/modules/local/proseg/tests/tags.yml +++ /dev/null @@ -1,2 +0,0 @@ -proseg: - - "modules/nf-core/proseg/**" diff --git a/modules/local/resolift/main.nf b/modules/local/resolift/main.nf new file mode 100644 index 00000000..f5f22d09 --- /dev/null +++ b/modules/local/resolift/main.nf @@ -0,0 +1,46 @@ +process RESOLIFT { + tag "${meta.id}" + label 'process_low' + + container "khersameesh24/resolift:1.0.0" + + input: + tuple val(meta), path(morphology_tiff) + + output: + tuple val(meta), path("${prefix}/morphology.ome.enhanced.tiff"), emit: enhanced_tiff + tuple val("${task.process}"), val('resolift'), eval("pip show resolift | sed -n 's/^Version: //p'"), topic: versions, emit: versions_resolift + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("RESOLIFT module does not support Conda. 
Please use Docker / Singularity / Podman instead.") + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + + resolift \\ + -i ${morphology_tiff} \\ + -o ${prefix}/morphology.ome.enhanced.tiff \\ + ${args} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("RESOLIFT module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/morphology.ome.enhanced.tiff" + """ +} diff --git a/modules/local/resolift/meta.yml b/modules/local/resolift/meta.yml new file mode 100644 index 00000000..673e23fc --- /dev/null +++ b/modules/local/resolift/meta.yml @@ -0,0 +1,46 @@ +name: "resolift" +description: Upscale large TIFFs using chunk-based image processing and sharpening. +keywords: + - segmentation + - spatialomics + - image based segmentation +tools: + - "resolift": + description: "Upscale large TIFFs using chunk-based image processing and sharpening." + homepage: "https://github.com/khersameesh24/ResoLift" + documentation: "https://github.com/khersameesh24/ResoLift" + tool_dev_url: "https://github.com/khersameesh24/ResoLift" + doi: "" + licence: ["GNU Public License"] + +input: + - - meta: + type: map + description: | + Groovy Map containing run information + e.g. `[ id:'run_id']` + - input: + type: file + description: | + Path to the input TIFF file. + pattern: ".tiff" + +output: + - - meta: + type: map + description: | + Groovy Map containing run information + e.g. `[ id:'run_id']` + - output: + type: file + description: Path to save the upscaled TIFF file. + pattern: ".tiff" + - - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@kubranarci" +maintainers: + - "@kubranarci" diff --git a/modules/local/resolift/tests/main.nf.test b/modules/local/resolift/tests/main.nf.test new file mode 100644 index 00000000..7130d733 --- /dev/null +++ b/modules/local/resolift/tests/main.nf.test @@ -0,0 +1,60 @@ +nextflow_process { + + name "Test Process RESOLIFT" + script "../main.nf" + process "RESOLIFT" + config "./nextflow.config" + + tag "modules" + tag "modules_local" + tag "resolift" + tag "segmentation" + tag "cell_segmentation" + + test("resolift tif") { + + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true), + ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() }, + ) + } + + } + + test("resolift stub") { + + options "-stub" + + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true), + ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + + } + +} diff --git a/modules/local/resolift/tests/main.nf.test.snap b/modules/local/resolift/tests/main.nf.test.snap new file mode 100644 index 00000000..8972d043 --- /dev/null +++ b/modules/local/resolift/tests/main.nf.test.snap @@ -0,0 +1,84 @@ +{ + "resolift stub": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "morphology.ome.enhanced.tiff:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "1": [ + [ + "RESOLIFT", + "resolift", + "1.0" + ] + ], + 
"enhanced_tiff": [ + [ + { + "id": "test_run" + }, + "morphology.ome.enhanced.tiff:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions_resolift": [ + [ + "RESOLIFT", + "resolift", + "1.0" + ] + ] + } + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-22T16:00:00.000000000" + }, + "resolift tif": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "morphology.ome.enhanced.tiff:md5,a9feec67b317d8e5c5c93bc45d5a8763" + ] + ], + "1": [ + [ + "RESOLIFT", + "resolift", + "1.0" + ] + ], + "enhanced_tiff": [ + [ + { + "id": "test_run" + }, + "morphology.ome.enhanced.tiff:md5,a9feec67b317d8e5c5c93bc45d5a8763" + ] + ], + "versions_resolift": [ + [ + "RESOLIFT", + "resolift", + "1.0" + ] + ] + } + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-22T16:00:00.000000000" + } +} \ No newline at end of file diff --git a/modules/local/resolift/tests/nextflow.config b/modules/local/resolift/tests/nextflow.config new file mode 100644 index 00000000..f8b3a30a --- /dev/null +++ b/modules/local/resolift/tests/nextflow.config @@ -0,0 +1,9 @@ +process { + + resourceLimits = [ + cpus: 4, + memory: '8.GB', + time: '2.h', + ] + +} diff --git a/modules/local/samplesheet_check.nf b/modules/local/samplesheet_check.nf deleted file mode 100644 index 431ea42a..00000000 --- a/modules/local/samplesheet_check.nf +++ /dev/null @@ -1,31 +0,0 @@ -process SAMPLESHEET_CHECK { - tag "$samplesheet" - label 'process_single' - - conda "conda-forge::python=3.8.3" - container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? - 'https://depot.galaxyproject.org/singularity/python:3.8.3' : - 'quay.io/biocontainers/python:3.8.3' }" - - input: - path samplesheet - - output: - path '*.csv' , emit: csv - path "versions.yml", emit: versions - - when: - task.ext.when == null || task.ext.when - - script: // This script is bundled with the pipeline, in nf-core/spatialxe/bin/ - """ - check_samplesheet.py \\ - $samplesheet \\ - samplesheet.valid.csv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - python: \$(python --version | sed 's/Python //g') - END_VERSIONS - """ -} diff --git a/modules/local/segger/Dockerfile b/modules/local/segger/Dockerfile new file mode 100644 index 00000000..e907e350 --- /dev/null +++ b/modules/local/segger/Dockerfile @@ -0,0 +1,52 @@ +# GPU image for segger segmentation +# Base: PyTorch with CUDA 12.4 (consistent CUDA support) +FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + procps \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + +# Upgrade pip +RUN pip install --no-cache-dir --upgrade pip + +# Clone and install segger from fork with CUDA 12 support +# Using fork with fixed API (is_token_based, num_node_features) and data module exports +WORKDIR /workspace +RUN git clone https://github.com/an-altosian/segger_dev.git /workspace/segger_dev && \ + cd /workspace/segger_dev && \ + pip install --no-cache-dir -e ".[cuda12]" + +# Install missing dependencies for data.io module and training +RUN pip install --no-cache-dir dask-geopandas lightning pytorch_lightning + +# Fix squidpy/anndata compatibility - upgrade squidpy to version compatible with anndata 0.10+ +RUN pip install --no-cache-dir "squidpy>=1.4.0" + +# Make NVRTC discoverable for CuPy (needs libnvrtc.so.12 for JIT kernel compilation) +# The nvidia-cuda-nvrtc-cu12 pip package installs it under 
site-packages. +# Use both ldconfig AND symlinks for maximum reliability (ldconfig may not survive +# Wave container augmentation on Seqera Platform). +RUN NVRTC_LIB=$(python -c "import nvidia.cuda_nvrtc, pathlib; print(pathlib.Path(nvidia.cuda_nvrtc.__file__).parent / 'lib')") && \ + echo "$NVRTC_LIB" > /etc/ld.so.conf.d/nvidia-nvrtc.conf && \ + ln -sf "$NVRTC_LIB"/libnvrtc* /usr/lib/ && \ + ldconfig && \ + echo "NVRTC registered at: $NVRTC_LIB" && \ + ldconfig -p | grep nvrtc + +# Install faiss-cpu for vector search (faiss-gpu not available via pip) +# Note: Removed cuvs-cu12 and nvidia library force-reinstalls as they break PyTorch CUDA detection +RUN pip install --no-cache-dir faiss-cpu + +# Verify PyTorch was built with CUDA support (doesn't require GPU at build time) +RUN python -c "import torch; print(f'PyTorch version: {torch.__version__}'); print(f'CUDA compiled: {torch.version.cuda}')" + +# Set environment variables for segger +ENV PYTHONPATH=/workspace/segger_dev/src:$PYTHONPATH +ENV PATH="$PATH:/workspace/segger_dev/src/segger/cli/" +ENV CUPY_CACHE_DIR="/tmp/cupy_cache" + +# Set default shell +CMD ["/bin/bash"] diff --git a/modules/local/segger/create_dataset/main.nf b/modules/local/segger/create_dataset/main.nf new file mode 100644 index 00000000..ce008ec3 --- /dev/null +++ b/modules/local/segger/create_dataset/main.nf @@ -0,0 +1,58 @@ +process SEGGER_CREATE_DATASET { + tag "${meta.id}" + label 'process_xl' + maxForks params.restrict_concurrency ? 1 : 0 + + container "quay.io/dongzehe/segger:1.0.14" + + input: + tuple val(meta), path(base_dir) + + output: + tuple val(meta), path("${prefix}/"), emit: datasetdir + tuple val("${task.process}"), val('segger'), eval("pip show segger | sed -n 's/^Version: //p'"), topic: versions, emit: versions_segger + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("SEGGER_CREATE_DATASET module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + // check for platform values + if (!(params.format in ['xenium'])) { + error("${params.format} is an invalid platform type.") + } + + """ + export NUMBA_CACHE_DIR=\$PWD/.numba_cache + mkdir -p \$NUMBA_CACHE_DIR + + segger_create_dataset.py \\ + --bundle-dir ${base_dir} \\ + --output-dir ${prefix} \\ + --sample-type ${params.format} \\ + --tile-width ${params.tile_width} \\ + --tile-height ${params.tile_height} \\ + --n-workers ${task.cpus} \\ + ${args} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("SEGGER_CREATE_DATASET module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix}/ + touch "${prefix}/fake_file.txt" + """ +} diff --git a/modules/local/segger/create_dataset/meta.yml b/modules/local/segger/create_dataset/meta.yml new file mode 100644 index 00000000..f1573869 --- /dev/null +++ b/modules/local/segger/create_dataset/meta.yml @@ -0,0 +1,45 @@ +name: "segger_create_dataset" +description: Create a segger compatible dataset to speed up training and prediction of cell segmentations. 
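+# For orientation only: the process script in main.nf renders to roughly the
+# following call. Placeholders in angle brackets stand for pipeline params and
+# task values; the paths are illustrative, not tool defaults:
+#
+#   segger_create_dataset.py \
+#     --bundle-dir xenium_bundle/ \
+#     --output-dir sample1/ \
+#     --sample-type xenium \
+#     --tile-width <params.tile_width> \
+#     --tile-height <params.tile_height> \
+#     --n-workers <task.cpus>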
+keywords: + - segmentation + - xenium + - imaging +tools: + - "segger": + description: "Segger uses graph neural networks and heterogeneous graphs to offer efficient cell segmentation at unmatched precision and accuracy." + homepage: "https://github.com/EliHei2/segger_dev" + documentation: "https://elihei2.github.io/segger_dev/user_guide/" + tool_dev_url: "https://github.com/EliHei2/segger_dev" + doi: "tbd" + licence: ["MIT"] + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. `[ id:'segger_run_id' ]` + - base_dir: + type: file + description: | + Directory containing the raw dataset (e.g., transcripts, boundaries). +output: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. `[ id:'segger_run_id' ]` + - datasetdir: + description: | + Directory to save the processed Segger dataset (in PyTorch Geometric format). + - - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@tobiaspk" + - "@khersameesh24" +maintainers: + - "@tobiaspk" + - "@khersameesh24" diff --git a/modules/local/segger/predict/main.nf b/modules/local/segger/predict/main.nf new file mode 100644 index 00000000..d4180394 --- /dev/null +++ b/modules/local/segger/predict/main.nf @@ -0,0 +1,54 @@ +process SEGGER_PREDICT { + tag "${meta.id}" + label 'process_xl' + label 'process_gpu' + + container "quay.io/dongzehe/segger:1.0.14" + + input: + tuple val(meta), path(segger_dataset) + path models_dir + path transcripts + + output: + tuple val(meta), path("benchmarks_dir"), emit: benchmarks + tuple val(meta), path("benchmarks_dir/*/segger_transcripts.parquet"), emit: transcripts + tuple val("${task.process}"), val('segger'), eval("pip show segger | sed -n 's/^Version: //p'"), topic: versions, emit: versions_segger + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("SEGGER_PREDICT module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + """ + segger_predict.py \\ + --models-dir ${models_dir} \\ + --segger-data-dir ${segger_dataset} \\ + --transcripts-file ${transcripts} \\ + --benchmarks-dir benchmarks_dir \\ + --batch-size ${params.batch_size_predict} \\ + --use-cc ${params.cc_analysis} \\ + --knn-method ${params.segger_knn_method} \\ + --num-workers ${task.cpus} \\ + ${args} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("SEGGER_PREDICT module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p "benchmarks_dir" + touch "benchmarks_dir/fake_file.txt" + """ +} diff --git a/modules/local/segger/predict/meta.yml b/modules/local/segger/predict/meta.yml new file mode 100644 index 00000000..fab577b2 --- /dev/null +++ b/modules/local/segger/predict/meta.yml @@ -0,0 +1,58 @@ +name: "segger_predict" +description: Predict cell segmentation on new data using a trained Segger model. 
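+# For orientation only: the process script in main.nf renders to roughly the
+# following call. Placeholders in angle brackets stand for pipeline params and
+# task values; the paths are illustrative, not tool defaults:
+#
+#   segger_predict.py \
+#     --models-dir trained_models \
+#     --segger-data-dir segger_dataset/ \
+#     --transcripts-file transcripts.parquet \
+#     --benchmarks-dir benchmarks_dir \
+#     --batch-size <params.batch_size_predict> \
+#     --use-cc <params.cc_analysis> \
+#     --knn-method <params.segger_knn_method> \
+#     --num-workers <task.cpus>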
+keywords:
+  - segmentation
+  - xenium
+  - imaging
+  - prediction
+tools:
+  - "segger":
+      description: "Segger uses graph neural networks and heterogeneous graphs to offer efficient cell segmentation at unmatched precision and accuracy."
+      homepage: "https://github.com/EliHei2/segger_dev"
+      documentation: "https://elihei2.github.io/segger_dev/user_guide/"
+      tool_dev_url: "https://github.com/EliHei2/segger_dev"
+      doi: "tbd"
+      licence: ["MIT"]
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. `[ id:'segger_run_id' ]`
+    - segger_dataset:
+        type: file
+        description: |
+          File or directory with input data to run predictions on.
+  - - models_dir:
+        type: directory
+        description: |
+          Directory containing the trained model for inference.
+  - - transcripts:
+        type: file
+        description: |
+          Path to the transcripts.parquet file.
+output:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. `[ id:'segger_run_id' ]`
+    - benchmarks_dir:
+        description: |
+          Directory containing the segmentation results, including cell boundaries and associations.
+  - - transcripts:
+        type: file
+        description: |
+          Per-cell transcript assignments (segger_transcripts.parquet) from the prediction run.
+  - - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+
+authors:
+  - "@tobiaspk"
+  - "@khersameesh24"
+maintainers:
+  - "@tobiaspk"
+  - "@khersameesh24"
diff --git a/modules/local/segger/train/main.nf b/modules/local/segger/train/main.nf
new file mode 100644
index 00000000..4d813dbc
--- /dev/null
+++ b/modules/local/segger/train/main.nf
@@ -0,0 +1,71 @@
+process SEGGER_TRAIN {
+    tag "${meta.id}"
+    label 'process_xl'
+    label 'process_gpu'
+    maxForks params.restrict_concurrency ? 1 : 0
+
+    container "quay.io/dongzehe/segger:1.0.14"
+
+    input:
+    tuple val(meta), path(dataset_dir)
+
+    output:
+    tuple val(meta), path("trained_models"), emit: trained_models
+    tuple val("${task.process}"), val('segger'), eval("pip show segger | sed -n 's/^Version: //p'"), topic: versions, emit: versions_segger
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    // Exit if running this module with -profile conda / -profile mamba
+    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
+        error("SEGGER_TRAIN module does not support Conda. Please use Docker / Singularity / Podman instead.")
+    }
+
+    def args = task.ext.args ?: ''
+    def script_path = "/workspace/segger_dev/src/segger/cli/train_model.py"
+    prefix = task.ext.prefix ?: "${meta.id}"
+    // Scale GPU count with retries: 2 -> 4 GPUs (2 * task.attempt); params.devices is logged below but not enforced here
+    def gpu_count = 2 * task.attempt
+    def cuda_visible = gpu_count == 1 ? "export CUDA_VISIBLE_DEVICES=0" : ""
+    def accelerator = task.accelerator ? 
'gpu' : 'auto' + + """ + # Set numba cache directory to avoid caching issues in container + export NUMBA_CACHE_DIR=\$PWD/.numba_cache + mkdir -p \$NUMBA_CACHE_DIR + + # GPU detection logging + echo "=== GPU Detection (SEGGER_TRAIN) ===" + echo "Requested devices: ${gpu_count} (attempt ${task.attempt}, max ${params.devices})" + echo "Accelerator: ${accelerator}" + nvidia-smi 2>/dev/null && echo "GPU available: yes" || echo "GPU available: no (nvidia-smi failed)" + python3 -c "import torch; print(f'PyTorch CUDA available: {torch.cuda.is_available()}'); print(f'CUDA device count: {torch.cuda.device_count()}')" 2>/dev/null || echo "PyTorch CUDA check failed" + echo "====================================" + + ${cuda_visible} + python3 ${script_path} \\ + --dataset_dir ${dataset_dir} \\ + --models_dir trained_models \\ + --sample_tag ${prefix} \\ + --batch_size ${params.batch_size_train} \\ + --max_epochs ${params.max_epochs} \\ + --devices ${gpu_count} \\ + --num_workers ${params.segger_num_workers} \\ + --accelerator ${accelerator} \\ + ${args} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("SEGGER_TRAIN module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p trained_models/ + touch trained_models/fakefile.txt + """ +} diff --git a/modules/local/segger/train/meta.yml b/modules/local/segger/train/meta.yml new file mode 100644 index 00000000..40b838b8 --- /dev/null +++ b/modules/local/segger/train/meta.yml @@ -0,0 +1,47 @@ +name: "segger_train_model" +description: Train a Segger cell segmentation model using a segger-created dataset. +keywords: + - segmentation + - xenium + - imaging + - model-training +tools: + - "segger": + description: "Segger uses graph neural networks and heterogeneous graphs to offer efficient cell segmentation at unmatched precision and accuracy." + homepage: "https://github.com/EliHei2/segger_dev" + documentation: "https://elihei2.github.io/segger_dev/user_guide/" + tool_dev_url: "https://github.com/EliHei2/segger_dev" + doi: "tbd" + licence: ["MIT"] + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. `[ id:'segger_run_id' ]` + - dataset_dir: + type: file + description: | + Directory to segger created dataset(s). +output: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. `[ id:'segger_run_id' ]` + - trained_models: + type: directory + description: | + Directory to save the trained model and checkpoints. + - - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@tobiaspk" + - "@khersameesh24" +maintainers: + - "@tobiaspk" + - "@khersameesh24" diff --git a/modules/local/spatialdata/merge/main.nf b/modules/local/spatialdata/merge/main.nf new file mode 100644 index 00000000..9a57f877 --- /dev/null +++ b/modules/local/spatialdata/merge/main.nf @@ -0,0 +1,48 @@ +process SPATIALDATA_MERGE { + tag "${meta.id}" + label 'process_high_memory' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb8fc03fa657c164c5d83f075578bbb5d9c10f1178165f94e94f33c67efca1a1/data' : + 'community.wave.seqera.io/library/spatialdata-io_spatialdata:b264928c30680e87' }" + + input: + tuple val(meta), path(raw_bundle, stageAs: "*"), path(redefined_bundle, stageAs: "*") + val(outputfolder) + + output: + tuple val(meta), path("spatialdata/${prefix}/${outputfolder}"), emit: merged_bundle + tuple val("${task.process}"), val('spatialdata'), eval('python3 -c "import spatialdata; print(spatialdata.__version__)"'), topic: versions, emit: versions_spatialdata + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_MERGE module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + spatialdata_merge.py \\ + --raw-bundle ${raw_bundle} \\ + --redefined-bundle ${redefined_bundle} \\ + --prefix ${prefix} \\ + --output-folder ${outputfolder} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_MERGE module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p "spatialdata/${prefix}/${outputfolder}/" + touch "spatialdata/${prefix}/${outputfolder}/fake_file.txt" + """ +} diff --git a/modules/local/spatialdata/merge/meta.yml b/modules/local/spatialdata/merge/meta.yml new file mode 100644 index 00000000..47faf763 --- /dev/null +++ b/modules/local/spatialdata/merge/meta.yml @@ -0,0 +1,44 @@ +name: spatialdata_merge +description: Merge several SpatialData bundles together. +keywords: + - data + - spatialomics +tools: + - custom: + description: An open and universal framework for processing spatial omics data + homepage: https://github.com/scverse/spatialdata + documentation: https://spatialdata.scverse.org/en/latest/ + licence: ["BSD 3-Clause"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - ref_bundle: + type: folder + description: | + Path to reference (raw) spatialdata bundle. + - add_bundle: + type: folder + description: | + Path to spatialdata bundle that should be added. + +output: + - spatialxe_bundle: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - "spatialdata_spatialxe": + type: folder + description: Spatialdata folder + pattern: "${meta.id}/spatialdata_spatialxe/*" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@heylf" diff --git a/modules/local/spatialdata/meta/main.nf b/modules/local/spatialdata/meta/main.nf new file mode 100644 index 00000000..40e2efae --- /dev/null +++ b/modules/local/spatialdata/meta/main.nf @@ -0,0 +1,49 @@ +process SPATIALDATA_META { + tag "${meta.id}" + label 'process_high' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb8fc03fa657c164c5d83f075578bbb5d9c10f1178165f94e94f33c67efca1a1/data' : + 'community.wave.seqera.io/library/spatialdata-io_spatialdata:b264928c30680e87' }" + + input: + tuple val(meta), path(spatialdata_bundle, stageAs: "*"), path(xenium_bundle, stageAs: "*") + val(outputfolder) + + output: + tuple val(meta), path("spatialdata/${prefix}/${outputfolder}"), emit: metadata + tuple val("${task.process}"), val('spatialdata'), eval('python3 -c "import spatialdata; print(spatialdata.__version__)"'), topic: versions, emit: versions_spatialdata + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_META module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + spatialdata_meta.py \\ + --spatialdata-bundle ${spatialdata_bundle} \\ + --xenium-bundle ${xenium_bundle} \\ + --prefix ${prefix} \\ + --metadata '${meta}' \\ + --output-folder ${outputfolder} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_META module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p "spatialdata/${prefix}/${outputfolder}/" + touch "spatialdata/${prefix}/${outputfolder}/fake_file.txt" + """ +} diff --git a/modules/local/spatialdata/meta/meta.yml b/modules/local/spatialdata/meta/meta.yml new file mode 100644 index 00000000..bf7615b4 --- /dev/null +++ b/modules/local/spatialdata/meta/meta.yml @@ -0,0 +1,44 @@ +name: spatialdata_meta +description: Add metadata to a SpatialData bundle. +keywords: + - data + - spatialomics +tools: + - custom: + description: An open and universal framework for processing spatial omics data + homepage: https://github.com/scverse/spatialdata + documentation: https://spatialdata.scverse.org/en/latest/ + licence: ["BSD 3-Clause"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - spatialdata_bundle: + type: folder + description: | + Path to spatialdata bundle. + - xenium_bundle: + type: folder + description: | + Path to Xeniumranger bundle. + +output: + - spatialxe_bundle: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - "spatialdata_spatialxe_final": + type: folder + description: Spatialdata folder + pattern: "${meta.id}/spatialdata_spatialxe_final/*" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@heylf" diff --git a/modules/local/spatialdata/write/main.nf b/modules/local/spatialdata/write/main.nf new file mode 100644 index 00000000..43acc073 --- /dev/null +++ b/modules/local/spatialdata/write/main.nf @@ -0,0 +1,52 @@ +process SPATIALDATA_WRITE { + tag "${meta.id}" + label 'process_high' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb8fc03fa657c164c5d83f075578bbb5d9c10f1178165f94e94f33c67efca1a1/data' : + 'community.wave.seqera.io/library/spatialdata-io_spatialdata:b264928c30680e87' }" + + input: + tuple val(meta), path(bundle, stageAs: "*") + val(outputfolder) + val(segmented_object) + val(coordinate_space) + + output: + tuple val(meta), path("spatialdata/${prefix}/${outputfolder}"), emit: spatialdata + tuple val("${task.process}"), val('spatialdata'), eval('python3 -c "import spatialdata; print(spatialdata.__version__)"'), topic: versions, emit: versions_spatialdata + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_WRITE module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + spatialdata_write.py \\ + --bundle ${bundle} \\ + --prefix ${prefix} \\ + --output-folder ${outputfolder} \\ + --segmented-object ${segmented_object} \\ + --coordinate-space ${coordinate_space} \\ + --format ${params.format} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + exit(1, "SPATIALDATA_WRITE module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p "spatialdata/${prefix}/${outputfolder}" + touch "spatialdata/${prefix}/${outputfolder}/fake_file.txt" + """ +} diff --git a/modules/local/spatialdata/write/meta.yml b/modules/local/spatialdata/write/meta.yml new file mode 100644 index 00000000..1c14653a --- /dev/null +++ b/modules/local/spatialdata/write/meta.yml @@ -0,0 +1,40 @@ +name: spatialdata_write +description: Convert 10x Xenium data bundle into a SpatialData bundle. +keywords: + - data + - spatialomics +tools: + - custom: + description: An open and universal framework for processing spatial omics data + homepage: https://github.com/scverse/spatialdata + documentation: https://spatialdata.scverse.org/en/latest/ + licence: ["BSD 3-Clause"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - bundle: + type: folder + description: | + Path to spatialomics data bundle (e.g., from Xeniumranger) + +output: + - spatialdata: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample' ] + - "${outputfolder}": + type: folder + description: Spatialdata folder + pattern: "${meta.id}/spatialdata/*" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@heylf" diff --git a/modules/local/utility/convert_mask_uint32/main.nf b/modules/local/utility/convert_mask_uint32/main.nf new file mode 100644 index 00000000..40e5c35c --- /dev/null +++ b/modules/local/utility/convert_mask_uint32/main.nf @@ -0,0 +1,48 @@ +/* + * CONVERT_MASK_UINT32: Convert segmentation mask to uint32 dtype. + * + * XeniumRanger import-segmentation requires uint32 masks. + * StarDist outputs int32 labels by default. 
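+ *
+ * For reference, the cast is assumed to amount to the following sketch;
+ * the bundled utility_convert_mask_uint32.py script is not shown in this diff:
+ *
+ *   import numpy as np
+ *   import tifffile
+ *   mask = tifffile.imread("mask.tif")  # e.g. int32 labels from StarDist
+ *   tifffile.imwrite("mask_uint32.tif", mask.astype(np.uint32))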
+ * + * Input: + * - meta: Sample metadata map + * - mask: Segmentation mask TIFF (any integer dtype) + * + * Output: + * - mask: uint32 segmentation mask TIFF + * - versions: Software versions + */ +process CONVERT_MASK_UINT32 { + tag "${meta.id}" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/d9/d964e0bef867bb2ff1a309c9c087d8d83ac734ce3aa315dd8311d4c1bfdafd8e/data' : + 'community.wave.seqera.io/library/python_pip_imagecodecs_nvidia-cublas-cu12_pruned:b668bcb6d531d350' }" + + input: + tuple val(meta), path(mask) + + output: + tuple val(meta), path("${prefix}_uint32_mask.tif"), emit: mask + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tifffile'), eval("python3 -c 'import tifffile; print(tifffile.__version__)'"), topic: versions, emit: versions_tifffile + tuple val("${task.process}"), val('numpy'), eval("python3 -c 'import numpy; print(numpy.__version__)'"), topic: versions, emit: versions_numpy + + when: + task.ext.when == null || task.ext.when + + script: + prefix = task.ext.prefix ?: "${meta.id}" + """ + utility_convert_mask_uint32.py \\ + --input ${mask} \\ + --output ${prefix}_uint32_mask.tif + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + """ + touch ${prefix}_uint32_mask.tif + """ +} diff --git a/modules/local/utility/convert_mask_uint32/meta.yml b/modules/local/utility/convert_mask_uint32/meta.yml new file mode 100644 index 00000000..0d6f4ff0 --- /dev/null +++ b/modules/local/utility/convert_mask_uint32/meta.yml @@ -0,0 +1,128 @@ +name: "convert_mask_uint32" +description: Convert a segmentation mask TIFF to uint32 dtype for downstream tools that require uint32 (e.g. XeniumRanger import-segmentation). +keywords: + - xenium + - segmentation + - mask + - tiff + - uint32 +tools: + - "python": + description: | + Python programming language interpreter, used here to load and rewrite + the segmentation mask with the required dtype. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "tifffile": + description: | + Read and write TIFF files, including BigTIFF and OME-TIFF, in Python. + homepage: "https://github.com/cgohlke/tifffile" + documentation: "https://github.com/cgohlke/tifffile" + tool_dev_url: "https://github.com/cgohlke/tifffile" + doi: "no DOI available" + licence: ["BSD-3-Clause"] + identifier: "" + - "numpy": + description: | + The fundamental package for scientific computing with Python, + used to cast the mask array to uint32. + homepage: "https://numpy.org/" + documentation: "https://numpy.org/doc/stable/" + tool_dev_url: "https://github.com/numpy/numpy" + doi: "10.1038/s41586-020-2649-2" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - mask: + type: file + description: Segmentation mask TIFF (any integer dtype). + pattern: "*.{tif,tiff}" + ontologies: [] + +output: + mask: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*_uint32_mask.tif": + type: file + description: Segmentation mask TIFF cast to uint32. 
+ pattern: "*_uint32_mask.tif" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - "python3 -c 'import tifffile; print(tifffile.__version__)'": + type: eval + description: The expression to obtain the version of the tool + versions_numpy: + - - ${task.process}: + type: string + description: The process the versions were collected from + - numpy: + type: string + description: The tool name + - "python3 -c 'import numpy; print(numpy.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - "python3 -c 'import tifffile; print(tifffile.__version__)'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - numpy: + type: string + description: The tool name + - "python3 -c 'import numpy; print(numpy.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/utility/downscale_morphology/main.nf b/modules/local/utility/downscale_morphology/main.nf new file mode 100644 index 00000000..ab4f478a --- /dev/null +++ b/modules/local/utility/downscale_morphology/main.nf @@ -0,0 +1,58 @@ +/* + * DOWNSCALE_MORPHOLOGY: Pre-downscale morphology image for cellpose + * + * Reduces image dimensions by a scale factor so that cellpose's internal + * rescaling (diam_mean/diameter) does not exceed GPU/CPU memory. + * The scale factor defaults to diameter/diam_mean (e.g., 9/30 = 0.3). + * After downscaling, cellpose should use --diameter 30 (no internal rescale). + * + * Input: + * - meta: Sample metadata map + * - image: Morphology OME-TIFF + * + * Output: + * - downscaled: Downscaled TIFF image + * - scale_info: JSON with scale factor and original dimensions + * - versions: Software versions + */ +process DOWNSCALE_MORPHOLOGY { + tag "${meta.id}" + label 'process_medium' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb670191b7ae1a9fd5449746453916c7014b9ea622942ca76a7cb40da7deee46/data' : + 'community.wave.seqera.io/library/python_pip_cellpose:fdf7a8c3a305a26e' }" + + input: + tuple val(meta), path(image) + + output: + tuple val(meta), path("${prefix}/downscaled.tif"), emit: downscaled + tuple val(meta), path("${prefix}/scale_info.json"), emit: scale_info + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tifffile'), eval('python3 -c "import tifffile; print(tifffile.__version__)"'), topic: versions, emit: versions_tifffile + tuple val("${task.process}"), val('scikit-image'), eval('python3 -c "import skimage; print(skimage.__version__)"'), topic: versions, emit: versions_skimage + + when: + task.ext.when == null || task.ext.when + + script: + def diameter = task.ext.diameter ?: 9 + def diam_mean = 30 + prefix = task.ext.prefix ?: "${meta.id}" + """ + utility_downscale_morphology.py \\ + --image ${image} \\ + --diameter ${diameter} \\ + --diam-mean ${diam_mean} \\ + --prefix ${prefix} + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + """ + mkdir -p ${prefix} + touch ${prefix}/downscaled.tif + echo '{"scale": 0.3}' > ${prefix}/scale_info.json + """ +} diff --git a/modules/local/utility/downscale_morphology/meta.yml b/modules/local/utility/downscale_morphology/meta.yml new file mode 100644 index 00000000..83408899 --- /dev/null +++ b/modules/local/utility/downscale_morphology/meta.yml @@ -0,0 +1,138 @@ +name: "downscale_morphology" +description: Pre-downscale a morphology image so that cellpose's internal rescaling does not exceed memory limits. +keywords: + - xenium + - morphology + - downscale + - image processing + - cellpose +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "tifffile": + description: | + Read and write TIFF files, including BigTIFF and OME-TIFF, in Python. + homepage: "https://github.com/cgohlke/tifffile" + documentation: "https://github.com/cgohlke/tifffile" + tool_dev_url: "https://github.com/cgohlke/tifffile" + doi: "no DOI available" + licence: ["BSD-3-Clause"] + identifier: "" + - "scikit-image": + description: | + Image processing library for Python (scikit-image), used here for + anti-aliased image resizing. + homepage: "https://scikit-image.org/" + documentation: "https://scikit-image.org/docs/stable/" + tool_dev_url: "https://github.com/scikit-image/scikit-image" + doi: "10.7717/peerj.453" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - image: + type: file + description: Morphology OME-TIFF image. + pattern: "*.{ome.tif,ome.tiff,tif,tiff}" + ontologies: [] + +output: + downscaled: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*/downscaled.tif": + type: file + description: Downscaled TIFF image. + pattern: "*/downscaled.tif" + ontologies: [] + scale_info: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*/scale_info.json": + type: file + description: JSON file with scale factor and original/new dimensions. 
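+          # Sketch of the downscale step, assuming the bundled
+          # utility_downscale_morphology.py performs the equivalent of an
+          # anti-aliased resize by scale = diameter / diam_mean (script not
+          # shown in this diff):
+          #   import tifffile
+          #   from skimage.transform import rescale
+          #   img = tifffile.imread("morphology.ome.tif")
+          #   down = rescale(img, 9 / 30, anti_aliasing=True, preserve_range=True)
+          #   tifffile.imwrite("downscaled.tif", down.astype(img.dtype))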
+ pattern: "*/scale_info.json" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + versions_skimage: + - - ${task.process}: + type: string + description: The process the versions were collected from + - scikit-image: + type: string + description: The tool name + - 'python3 -c "import skimage; print(skimage.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - scikit-image: + type: string + description: The tool name + - 'python3 -c "import skimage; print(skimage.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/utility/extract_dapi/main.nf b/modules/local/utility/extract_dapi/main.nf new file mode 100644 index 00000000..ef9a88bd --- /dev/null +++ b/modules/local/utility/extract_dapi/main.nf @@ -0,0 +1,50 @@ +/* + * EXTRACT_DAPI: Extract DAPI channel (channel 0) from multi-channel OME-TIFF. + * + * Xenium morphology_focus.ome.tif has multiple channels (DAPI, boundary, interior); + * StarDist 2D_versatile_fluo expects single-channel input. + * + * Input: + * - meta: Sample metadata map + * - image: Multi-channel OME-TIFF morphology image + * + * Output: + * - dapi: Single-channel DAPI TIFF + * - versions: Software versions + */ +process EXTRACT_DAPI { + tag "${meta.id}" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/d9/d964e0bef867bb2ff1a309c9c087d8d83ac734ce3aa315dd8311d4c1bfdafd8e/data' : + 'community.wave.seqera.io/library/python_pip_imagecodecs_nvidia-cublas-cu12_pruned:b668bcb6d531d350' }" + + input: + tuple val(meta), path(image) + + output: + tuple val(meta), path("${prefix}_dapi.tif"), emit: dapi + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tifffile'), eval("python3 -c 'import tifffile; print(tifffile.__version__)'"), topic: versions, emit: versions_tifffile + tuple val("${task.process}"), val('numpy'), eval("python3 -c 'import numpy; print(numpy.__version__)'"), topic: versions, emit: versions_numpy + + when: + task.ext.when == null || task.ext.when + + script: + prefix = task.ext.prefix ?: "${meta.id}" + def channel_index = task.ext.channel_index ?: 0 + """ + utility_extract_dapi.py \\ + --input ${image} \\ + --output ${prefix}_dapi.tif \\ + --channel-index ${channel_index} + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + """ + touch ${prefix}_dapi.tif + """ +} diff --git a/modules/local/utility/extract_dapi/meta.yml b/modules/local/utility/extract_dapi/meta.yml new file mode 100644 index 00000000..11ce4342 --- /dev/null +++ b/modules/local/utility/extract_dapi/meta.yml @@ -0,0 +1,126 @@ +name: "extract_dapi" +description: Extract a single channel (default DAPI / channel 0) from a multi-channel OME-TIFF morphology image. +keywords: + - xenium + - dapi + - morphology + - ome-tiff + - image processing +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "tifffile": + description: | + Read and write TIFF files, including BigTIFF and OME-TIFF, in Python. + homepage: "https://github.com/cgohlke/tifffile" + documentation: "https://github.com/cgohlke/tifffile" + tool_dev_url: "https://github.com/cgohlke/tifffile" + doi: "no DOI available" + licence: ["BSD-3-Clause"] + identifier: "" + - "numpy": + description: | + The fundamental package for scientific computing with Python. + homepage: "https://numpy.org/" + documentation: "https://numpy.org/doc/stable/" + tool_dev_url: "https://github.com/numpy/numpy" + doi: "10.1038/s41586-020-2649-2" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - image: + type: file + description: Multi-channel OME-TIFF morphology image. + pattern: "*.{ome.tif,ome.tiff,tif,tiff}" + ontologies: [] + +output: + dapi: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*_dapi.tif": + type: file + description: Single-channel TIFF containing the extracted channel. 
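+          # Sketch of the channel extraction, assuming the bundled
+          # utility_extract_dapi.py behaves equivalently and that the channel
+          # axis comes first (an assumption for illustration; script not shown
+          # in this diff):
+          #   import tifffile
+          #   img = tifffile.imread("morphology_focus.ome.tif")  # (C, Y, X)
+          #   tifffile.imwrite("dapi.tif", img[0])  # channel 0 = DAPI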
+ pattern: "*_dapi.tif" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - "python3 -c 'import tifffile; print(tifffile.__version__)'": + type: eval + description: The expression to obtain the version of the tool + versions_numpy: + - - ${task.process}: + type: string + description: The process the versions were collected from + - numpy: + type: string + description: The tool name + - "python3 -c 'import numpy; print(numpy.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - "python3 -c 'import tifffile; print(tifffile.__version__)'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - numpy: + type: string + description: The tool name + - "python3 -c 'import numpy; print(numpy.__version__)'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/utility/extract_preview_data/main.nf b/modules/local/utility/extract_preview_data/main.nf new file mode 100644 index 00000000..821effc5 --- /dev/null +++ b/modules/local/utility/extract_preview_data/main.nf @@ -0,0 +1,49 @@ +process EXTRACT_PREVIEW_DATA { + tag "${meta.id}" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/c6/c6ebf365fbfd7bdde9e1453d646f45c39eddde92df5922b9881785f347bdbc2b/data' : + 'community.wave.seqera.io/library/beautifulsoup4_pandas:a3f88f59088edad5' }" + + input: + tuple val(meta), path(preview_html) + + output: + tuple val(meta), path("${prefix}/*_mqc.tsv"), emit: mqc_data + tuple val(meta), path("${prefix}/*_mqc.png"), emit: mqc_img + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("EXTRACT_PREVIEW_DATA module does not support Conda. 
Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + utility_extract_preview_data.py \\ + --preview-html ${preview_html} \\ + --prefix ${prefix} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("EXTRACT_PREVIEW_DATA module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch ${prefix}/noise_distribution_mqc.tsv + touch ${prefix}/gene_structure_mqc.tsv + touch ${prefix}/umap_mqc.tsv + touch ${prefix}/transcript_plots_mqc.png + touch ${prefix}/noise_level_mqc.png + """ +} diff --git a/modules/local/utility/extract_preview_data/meta.yml b/modules/local/utility/extract_preview_data/meta.yml new file mode 100644 index 00000000..d1b448e0 --- /dev/null +++ b/modules/local/utility/extract_preview_data/meta.yml @@ -0,0 +1,99 @@ +name: "extract_preview_data" +description: Parse a Xenium preview HTML report and extract MultiQC-compatible TSV tables and embedded plot images. +keywords: + - xenium + - preview + - multiqc + - html + - parsing +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "beautifulsoup4": + description: | + Python library for pulling data out of HTML and XML files. + homepage: "https://www.crummy.com/software/BeautifulSoup/" + documentation: "https://www.crummy.com/software/BeautifulSoup/bs4/doc/" + tool_dev_url: "https://git.launchpad.net/beautifulsoup" + doi: "no DOI available" + licence: ["MIT"] + identifier: "" + - "pandas": + description: | + Powerful data structures for data analysis, time series, and statistics. + homepage: "https://pandas.pydata.org/" + documentation: "https://pandas.pydata.org/docs/" + tool_dev_url: "https://github.com/pandas-dev/pandas" + doi: "10.5281/zenodo.3509134" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - preview_html: + type: file + description: Xenium preview HTML report to extract data from. + pattern: "*.html" + ontologies: [] + +output: + mqc_data: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${prefix}/*_mqc.tsv": + type: file + description: MultiQC-compatible TSV tables extracted from the preview HTML. + pattern: "*_mqc.tsv" + ontologies: [] + mqc_img: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${prefix}/*_mqc.png": + type: file + description: MultiQC-compatible PNG plot images extracted from the preview HTML. 
+ pattern: "*_mqc.png" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/local/utility/get_coordinates/main.nf b/modules/local/utility/get_coordinates/main.nf new file mode 100644 index 00000000..2b672239 --- /dev/null +++ b/modules/local/utility/get_coordinates/main.nf @@ -0,0 +1,42 @@ +process GET_TRANSCRIPTS_COORDINATES { + tag "${meta.id}" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/94/9409ce399922a5746bea1b7df5668c3d1d79b9af49a15950d9818c4fe45ac749/data' : + 'community.wave.seqera.io/library/pandas_procs_pyarrow:d8f882b65dfea451' }" + + input: + tuple val(meta), path(transcripts) + + output: + stdout() + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("GET_TRANSCRIPTS_COORDINATES module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + utility_get_coordinates.py \\ + --transcripts ${transcripts} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("GET_TRANSCRIPTS_COORDINATES module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + echo "0,0,1000,1000" + """ +} diff --git a/modules/local/utility/get_coordinates/meta.yml b/modules/local/utility/get_coordinates/meta.yml new file mode 100644 index 00000000..cf631eda --- /dev/null +++ b/modules/local/utility/get_coordinates/meta.yml @@ -0,0 +1,81 @@ +name: "get_transcripts_coordinates" +description: Read a transcripts parquet file and emit the transcript bounding-box coordinates to stdout. +keywords: + - xenium + - transcripts + - coordinates + - parquet + - bounding-box +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "pandas": + description: | + Powerful data structures for data analysis, time series, and statistics. 
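+      # Sketch of the bounding box written to stdout, assuming
+      # utility_get_coordinates.py reads the Xenium transcript columns
+      # x_location / y_location (script not shown in this diff):
+      #   import pandas as pd
+      #   df = pd.read_parquet("transcripts.parquet", columns=["x_location", "y_location"])
+      #   print(f"{df.x_location.min()},{df.y_location.min()},"
+      #         f"{df.x_location.max()},{df.y_location.max()}")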
+ homepage: "https://pandas.pydata.org/" + documentation: "https://pandas.pydata.org/docs/" + tool_dev_url: "https://github.com/pandas-dev/pandas" + doi: "10.5281/zenodo.3509134" + licence: ["BSD-3-Clause"] + identifier: "" + - "pyarrow": + description: | + Python bindings for Apache Arrow, used to read Parquet files efficiently. + homepage: "https://arrow.apache.org/" + documentation: "https://arrow.apache.org/docs/python/" + tool_dev_url: "https://github.com/apache/arrow" + doi: "no DOI available" + licence: ["Apache-2.0"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts: + type: file + description: Transcripts parquet file from the Xenium bundle. + pattern: "*.parquet" + ontologies: [] + +output: + stdout: + - stdout: + type: string + description: Comma-separated transcript bounding-box coordinates printed to stdout. + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/local/utility/parquet_to_csv/main.nf b/modules/local/utility/parquet_to_csv/main.nf new file mode 100644 index 00000000..9c31fe41 --- /dev/null +++ b/modules/local/utility/parquet_to_csv/main.nf @@ -0,0 +1,46 @@ +process PARQUET_TO_CSV { + tag "$meta.id" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/94/9409ce399922a5746bea1b7df5668c3d1d79b9af49a15950d9818c4fe45ac749/data' : + 'community.wave.seqera.io/library/pandas_procs_pyarrow:d8f882b65dfea451' }" + + input: + tuple val(meta), path(transcripts) + val(extension) + + output: + tuple val(meta), path("${prefix}/*.csv*"), emit: transcripts_csv + tuple val("${task.process}"), val('pyarrow'), eval('python3 -c "import pyarrow; print(pyarrow.__version__)"'), topic: versions, emit: versions_pyarrow + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "PARQUET_TO_CSV module does not support Conda. Please use Docker / Singularity / Podman instead." + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + utility_parquet_to_csv.py \\ + --transcripts ${transcripts} \\ + --extension ${extension} \\ + --prefix ${prefix} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "PARQUET_TO_CSV module does not support Conda. Please use Docker / Singularity / Podman instead." 
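+        // For reference, the main script's conversion is assumed to amount to:
+        //   import pyarrow.parquet as pq
+        //   import pyarrow.csv as pacsv
+        //   pacsv.write_csv(pq.read_table("transcripts.parquet"), "transcripts.csv")
+        // (utility_parquet_to_csv.py itself is not shown in this diff.)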
+    }
+
+    prefix = task.ext.prefix ?: "${meta.id}"
+
+    """
+    mkdir -p ${prefix}
+    touch "${prefix}/${transcripts}.csv"
+    """
+}
diff --git a/modules/local/utility/parquet_to_csv/meta.yml b/modules/local/utility/parquet_to_csv/meta.yml
new file mode 100644
index 00000000..60d58fcc
--- /dev/null
+++ b/modules/local/utility/parquet_to_csv/meta.yml
@@ -0,0 +1,59 @@
+name: "parquet_to_csv"
+description: Convert a transcripts parquet file to CSV format.
+keywords:
+  - xenium
+  - parquet
+  - csv
+  - conversion
+tools:
+  - "parquet_to_csv":
+      description: "Collects functions to convert data formats for various types of spatial omics data processing and analysis."
+      homepage: "https://github.com/heylf/spatialconverter"
+      documentation: "https://github.com/heylf/spatialconverter"
+      tool_dev_url: "https://github.com/heylf/spatialconverter"
+      doi: "tbd"
+      licence: [""]
+      identifier: ""
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. `[ id:'segger_run_id' ]`
+    - transcripts:
+        type: file
+        description: |
+          Transcript file in parquet format.
+        pattern: "*.parquet"
+    - extension:
+        type: string
+        description: Output file extension passed to the conversion script (e.g. `csv`).
+
+output:
+  transcripts_csv:
+    - - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. `[ id:'segger_run_id' ]`
+      - "${prefix}/*.csv*":
+          type: file
+          description: |
+            Transcript file in CSV format.
+          pattern: "*.csv*"
+  versions_pyarrow:
+    - - ${task.process}:
+          type: string
+          description: The process the versions were collected from
+      - pyarrow:
+          type: string
+          description: The tool name
+      - 'python3 -c "import pyarrow; print(pyarrow.__version__)"':
+          type: eval
+          description: The expression to obtain the version of the tool
+
+authors:
+  - "@heylf"
+maintainers:
+  - "@heylf"
diff --git a/modules/local/utility/reconstruct_patches/main.nf b/modules/local/utility/reconstruct_patches/main.nf
new file mode 100644
index 00000000..c0e87b1b
--- /dev/null
+++ b/modules/local/utility/reconstruct_patches/main.nf
@@ -0,0 +1,68 @@
+/*
+ * RECONSTRUCT_PATCHES: Reconstruct the patches directory structure from
+ * individually staged patch files for stitch_transcripts.py.
+ *
+ * Inputs:
+ *   meta          - sample metadata map
+ *   grid_json     - patch_grid.json from XENIUM_PATCH_DIVIDE
+ *   patch_ids     - list of patch identifiers (e.g. patch_0000, patch_0001, ...)
+ *   csv_files     - per-patch Baysor segmentation.csv files (staged into csv_?/ dirs)
+ *   geojson_files - per-patch Baysor segmentation_polygons.json files (staged into geo_?/ dirs)
+ *
+ * Outputs:
+ *   patches_dir - reconstructed patches/ directory containing patch_grid.json plus
+ *                 one subdirectory per patch with segmentation.csv and segmentation_polygons.json
+ *   versions    - topic-channel version emission for coreutils (cp)
+ */
+process RECONSTRUCT_PATCHES {
+    tag "$meta.id"
+    label 'process_single'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/b9/b900c562dadb26dedce5254f88ae85440d7a08cd5e7f72cc4c3ce5aef89b5aa8/data' :
+        'community.wave.seqera.io/library/pip_pandas:257725bfe0d2df83' }"
+
+    input:
+    tuple val(meta), path(grid_json), val(patch_ids), path(csv_files, stageAs: 'csv_?/*'), path(geojson_files, stageAs: 'geo_?/*')
+
+    output:
+    tuple val(meta), path("patches"), emit: patches_dir
+    tuple val("${task.process}"), val('coreutils'), eval("cp --version | head -n1 | awk '{print \$NF}'"), topic: versions, emit: versions_coreutils
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def ids = patch_ids instanceof List ? patch_ids : [patch_ids]
+    def csvs = csv_files instanceof List ? csv_files : [csv_files]
+    def geos = geojson_files instanceof List ?
geojson_files : [geojson_files] + + def reconstruct_script = ids.withIndex().collect { pid, idx -> + [ + "mkdir -p patches/${pid}", + "cp '${csvs[idx]}' patches/${pid}/segmentation.csv", + "cp '${geos[idx]}' patches/${pid}/segmentation_polygons.json", + ].join('\n ') + }.join('\n ') + """ + mkdir -p patches + cp '${grid_json}' patches/patch_grid.json + + ${reconstruct_script} + """ + + stub: + def ids = patch_ids instanceof List ? patch_ids : [patch_ids] + def stub_files = ids.collect { pid -> + [ + "mkdir -p patches/${pid}", + "touch patches/${pid}/segmentation.csv", + "touch patches/${pid}/segmentation_polygons.json", + ].join('\n ') + }.join('\n ') + """ + mkdir -p patches + touch patches/patch_grid.json + ${stub_files} + """ +} diff --git a/modules/local/utility/reconstruct_patches/meta.yml b/modules/local/utility/reconstruct_patches/meta.yml new file mode 100644 index 00000000..b218c15a --- /dev/null +++ b/modules/local/utility/reconstruct_patches/meta.yml @@ -0,0 +1,87 @@ +name: "reconstruct_patches" +description: Reconstruct the patches directory structure from individually staged per-patch Baysor segmentation files for downstream stitching. +keywords: + - baysor + - xenium + - patches + - stitching + - segmentation +tools: + - "coreutils": + description: | + GNU core utilities (cp, mkdir) used to assemble the patches directory. + homepage: "https://www.gnu.org/software/coreutils/" + documentation: "https://www.gnu.org/software/coreutils/manual/coreutils.html" + tool_dev_url: "https://git.savannah.gnu.org/cgit/coreutils.git" + doi: "no DOI available" + licence: ["GPL-3.0-or-later"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1' ] + - grid_json: + type: file + description: patch_grid.json describing the tile layout from XENIUM_PATCH_DIVIDE. + pattern: "*.json" + ontologies: [] + - patch_ids: + type: list + description: | + Ordered list of patch identifiers + (e.g. patch_0000, patch_0001, ...). + - csv_files: + type: list + description: Per-patch Baysor segmentation.csv files, one per patch_id. + pattern: "*.csv" + ontologies: [] + - geojson_files: + type: list + description: Per-patch Baysor segmentation_polygons.json files, one per patch_id. + pattern: "*.json" + ontologies: [] + +output: + patches_dir: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1' ] + - "patches": + type: directory + description: | + Reconstructed patches directory containing patch_grid.json plus one + subdirectory per patch with segmentation.csv and + segmentation_polygons.json. 
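A sketch of assembling the RECONSTRUCT_PATCHES input so the `patch_ids`, `csv_files`, and `geojson_files` lists stay index-aligned (the upstream channel names are hypothetical):

```nextflow
// ch_per_patch: [meta, patch_id, segmentation.csv, segmentation_polygons.json]
// ch_grid:     [meta, patch_grid.json]
ch_per_patch
    .groupTuple()                                // [meta, [ids], [csvs], [geojsons]], index-aligned
    .join(ch_grid)                               // add patch_grid.json, keyed by meta
    .map { meta, ids, csvs, geos, grid -> [meta, grid, ids, csvs, geos] }
    | RECONSTRUCT_PATCHES
```

Grouping all three per-patch values out of the same tuples is what keeps the lists aligned, which the `csvs[idx]` / `geos[idx]` indexing in the script relies on.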
+ pattern: "patches" + versions_coreutils: + - - ${task.process}: + type: string + description: The process the versions were collected from + - coreutils: + type: string + description: The tool name + - "cp --version | head -n1 | awk '{print $NF}'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - coreutils: + type: string + description: The tool name + - "cp --version | head -n1 | awk '{print $NF}'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/utility/resize_tif/main.nf b/modules/local/utility/resize_tif/main.nf new file mode 100644 index 00000000..35685b7c --- /dev/null +++ b/modules/local/utility/resize_tif/main.nf @@ -0,0 +1,48 @@ +process RESIZE_TIF { + tag "${meta.id}" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/6d/6d5aedb8fcf066eecd9f0dfac93bfffc8161bdae65b4502509d9953db2036a7e/data' : + 'community.wave.seqera.io/library/numpy_pandas_pyarrow_scikit-image_tifffile:131397039376b375' }" + + input: + tuple val(meta), path(transcripts), path(mask), path(metadata) + + output: + tuple val(meta), path("${meta.id}/resized_*.tif"), emit: resized_mask + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tifffile'), eval('python3 -c "import tifffile; print(tifffile.__version__)"'), topic: versions, emit: versions_tifffile + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("RESIZE_TIF module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + utility_resize_tif.py \\ + --mask ${mask} \\ + --transcripts ${transcripts} \\ + --metadata ${metadata} \\ + --prefix ${prefix} \\ + --mask-filename ${mask} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error("RESIZE_TIF module does not support Conda. Please use Docker / Singularity / Podman instead.") + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/resized_${mask}.tif" + """ +} diff --git a/modules/local/utility/resize_tif/meta.yml b/modules/local/utility/resize_tif/meta.yml new file mode 100644 index 00000000..482c7fd9 --- /dev/null +++ b/modules/local/utility/resize_tif/meta.yml @@ -0,0 +1,126 @@ +name: "resize_tif" +description: Resize a segmentation mask TIFF to match the full-resolution Xenium morphology image dimensions. +keywords: + - xenium + - tif + - resize + - mask + - segmentation +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "tifffile": + description: | + Read and write TIFF files, including BigTIFF and OME-TIFF, in Python. 
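All of these modules emit `[process, tool, version]` tuples with `topic: versions` rather than writing `versions.yml` files. A minimal sketch of collecting them (requires a Nextflow release with topic-channel support):

```nextflow
workflow {
    // One channel receives every tuple sent to the `versions` topic.
    ch_versions = Channel.topic('versions')

    ch_versions
        .unique()
        .view { process, tool, version -> "${process}: ${tool}=${version}" }
}
```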
+ homepage: "https://github.com/cgohlke/tifffile" + documentation: "https://github.com/cgohlke/tifffile" + tool_dev_url: "https://github.com/cgohlke/tifffile" + doi: "no DOI available" + licence: ["BSD-3-Clause"] + identifier: "" + - "numpy": + description: | + The fundamental package for scientific computing with Python. + homepage: "https://numpy.org/" + documentation: "https://numpy.org/doc/stable/" + tool_dev_url: "https://github.com/numpy/numpy" + doi: "10.1038/s41586-020-2649-2" + licence: ["BSD-3-Clause"] + identifier: "" + - "scikit-image": + description: | + Image processing routines for SciPy. + homepage: "https://scikit-image.org/" + documentation: "https://scikit-image.org/docs/stable/" + tool_dev_url: "https://github.com/scikit-image/scikit-image" + doi: "10.7717/peerj.453" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts: + type: file + description: Transcripts parquet file from the Xenium bundle. + pattern: "*.parquet" + ontologies: [] + - mask: + type: file + description: Segmentation mask TIFF to resize. + pattern: "*.{tif,tiff}" + ontologies: [] + - metadata: + type: file + description: Xenium experiment metadata file (used to read full-resolution dimensions). + pattern: "*" + ontologies: [] + +output: + resized_mask: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${meta.id}/resized_*.tif": + type: file + description: Segmentation mask TIFF resized to the full-resolution morphology image dimensions. + pattern: "*/resized_*.tif" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/local/utility/segger2xr/main.nf b/modules/local/utility/segger2xr/main.nf new file mode 100644 index 00000000..073748d7 --- /dev/null +++ b/modules/local/utility/segger2xr/main.nf @@ -0,0 +1,50 @@ +process SEGGER2XR { + tag "$meta.id" + label 'process_medium' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb8fc03fa657c164c5d83f075578bbb5d9c10f1178165f94e94f33c67efca1a1/data' : + 'community.wave.seqera.io/library/spatialdata-io_spatialdata:b264928c30680e87' }" + + input: + tuple val(meta), path(transcripts) + + output: + tuple val(meta), path("${meta.id}/segmentation.csv") , emit: segmentation_csv + tuple val(meta), path("${meta.id}/transcripts.parquet") , emit: transcripts_parquet + tuple val(meta), path("${meta.id}/segmentation_polygons.json") , emit: viz_polygons + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "SEGGER2XR module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + def min_transcripts = task.ext.min_transcripts_per_cell ?: 3 + + """ + utility_segger2xr.py \\ + --transcripts ${transcripts} \\ + --prefix ${meta.id} \\ + --min-transcripts ${min_transcripts} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "SEGGER2XR module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + def prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + echo 'transcript_id,x,y,z,gene,cell,is_noise' > "${prefix}/segmentation.csv" + touch "${prefix}/transcripts.parquet" + echo '{"type":"FeatureCollection","features":[]}' > "${prefix}/segmentation_polygons.json" + """ +} diff --git a/modules/local/utility/segger2xr/meta.yml b/modules/local/utility/segger2xr/meta.yml new file mode 100644 index 00000000..9a7364bf --- /dev/null +++ b/modules/local/utility/segger2xr/meta.yml @@ -0,0 +1,101 @@ +name: "segger2xr" +description: Convert SEGGER segmentation output (transcripts parquet with cell assignments) into XeniumRanger-compatible segmentation files. +keywords: + - xenium + - segger + - xeniumranger + - segmentation + - conversion +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "segger": + description: | + SEGGER cell segmentation for spatial transcriptomics data. + homepage: "https://github.com/EliHei2/segger_dev" + documentation: "https://github.com/EliHei2/segger_dev" + tool_dev_url: "https://github.com/EliHei2/segger_dev" + doi: "no DOI available" + licence: ["MIT"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts: + type: file + description: Transcripts parquet file output by SEGGER with per-transcript cell assignments. + pattern: "*.parquet" + ontologies: [] + +output: + segmentation_csv: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${meta.id}/segmentation.csv": + type: file + description: Per-transcript segmentation table (transcript_id, x, y, z, gene, cell, is_noise). 
+ pattern: "*/segmentation.csv" + ontologies: [] + transcripts_parquet: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${meta.id}/transcripts.parquet": + type: file + description: Transcripts parquet file in XeniumRanger-compatible format. + pattern: "*/transcripts.parquet" + ontologies: [] + viz_polygons: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${meta.id}/segmentation_polygons.json": + type: file + description: GeoJSON FeatureCollection of cell segmentation polygons for visualization. + pattern: "*/segmentation_polygons.json" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@heylf" +maintainers: + - "@heylf" diff --git a/modules/local/utility/split_transcripts/main.nf b/modules/local/utility/split_transcripts/main.nf new file mode 100644 index 00000000..f7057e31 --- /dev/null +++ b/modules/local/utility/split_transcripts/main.nf @@ -0,0 +1,46 @@ +process SPLIT_TRANSCRIPTS { + tag "$meta.id" + label 'process_low' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/b9/b900c562dadb26dedce5254f88ae85440d7a08cd5e7f72cc4c3ce5aef89b5aa8/data' : + 'community.wave.seqera.io/library/pip_pandas:257725bfe0d2df83' }" + + input: + tuple val(meta), path(transcripts) + val(x_bins) + val(y_bins) + + output: + tuple val(meta), path("${meta.id}/splits.csv"), emit: splits_csv + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "SPLIT_TRANSCRIPTS module does not support Conda. Please use Docker / Singularity / Podman instead." + } + def prefix = task.ext.prefix ?: "${meta.id}" + + """ + utility_split_transcripts.py \\ + --transcripts ${transcripts} \\ + --x-bins ${x_bins} \\ + --y-bins ${y_bins} \\ + --prefix ${prefix} + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "SPLIT_TRANSCRIPTS module does not support Conda. Please use Docker / Singularity / Podman instead." 
+    }
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    mkdir -p ${prefix}
+    touch "${prefix}/splits.csv"
+    """
+}
diff --git a/modules/local/utility/split_transcripts/meta.yml b/modules/local/utility/split_transcripts/meta.yml
new file mode 100644
index 00000000..8317c052
--- /dev/null
+++ b/modules/local/utility/split_transcripts/meta.yml
@@ -0,0 +1,59 @@
+name: "split_transcripts"
+description: Split transcripts along the x and y axes
+keywords:
+  - baysor
+  - transcripts
+  - split_transcripts
+tools:
+  - "baysor":
+      description: "Utility to split transcripts into bins for Baysor. Baysor segments cells using spatial gene expression maps; segmentation masks can optionally be supplied as additional input."
+      homepage: "https://kharchenkolab.github.io/Baysor/dev/"
+      documentation: "https://kharchenkolab.github.io/Baysor/dev/"
+      tool_dev_url: "https://github.com/kharchenkolab/Baysor"
+      doi: "10.1038/s41587-021-01044-w"
+      licence: ["MIT"]
+      identifier: ""
+
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. [ id:'test' ]
+    - transcripts:
+        type: file
+        description: transcripts.parquet file from the Xenium bundle
+        pattern: "*.parquet"
+    - x_bins:
+        type: integer
+        description: "Number of slices along the x axis (default: 10)"
+    - y_bins:
+        type: integer
+        description: "Number of slices along the y axis (default: 10)"
+
+output:
+  splits_csv:
+    - - meta:
+          type: map
+          description: |
+            Groovy Map containing sample information
+            e.g. [ id:'test' ]
+      - "${meta.id}/splits.csv":
+          type: file
+          description: CSV file listing the transcript splits along the x and y axes.
+          pattern: "*/splits.csv"
+  versions_python:
+    - - ${task.process}:
+          type: string
+          description: The process the versions were collected from
+      - python:
+          type: string
+          description: The tool name
+      - "python3 --version | sed 's/Python //'":
+          type: eval
+          description: The expression to obtain the version of the tool
+
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/modules/local/utility/upscale_mask/main.nf b/modules/local/utility/upscale_mask/main.nf
new file mode 100644
index 00000000..246290fc
--- /dev/null
+++ b/modules/local/utility/upscale_mask/main.nf
@@ -0,0 +1,50 @@
+/*
+ * UPSCALE_MASK: Restore cellpose masks to original image resolution
+ *
+ * Uses nearest-neighbor interpolation to upscale segmentation masks
+ * back to original dimensions (from scale_info.json).
+ *
+ * Input:
+ *   - meta: Sample metadata map
+ *   - mask: Cellpose mask TIFF (downscaled resolution)
+ *   - scale_info: JSON with original dimensions
+ *
+ * Output:
+ *   - upscaled_mask: Mask at original resolution
+ *   - versions: Software versions
+ */
+process UPSCALE_MASK {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+ 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb670191b7ae1a9fd5449746453916c7014b9ea622942ca76a7cb40da7deee46/data' : + 'community.wave.seqera.io/library/python_pip_cellpose:fdf7a8c3a305a26e' }" + + input: + tuple val(meta), path(mask), path(scale_info) + + output: + tuple val(meta), path("${prefix}/upscaled_*.tif"), emit: upscaled_mask + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tifffile'), eval('python3 -c "import tifffile; print(tifffile.__version__)"'), topic: versions, emit: versions_tifffile + + when: + task.ext.when == null || task.ext.when + + script: + prefix = task.ext.prefix ?: "${meta.id}" + """ + utility_upscale_mask.py \\ + --mask ${mask} \\ + --scale-info ${scale_info} \\ + --prefix ${prefix} + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + """ + mkdir -p ${prefix} + touch ${prefix}/upscaled_mask.tif + """ +} diff --git a/modules/local/utility/upscale_mask/meta.yml b/modules/local/utility/upscale_mask/meta.yml new file mode 100644 index 00000000..92c85e12 --- /dev/null +++ b/modules/local/utility/upscale_mask/meta.yml @@ -0,0 +1,103 @@ +name: "upscale_mask" +description: Restore segmentation masks to the original image resolution using nearest-neighbor interpolation. +keywords: + - xenium + - mask + - upscale + - segmentation + - cellpose +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "tifffile": + description: | + Read and write TIFF files, including BigTIFF and OME-TIFF, in Python. + homepage: "https://github.com/cgohlke/tifffile" + documentation: "https://github.com/cgohlke/tifffile" + tool_dev_url: "https://github.com/cgohlke/tifffile" + doi: "no DOI available" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - mask: + type: file + description: Cellpose segmentation mask TIFF at downscaled resolution. + pattern: "*.tif" + ontologies: [] + - scale_info: + type: file + description: JSON file containing original image dimensions (orig_h, orig_w). + pattern: "*.json" + ontologies: [] + +output: + upscaled_mask: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "*/upscaled_*.tif": + type: file + description: Segmentation mask upscaled to the original image resolution. 
+ pattern: "*/upscaled_*.tif" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - tifffile: + type: string + description: The tool name + - 'python3 -c "import tifffile; print(tifffile.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/xenium_patch/divide/main.nf b/modules/local/xenium_patch/divide/main.nf new file mode 100644 index 00000000..5032417e --- /dev/null +++ b/modules/local/xenium_patch/divide/main.nf @@ -0,0 +1,57 @@ +/* + * XENIUM_PATCH_DIVIDE: Split transcripts.parquet into overlapping patches. + * + * Input: + * - meta: Sample metadata map + * - transcripts: transcripts.parquet file + * - image: morphology image (for getting dimensions) + * + * Output: + * - grid: patch_grid.json metadata file + * - patch_transcripts: per-patch transcripts.parquet files (one per patch) + * - versions: Software versions + */ +process XENIUM_PATCH_DIVIDE { + tag "$meta.id" + label 'process_medium' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/f9/f9c8f3a2de4e2aa94500011f7d7d09276e9b6f2d79ee8737c9098fe22d4649bc/data' : + 'community.wave.seqera.io/library/sopa_procps-ng_pyarrow:c9ce8cd2ede79d72' }" + + input: + tuple val(meta), path(transcripts), path(image) + + output: + tuple val(meta), path("patches/patch_grid.json") , emit: grid + tuple val(meta), path("patches/patch_*/transcripts.parquet") , emit: patch_transcripts + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('pyarrow'), eval('python3 -c "import pyarrow; print(pyarrow.__version__)"'), topic: versions, emit: versions_pyarrow + + when: + task.ext.when == null || task.ext.when + + script: + def tile_width = task.ext.tile_width ?: 2000 + def overlap = task.ext.overlap ?: 50 + def balanced = task.ext.balanced + def balanced_flag = balanced == true || balanced == 'true' ? 
'--balanced' : '' + """ + divide_transcripts.py \\ + --transcripts ${transcripts} \\ + --output patches \\ + --tile-width ${tile_width} \\ + --overlap ${overlap} \\ + ${balanced_flag} \\ + --image-width \$(python3 -c "import tifffile; print(tifffile.imread('${image}').shape[-1])") \\ + --image-height \$(python3 -c "import tifffile; print(tifffile.imread('${image}').shape[-2])") + + """ + + stub: + """ + mkdir -p patches/patch_0_0 + touch patches/patch_0_0/transcripts.parquet + echo '{}' > patches/patch_grid.json + """ +} diff --git a/modules/local/xenium_patch/divide/meta.yml b/modules/local/xenium_patch/divide/meta.yml new file mode 100644 index 00000000..28983c90 --- /dev/null +++ b/modules/local/xenium_patch/divide/meta.yml @@ -0,0 +1,115 @@ +name: "xenium_patch_divide" +description: Split transcripts.parquet into overlapping spatial patches for tiled segmentation processing. +keywords: + - xenium + - patches + - tiling + - transcripts + - parquet +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "pyarrow": + description: | + Python bindings for Apache Arrow, used to read and write Parquet + files efficiently. + homepage: "https://arrow.apache.org/docs/python/" + documentation: "https://arrow.apache.org/docs/python/" + tool_dev_url: "https://github.com/apache/arrow" + doi: "no DOI available" + licence: ["Apache-2.0"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - transcripts: + type: file + description: transcripts.parquet file from the Xenium bundle. + pattern: "*.parquet" + ontologies: [] + - image: + type: file + description: Morphology image (used to read full-resolution dimensions). + pattern: "*.{ome.tif,ome.tiff,tif,tiff}" + ontologies: [] + +output: + grid: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "patches/patch_grid.json": + type: file + description: JSON file describing the patch grid (offsets, sizes, overlaps). + pattern: "patches/patch_grid.json" + ontologies: [] + patch_transcripts: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "patches/patch_*/transcripts.parquet": + type: file + description: Per-patch transcripts.parquet files (one per patch). 
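XENIUM_PATCH_DIVIDE reads its tiling parameters from `task.ext`, so the patch layout can be tuned from configuration, for example:

```nextflow
process {
    withName: 'XENIUM_PATCH_DIVIDE' {
        ext.tile_width = 4000   // default 2000 (units as defined by divide_transcripts.py)
        ext.overlap    = 100    // default 50
        ext.balanced   = true   // adds --balanced to divide_transcripts.py
    }
}
```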
+ pattern: "patches/patch_*/transcripts.parquet" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_pyarrow: + - - ${task.process}: + type: string + description: The process the versions were collected from + - pyarrow: + type: string + description: The tool name + - 'python3 -c "import pyarrow; print(pyarrow.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - pyarrow: + type: string + description: The tool name + - 'python3 -c "import pyarrow; print(pyarrow.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/local/xenium_patch/stitch/main.nf b/modules/local/xenium_patch/stitch/main.nf new file mode 100644 index 00000000..c674a409 --- /dev/null +++ b/modules/local/xenium_patch/stitch/main.nf @@ -0,0 +1,56 @@ +/* + * XENIUM_PATCH_STITCH: Stitch per-patch segmentation results into unified output. + * + * Uses sopa's solve_conflicts() to resolve overlapping cells at patch boundaries. + * + * Input: + * - meta: Sample metadata map + * - patches: Directory containing patch subdirectories and patch_grid.json + * + * Output: + * - xr_polygons_transcript: Stitched cell polygons and transcript metadata + * - versions: Software versions + */ +process XENIUM_PATCH_STITCH { + tag "$meta.id" + label 'process_medium' + + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/f9/f9c8f3a2de4e2aa94500011f7d7d09276e9b6f2d79ee8737c9098fe22d4649bc/data' : + 'community.wave.seqera.io/library/sopa_procps-ng_pyarrow:c9ce8cd2ede79d72' }" + + input: + tuple val(meta), path(patches) + + output: + tuple val(meta), + path("output/xr-cell-polygons.geojson"), + path("output/xr-transcript-metadata.csv") , emit: xr_polygons_transcript + tuple val("${task.process}"), val('python'), eval("python3 --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('sopa'), eval('python3 -c "import sopa; print(sopa.__version__)"'), topic: versions, emit: versions_sopa + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + """ + xenium_patch_stitch_transcripts.py \\ + --patches ${patches} \\ + --output output \\ + ${args} + + # Post-process: ensure all GeoJSON geometries are Polygon and + # reconcile dropped cells in the transcript CSV. 
+ xenium_patch_stitch_postprocess.py \\ + --geojson output/xr-cell-polygons.geojson \\ + --csv output/xr-transcript-metadata.csv + """ + + stub: + """ + mkdir -p output + echo '{"type":"FeatureCollection","features":[]}' > output/xr-cell-polygons.geojson + echo 'transcript_id,x,y,z,gene,cell,is_noise' > output/xr-transcript-metadata.csv + """ +} diff --git a/modules/local/xenium_patch/stitch/meta.yml b/modules/local/xenium_patch/stitch/meta.yml new file mode 100644 index 00000000..958ef907 --- /dev/null +++ b/modules/local/xenium_patch/stitch/meta.yml @@ -0,0 +1,104 @@ +name: "xenium_patch_stitch" +description: Stitch per-patch segmentation results into a unified output using sopa's solve_conflicts() to resolve overlapping cells at patch boundaries. +keywords: + - xenium + - patches + - stitching + - sopa + - segmentation +tools: + - "python": + description: | + Python programming language interpreter. + homepage: "https://www.python.org/" + documentation: "https://docs.python.org/3/" + tool_dev_url: "https://github.com/python/cpython" + doi: "no DOI available" + licence: ["PSF-2.0"] + identifier: "" + - "sopa": + description: | + Spatial-omics pipeline that handles tile-based segmentation, conflict + resolution, and aggregation across multiple platforms (Xenium, + MERSCOPE, CosMx, etc.). + homepage: "https://gustaveroussy.github.io/sopa/" + documentation: "https://gustaveroussy.github.io/sopa/" + tool_dev_url: "https://github.com/gustaveroussy/sopa" + doi: "10.1038/s41467-024-48981-z" + licence: ["BSD-3-Clause"] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - patches: + type: directory + description: Directory containing per-patch subdirectories and a patch_grid.json file. + pattern: "patches" + +output: + xr_polygons_transcript: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "output/xr-cell-polygons.geojson": + type: file + description: Stitched cell polygons in GeoJSON format compatible with XeniumRanger. + pattern: "output/xr-cell-polygons.geojson" + ontologies: [] + - "output/xr-transcript-metadata.csv": + type: file + description: Per-transcript cell assignments and noise flags after stitching. 
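A sketch of consuming the stitched output; note that the emission bundles both files into a single tuple:

```nextflow
XENIUM_PATCH_STITCH(RECONSTRUCT_PATCHES.out.patches_dir)

// One tuple carries both stitched files: [meta, polygons.geojson, metadata.csv]
XENIUM_PATCH_STITCH.out.xr_polygons_transcript
    .view { meta, polygons, transcript_csv -> "stitched ${meta.id}: ${polygons}, ${transcript_csv}" }
```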
+ pattern: "output/xr-transcript-metadata.csv" + ontologies: [] + versions_python: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + versions_sopa: + - - ${task.process}: + type: string + description: The process the versions were collected from + - sopa: + type: string + description: The tool name + - 'python3 -c "import sopa; print(sopa.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - python: + type: string + description: The tool name + - "python3 --version | sed 's/Python //'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The process the versions were collected from + - sopa: + type: string + description: The tool name + - 'python3 -c "import sopa; print(sopa.__version__)"': + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@an-altosian" +maintainers: + - "@an-altosian" diff --git a/modules/nf-core/cellpose/Dockerfile b/modules/nf-core/cellpose/Dockerfile new file mode 100644 index 00000000..34053226 --- /dev/null +++ b/modules/nf-core/cellpose/Dockerfile @@ -0,0 +1,25 @@ +# GPU image for cellpose segmentation +# Base: PyTorch with CUDA 12.4 (consistent CUDA support) +FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + procps \ + libgl1 \ + libglib2.0-0 \ + && rm -rf /var/lib/apt/lists/* + +# Upgrade pip +RUN pip install --no-cache-dir --upgrade pip + +# Install cellpose and its dependencies +RUN pip install --no-cache-dir \ + cellpose==4.0.8 \ + numpy \ + scipy \ + matplotlib \ + scikit-image \ + opencv-python-headless + +# Set default shell +CMD ["/bin/bash"] diff --git a/modules/nf-core/cellpose/cellpose.diff b/modules/nf-core/cellpose/cellpose.diff new file mode 100644 index 00000000..f04e6f11 --- /dev/null +++ b/modules/nf-core/cellpose/cellpose.diff @@ -0,0 +1,32 @@ +Changes in component 'nf-core/cellpose' +'modules/nf-core/cellpose/environment.yml' is unchanged +'modules/nf-core/cellpose/meta.yml' is unchanged +'modules/nf-core/cellpose/tests/main.nf.test' is unchanged +'modules/nf-core/cellpose/tests/main.nf.test.snap' is unchanged +'modules/nf-core/cellpose/tests/nextflow_wflows.config' is unchanged +Changes in 'cellpose/main.nf': +--- modules/nf-core/cellpose/main.nf ++++ modules/nf-core/cellpose/main.nf +@@ -1,7 +1,7 @@ + process CELLPOSE { + tag "${meta.id}" +- label 'process_medium' +- label 'process_gpu' ++ label 'process_high' ++ label 'process_gpu_single' + + conda "${moduleDir}/environment.yml" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+@@ -45,6 +45,11 @@ + ${model_command} \\ + ${args} + ++ # Fail fast if cellpose detected zero cells ++ if grep -q "No cell pixels found" .cellpose/run.log 2>/dev/null; then ++ echo "ERROR: cellpose detected 0 cells" >&2; exit 1 ++ fi ++ + mkdir -p ${prefix} + mv *masks.tif ${prefix}/ + mv *flows.tif ${prefix}/ 2>/dev/null || true +************************************************************ diff --git a/modules/nf-core/cellpose/environment.yml b/modules/nf-core/cellpose/environment.yml new file mode 100644 index 00000000..e22d2601 --- /dev/null +++ b/modules/nf-core/cellpose/environment.yml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json +channels: + - conda-forge + - bioconda +dependencies: + - conda-forge::python=3.12.12 + - pip: + - cellpose==4.0.9 diff --git a/modules/nf-core/cellpose/main.nf b/modules/nf-core/cellpose/main.nf index f100904f..57fda819 100644 --- a/modules/nf-core/cellpose/main.nf +++ b/modules/nf-core/cellpose/main.nf @@ -1,58 +1,68 @@ process CELLPOSE { - tag "$meta.id" - label 'process_medium' + tag "${meta.id}" + label 'process_high' + label 'process_gpu_single' - container "docker.io/biocontainers/cellpose:3.0.1_cv1" + conda "${moduleDir}/environment.yml" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/cb/cb670191b7ae1a9fd5449746453916c7014b9ea622942ca76a7cb40da7deee46/data' : + 'community.wave.seqera.io/library/python_pip_cellpose:fdf7a8c3a305a26e' }" input: tuple val(meta), path(image) path(model) output: - tuple val(meta), path("*masks.tif") , emit: mask - tuple val(meta), path("*flows.tif") , emit: flows, optional: true - path "versions.yml" , emit: versions + tuple val(meta), path("${prefix}/*masks.tif"), emit: mask + tuple val(meta), path("${prefix}/*flows.tif"), emit: flows, optional: true + tuple val(meta), path("${prefix}/*seg.npy"), emit: cells, optional: true + tuple val("${task.process}"), val('cellpose'), eval("cellpose --version | sed -n 's/cellpose version:[[:space:]]*//p' | tr -d '[:space:]'"), topic: versions, emit: versions_cellpose + tuple val("${task.process}"), val('python'), eval("cellpose --version | sed -n 's/python version:[[:space:]]*//p' | tr -d '[:space:]'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('torch'), eval("cellpose --version | sed -n 's/torch version:[[:space:]]*//p' | tr -d '[:space:]'"), topic: versions, emit: versions_torch when: task.ext.when == null || task.ext.when script: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "I did not manage to create a cellpose module in Conda that works in all OSes. Please use Docker / Singularity / Podman instead." - } def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" - def model_command = model ? "--pretrained_model $model" : "" - def VERSION = '3.0.1' + def model_command = model ? "--pretrained_model ${model}" : "" + def gpu_flag = task.accelerator ? 
"--use_gpu" : "" + prefix = task.ext.prefix ?: "${meta.id}" """ + export OMP_NUM_THREADS=${task.cpus} + export MKL_NUM_THREADS=${task.cpus} + # Container runs as root with HOME=/ which is not writable + export HOME=\$PWD + export MPLCONFIGDIR=\$PWD/.matplotlib + export CELLPOSE_LOCAL_MODELS_PATH=\$PWD/.cellpose + mkdir -p \$MPLCONFIGDIR \$CELLPOSE_LOCAL_MODELS_PATH + cellpose \\ - --image_path $image \\ + --image_path ${image} \\ --save_tif \\ - $model_command \\ - $args + --verbose \\ + ${gpu_flag} \\ + ${model_command} \\ + ${args} + + # Fail fast if cellpose detected zero cells + if grep -q "No cell pixels found" .cellpose/run.log 2>/dev/null; then + echo "ERROR: cellpose detected 0 cells" >&2; exit 1 + fi - cat <<-END_VERSIONS > versions.yml - "${task.process}": - cellpose: $VERSION - END_VERSIONS + mkdir -p ${prefix} + mv *masks.tif ${prefix}/ + mv *flows.tif ${prefix}/ 2>/dev/null || true + mv *seg.npy ${prefix}/ 2>/dev/null || true """ + stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "I did not manage to create a cellpose module in Conda that works in all OSes. Please use Docker / Singularity / Podman instead." - } - def prefix = task.ext.prefix ?: "${meta.id}" - def VERSION = "3.0.1" // WARN: Version information not provided by tool on CLI. Please update this string when bumping container versions. def name = image.name def base = name.lastIndexOf('.') != -1 ? name[0..name.lastIndexOf('.') - 1] : name - """ - touch ${base}_cp_masks.tif + prefix = task.ext.prefix ?: "${meta.id}" - cat <<-END_VERSIONS > versions.yml - "${task.process}": - cellpose: $VERSION - END_VERSIONS """ - + mkdir -p ${prefix} + touch ${prefix}/${base}_cp_masks.tif + """ } diff --git a/modules/nf-core/cellpose/meta.yml b/modules/nf-core/cellpose/meta.yml index 5397944b..cb7a89a7 100644 --- a/modules/nf-core/cellpose/meta.yml +++ b/modules/nf-core/cellpose/meta.yml @@ -1,9 +1,11 @@ name: "cellpose" -description: cellpose segments cells in images +description: cellpose segments cells in images using GPU-accelerated deep learning keywords: - segmentation - image - cellpose + - gpu + - spatial-transcriptomics tools: - "cellpose": description: "cellpose is an anatomical segmentation algorithm written in Python @@ -22,41 +24,134 @@ input: (sample id) - image: type: file - description: tif file for ready for segmentation + description: tif file ready for segmentation pattern: "*.{tif,tiff}" - - - model: - type: file - description: Optional input file. Cellpose 2 model trained by user using human-in-the-loop - approach. + ontologies: [] + - model: + type: file + description: | + Optional custom cellpose model file. When provided, passed as + --pretrained_model to cellpose. Pass [] (empty list) to use the + default model (cpsam in cellpose 4). 
+ pattern: "*" + ontologies: [] output: - - mask: - - meta: + mask: + - - meta: type: map description: | Groovy Map containing sample information [sample id] - - "*masks.tif": + - "${prefix}/*masks.tif": type: file description: labelled mask output from cellpose in tif format - pattern: "*.{tif, tiff}" - - flows: - - meta: + pattern: "${prefix}/*masks.tif" + ontologies: [] + flows: + - - meta: type: map description: | Groovy Map containing sample information [sample id] - - "*flows.tif": + - "${prefix}/*flows.tif": type: file description: cell flow output from cellpose - pattern: "*.{tif}" - - versions: - - versions.yml: + pattern: "${prefix}/*flows.tif" + ontologies: [] + cells: + - - meta: + type: map + description: | + Groovy Map containing sample information + [sample id] + - "${prefix}/*seg.npy": type: file - description: File containing software versions - pattern: "versions.yml" + description: numpy array with cell segmentation data + pattern: "${prefix}/*seg.npy" + ontologies: [] + versions_cellpose: + - - ${task.process}: + type: string + description: The name of the process + - cellpose: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/cellpose version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool + versions_python: + - - ${task.process}: + type: string + description: The name of the process + - python: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/python version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool + versions_torch: + - - ${task.process}: + type: string + description: The name of the process + - torch: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/torch version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool +topics: + versions: + - - ${task.process}: + type: string + description: The name of the process + - cellpose: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/cellpose version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The name of the process + - python: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/python version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The name of the process + - torch: + type: string + description: The name of the tool + - "cellpose --version | sed -n 's/torch version:[[:space:]]*//p' | tr -d '[:space:]'": + type: eval + description: The expression to obtain the version of the tool +notes: | + When `accelerator` is set (e.g. `accelerator = 1`), the module + automatically passes `--use_gpu` to cellpose. The container (built via Seqera + Containers) includes PyTorch 2.10.0 with CUDA 12.8 and falls back to CPU + automatically when no GPU is available. Use the `process_gpu` label to request + GPU resources from your executor. When running with conda/mamba, GPU support + depends on having a CUDA-enabled PyTorch installation in your environment. 
+ + Model selection via the model input channel: + - Custom model file: file("/path/to/model") + - Default (cpsam): [] + + Additional cellpose CLI arguments can be passed via `task.ext.args`: + ext.args = '--diameter 30 --flow_threshold 0.4 --cellprob_threshold 0' + + Deprecated in cellpose 4.0.1+: `--chan`, `--chan2`, `--invert`, `--all_channels`, + `--diam_mean`, `--pretrained_model_ortho`. Do not pass these via ext.args. + + Model weights are not bundled in the container. Cellpose downloads them on first + use to `$CELLPOSE_LOCAL_MODELS_PATH` (set to the work directory). authors: - "@josenimo" - "@FloWuenne" + - "@dongzehe" maintainers: - "@josenimo" - "@FloWuenne" diff --git a/modules/nf-core/cellpose/tests/main.nf.test.snap b/modules/nf-core/cellpose/tests/main.nf.test.snap index 03b4dbfc..e76ca251 100644 --- a/modules/nf-core/cellpose/tests/main.nf.test.snap +++ b/modules/nf-core/cellpose/tests/main.nf.test.snap @@ -2,31 +2,26 @@ "flows": { "content": [ [ - [ - { - "id": "test" - }, - "cycif_tonsil_registered.ome_flows.tif:md5,de79a792d4bebd2f9753ceb47a0de5f7" - ] + ] ], "meta": { - "nf-test": "0.8.4", - "nextflow": "23.10.1" + "nf-test": "0.9.2", + "nextflow": "25.04.6" }, - "timestamp": "2024-03-18T14:22:16.855256249" + "timestamp": "2025-07-04T13:21:35.054592365" }, "versions": { "content": [ [ - "versions.yml:md5,ce42208b574084f390cf58b4c19b5717" + "versions.yml:md5,398393e73a80fc622873765256d5ec79" ] ], "meta": { - "nf-test": "0.8.4", - "nextflow": "23.10.1" + "nf-test": "0.9.2", + "nextflow": "25.04.6" }, - "timestamp": "2024-03-18T14:22:16.875087557" + "timestamp": "2025-07-04T13:21:35.145644688" }, "cellpose - stub": { "content": [ @@ -43,7 +38,7 @@ ], "2": [ - "versions.yml:md5,ce42208b574084f390cf58b4c19b5717" + "versions.yml:md5,398393e73a80fc622873765256d5ec79" ], "flows": [ @@ -57,15 +52,15 @@ ] ], "versions": [ - "versions.yml:md5,ce42208b574084f390cf58b4c19b5717" + "versions.yml:md5,398393e73a80fc622873765256d5ec79" ] } ], "meta": { - "nf-test": "0.8.4", - "nextflow": "23.10.1" + "nf-test": "0.9.2", + "nextflow": "25.04.6" }, - "timestamp": "2024-03-18T14:22:39.339792992" + "timestamp": "2025-07-04T13:21:52.227498747" }, "mask": { "content": [ @@ -74,14 +69,14 @@ { "id": "test" }, - "cycif_tonsil_registered.ome_cp_masks.tif:md5,001ad312413f18bc2615741bd3ad12cf" + "cycif_tonsil_registered.ome_cp_masks.tif:md5,b151d6718fcb770b2fa3989da632d96e" ] ] ], "meta": { - "nf-test": "0.8.4", - "nextflow": "23.10.1" + "nf-test": "0.9.2", + "nextflow": "25.04.6" }, - "timestamp": "2024-03-18T14:22:16.8369758" + "timestamp": "2025-07-04T13:21:34.95450341" } } \ No newline at end of file diff --git a/modules/nf-core/cellpose/tests/tags.yml b/modules/nf-core/cellpose/tests/tags.yml deleted file mode 100644 index 1280d1f9..00000000 --- a/modules/nf-core/cellpose/tests/tags.yml +++ /dev/null @@ -1,2 +0,0 @@ -cellpose: - - "modules/nf-core/cellpose/**" diff --git a/modules/nf-core/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf deleted file mode 100644 index 3df21765..00000000 --- a/modules/nf-core/custom/dumpsoftwareversions/main.nf +++ /dev/null @@ -1,24 +0,0 @@ -process CUSTOM_DUMPSOFTWAREVERSIONS { - label 'process_single' - - // Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container - conda "bioconda::multiqc=1.13" - container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
- 'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' : - 'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }" - - input: - path versions - - output: - path "software_versions.yml" , emit: yml - path "software_versions_mqc.yml", emit: mqc_yml - path "versions.yml" , emit: versions - - when: - task.ext.when == null || task.ext.when - - script: - def args = task.ext.args ?: '' - template 'dumpsoftwareversions.py' -} diff --git a/modules/nf-core/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/custom/dumpsoftwareversions/meta.yml deleted file mode 100644 index 60b546a0..00000000 --- a/modules/nf-core/custom/dumpsoftwareversions/meta.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: custom_dumpsoftwareversions -description: Custom module used to dump software versions within the nf-core pipeline template -keywords: - - custom - - version -tools: - - custom: - description: Custom module used to dump software versions within the nf-core pipeline template - homepage: https://github.com/nf-core/tools - documentation: https://github.com/nf-core/tools - licence: ["MIT"] -input: - - versions: - type: file - description: YML file containing software versions - pattern: "*.yml" - -output: - - yml: - type: file - description: Standard YML file containing software versions - pattern: "software_versions.yml" - - mqc_yml: - type: file - description: MultiQC custom content YML file containing software versions - pattern: "software_versions_mqc.yml" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" - -authors: - - "@drpatelh" - - "@grst" diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py deleted file mode 100755 index e55b8d43..00000000 --- a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python - - -"""Provide functions to merge multiple versions.yml files.""" - - -import platform -from textwrap import dedent - -import yaml - - -def _make_versions_html(versions): - """Generate a tabular HTML output of all versions for MultiQC.""" - html = [ - dedent( - """\\ - - - - - - - - - - """ - ) - ] - for process, tmp_versions in sorted(versions.items()): - html.append("") - for i, (tool, version) in enumerate(sorted(tmp_versions.items())): - html.append( - dedent( - f"""\\ - - - - - - """ - ) - ) - html.append("") - html.append("
</table>
    ") - return "\\n".join(html) - - -def main(): - """Load all version files and generate merged output.""" - versions_this_module = {} - versions_this_module["${task.process}"] = { - "python": platform.python_version(), - "yaml": yaml.__version__, - } - - with open("$versions") as f: - versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module - - # aggregate versions by the module name (derived from fully-qualified process name) - versions_by_module = {} - for process, process_versions in versions_by_process.items(): - module = process.split(":")[-1] - try: - if versions_by_module[module] != process_versions: - raise AssertionError( - "We assume that software versions are the same between all modules. " - "If you see this error-message it means you discovered an edge-case " - "and should open an issue in nf-core/tools. " - ) - except KeyError: - versions_by_module[module] = process_versions - - versions_by_module["Workflow"] = { - "Nextflow": "$workflow.nextflow.version", - "$workflow.manifest.name": "$workflow.manifest.version", - } - - versions_mqc = { - "id": "software_versions", - "section_name": "${workflow.manifest.name} Software Versions", - "section_href": "https://github.com/${workflow.manifest.name}", - "plot_type": "html", - "description": "are collected at run time from the software output.", - "data": _make_versions_html(versions_by_module), - } - - with open("software_versions.yml", "w") as f: - yaml.dump(versions_by_module, f, default_flow_style=False) - with open("software_versions_mqc.yml", "w") as f: - yaml.dump(versions_mqc, f, default_flow_style=False) - - with open("versions.yml", "w") as f: - yaml.dump(versions_this_module, f, default_flow_style=False) - - -if __name__ == "__main__": - main() diff --git a/modules/nf-core/multiqc/environment.yml b/modules/nf-core/multiqc/environment.yml new file mode 100644 index 00000000..009874d4 --- /dev/null +++ b/modules/nf-core/multiqc/environment.yml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json +channels: + - conda-forge + - bioconda +dependencies: + - bioconda::multiqc=1.33 diff --git a/modules/nf-core/multiqc/main.nf b/modules/nf-core/multiqc/main.nf index 68f66bea..ae8f5c86 100644 --- a/modules/nf-core/multiqc/main.nf +++ b/modules/nf-core/multiqc/main.nf @@ -1,53 +1,48 @@ process MULTIQC { + tag "${meta.id}" label 'process_single' - conda "bioconda::multiqc=1.13" - container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? - 'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' : - 'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }" + conda "${moduleDir}/environment.yml" + container "community.wave.seqera.io/library/multiqc-xenium-extra_multiqc_polars_scanpy_scipy:4e27199c6ca05c8b" input: - path multiqc_files, stageAs: "?/*" - path(multiqc_config) - path(extra_multiqc_config) - path(multiqc_logo) + tuple val(meta), path(multiqc_files, stageAs: "?/*"), path(multiqc_config, stageAs: "?/*"), path(multiqc_logo), path(replace_names), path(sample_names) output: - path "*multiqc_report.html", emit: report - path "*_data" , emit: data - path "*_plots" , optional:true, emit: plots - path "versions.yml" , emit: versions + tuple val(meta), path("*.html"), emit: report + tuple val(meta), path("*_data"), emit: data + tuple val(meta), path("*_plots"), emit: plots, optional: true + // MultiQC should not push its versions to the `versions` topic. 
Its input depends on the `versions` topic being resolved, so emitting to the topic would make the pipeline hang forever + tuple val("${task.process}"), val('multiqc'), eval('multiqc --version | sed "s/.* //g"'), emit: versions when: task.ext.when == null || task.ext.when script: def args = task.ext.args ?: '' - def config = multiqc_config ? "--config $multiqc_config" : '' - def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : '' + def prefix = task.ext.prefix ? "--filename ${task.ext.prefix}.html" : '' + def config = multiqc_config ? multiqc_config instanceof List ? "--config ${multiqc_config.join(' --config ')}" : "--config ${multiqc_config}" : "" + def logo = multiqc_logo ? "--cl-config 'custom_logo: \"${multiqc_logo}\"'" : '' + def replace = replace_names ? "--replace-names ${replace_names}" : '' + def samples = sample_names ? "--sample-names ${sample_names}" : '' """ multiqc \\ --force \\ - $args \\ - $config \\ - $extra_config \\ + ${args} \\ + ${config} \\ + ${prefix} \\ + ${logo} \\ + ${replace} \\ + ${samples} \\ . - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ stub: """ - touch multiqc_data - touch multiqc_plots + mkdir multiqc_data + touch multiqc_data/.stub + mkdir multiqc_plots + touch multiqc_plots/.stub touch multiqc_report.html - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ } diff --git a/modules/nf-core/multiqc/meta.yml b/modules/nf-core/multiqc/meta.yml index ebc29b27..ef434a9a 100644 --- a/modules/nf-core/multiqc/meta.yml +++ b/modules/nf-core/multiqc/meta.yml @@ -1,5 +1,6 @@ -name: MultiQC -description: Aggregate results from bioinformatics analyses across many samples into a single report +name: multiqc +description: Aggregate results from bioinformatics analyses across many samples + into a single report keywords: - QC - bioinformatics tools @@ -11,45 +12,122 @@ tools: It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. homepage: https://multiqc.info/ documentation: https://multiqc.info/docs/ - licence: ["GPL-3.0-or-later"] - + licence: + - "GPL-3.0-or-later" + identifier: biotools:multiqc input: - - multiqc_files: - type: file - description: | - List of reports / files recognised by MultiQC, for example the html and zip output of FastQC - - multiqc_config: - type: file - description: Optional config yml for MultiQC - pattern: "*.{yml,yaml}" - - extra_multiqc_config: - type: file - description: Second optional config yml for MultiQC. Will override common sections in multiqc_config. - pattern: "*.{yml,yaml}" - - multiqc_logo: - type: file - description: Optional logo file for MultiQC - pattern: "*.{png}" - + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1', single_end:false ] + - multiqc_files: + type: file + description: | + List of reports / files recognised by MultiQC, for example the html and zip output of FastQC + ontologies: [] + - multiqc_config: + type: file + description: Optional config yml for MultiQC + pattern: "*.{yml,yaml}" + ontologies: + - edam: http://edamontology.org/format_3750 + - multiqc_logo: + type: file + description: Optional logo file for MultiQC + pattern: "*.{png}" + ontologies: [] + - replace_names: + type: file + description: | + Optional two-column sample renaming file. 
First column a set of + patterns, second column a set of corresponding replacements. Passed via + MultiQC's `--replace-names` option. + pattern: "*.{tsv}" + ontologies: + - edam: http://edamontology.org/format_3475 + - sample_names: + type: file + description: | + Optional TSV file with headers, passed via MultiQC's + `--sample-names` option. + pattern: "*.{tsv}" + ontologies: + - edam: http://edamontology.org/format_3475 output: - - report: - type: file - description: MultiQC report file - pattern: "multiqc_report.html" - - data: - type: dir - description: MultiQC data dir - pattern: "multiqc_data" - - plots: - type: file - description: Plots created by MultiQC - pattern: "*_data" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" + report: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1', single_end:false ] + - "*.html": + type: file + description: MultiQC report file + pattern: ".html" + ontologies: [] + data: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1', single_end:false ] + - "*_data": + type: directory + description: MultiQC data dir + pattern: "multiqc_data" + plots: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'sample1', single_end:false ] + - "*_plots": + type: file + description: Plots created by MultiQC + pattern: "*_plots" + ontologies: [] + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - multiqc: + type: string + description: The tool name + - multiqc --version | sed "s/.* //g": + type: eval + description: The expression to obtain the version of the tool authors: - "@abhi18av" - "@bunop" - "@drpatelh" - "@jfy133" +maintainers: + - "@abhi18av" + - "@bunop" + - "@drpatelh" + - "@jfy133" +containers: + conda: + linux/amd64: + lock_file: https://wave.seqera.io/v1alpha1/builds/bd-ee7739d47738383b_1/condalock + linux/arm64: + lock_file: https://wave.seqera.io/v1alpha1/builds/bd-58d7dee710ab3aa8_1/condalock + docker: + linux/amd64: + build_id: bd-ee7739d47738383b_1 + name: community.wave.seqera.io/library/multiqc:1.33--ee7739d47738383b + scanId: sc-6ddec592dcadd583_4 + linux/arm64: + build_id: bd-58d7dee710ab3aa8_1 + name: community.wave.seqera.io/library/multiqc:1.33--58d7dee710ab3aa8 + scanId: sc-a04c42273e34c55c_2 + singularity: + linux/amd64: + build_id: bd-e3576ddf588fa00d_1 + https: https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/34/34e733a9ae16a27e80fe00f863ea1479c96416017f24a907996126283e7ecd4d/data + name: oras://community.wave.seqera.io/library/multiqc:1.33--e3576ddf588fa00d + linux/arm64: + build_id: bd-2537ca5f8445e3c2_1 + https: https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/78/78b89e91d89e9cc99ad5ade5be311f347838cb2acbfb4f13bc343b170be09ce4/data + name: oras://community.wave.seqera.io/library/multiqc:1.33--2537ca5f8445e3c2 diff --git a/modules/nf-core/multiqc/multiqc.diff b/modules/nf-core/multiqc/multiqc.diff new file mode 100644 index 00000000..a208f7c0 --- /dev/null +++ b/modules/nf-core/multiqc/multiqc.diff @@ -0,0 +1,23 @@ +Changes in component 'nf-core/multiqc' +Changes in 'multiqc/main.nf': +--- modules/nf-core/multiqc/main.nf ++++ modules/nf-core/multiqc/main.nf +@@ -3,9 +3,7 @@ + label 'process_single' + + conda "${moduleDir}/environment.yml" +- container "${workflow.containerEngine == 'singularity' && 
!task.ext.singularity_pull_docker_container +- ? 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/34/34e733a9ae16a27e80fe00f863ea1479c96416017f24a907996126283e7ecd4d/data' +- : 'community.wave.seqera.io/library/multiqc:1.33--ee7739d47738383b'}" ++ container "community.wave.seqera.io/library/multiqc-xenium-extra_multiqc_polars_scanpy_scipy:4e27199c6ca05c8b" + + input: + tuple val(meta), path(multiqc_files, stageAs: "?/*"), path(multiqc_config, stageAs: "?/*"), path(multiqc_logo), path(replace_names), path(sample_names) + +'modules/nf-core/multiqc/environment.yml' is unchanged +'modules/nf-core/multiqc/meta.yml' is unchanged +'modules/nf-core/multiqc/tests/nextflow.config' is unchanged +'modules/nf-core/multiqc/tests/main.nf.test' is unchanged +'modules/nf-core/multiqc/tests/main.nf.test.snap' is unchanged +'modules/nf-core/multiqc/tests/custom_prefix.config' is unchanged +************************************************************ diff --git a/modules/nf-core/multiqc/tests/custom_prefix.config b/modules/nf-core/multiqc/tests/custom_prefix.config new file mode 100644 index 00000000..b30b1358 --- /dev/null +++ b/modules/nf-core/multiqc/tests/custom_prefix.config @@ -0,0 +1,5 @@ +process { + withName: 'MULTIQC' { + ext.prefix = "custom_prefix" + } +} diff --git a/modules/nf-core/multiqc/tests/main.nf.test b/modules/nf-core/multiqc/tests/main.nf.test new file mode 100644 index 00000000..4cbdb95d --- /dev/null +++ b/modules/nf-core/multiqc/tests/main.nf.test @@ -0,0 +1,211 @@ +nextflow_process { + + name "Test Process MULTIQC" + script "../main.nf" + process "MULTIQC" + + tag "modules" + tag "modules_nfcore" + tag "multiqc" + + config "./nextflow.config" + + test("sarscov2 single-end [fastqc]") { + + when { + process { + """ + input[0] = channel.of([ + [ id: 'FASTQC' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true), + [], + [], + [], + [] + ]) + """ + } + } + + then { + assert process.success + assert snapshot( + sanitizeOutput(process.out).collectEntries { key, val -> + if (key == "data") { + return [key, val.collect { [path(it[1]).list().collect { file(it.toString()).name }] }] + } + else if (key == "plots") { + return [key, val.collect { [ + "pdf", + path("${it[1]}/pdf").list().collect { file(it.toString()).name }, + "png", + path("${it[1]}/png").list().collect { file(it.toString()).name }, + "svg", + path("${it[1]}/svg").list().collect { file(it.toString()).name }] }] + } + else if (key == "report") { + return [key, file(val[0][1].toString()).name] + } + return [key, val] + } + ).match() + } + } + + test("sarscov2 single-end [fastqc] - custom prefix") { + config "./custom_prefix.config" + + when { + process { + """ + input[0] = channel.of([ + [ id: 'FASTQC' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true), + [], + [], + [], + [] + ]) + """ + } + } + + then { + assert process.success + assert snapshot( + sanitizeOutput(process.out).collectEntries { key, val -> + if (key == "data") { + return [key, val.collect { [path(it[1]).list().collect { file(it.toString()).name }] }] + } + else if (key == "plots") { + return [key, val.collect { [ + "pdf", + path("${it[1]}/pdf").list().collect { file(it.toString()).name }, + "png", + path("${it[1]}/png").list().collect { file(it.toString()).name }, + "svg", + path("${it[1]}/svg").list().collect { file(it.toString()).name }] }] + } + else if (key == "report") { + return [key, 
file(val[0][1].toString()).name] + } + return [key, val] + } + ).match() + } + } + + test("sarscov2 single-end [fastqc] [config]") { + + when { + process { + """ + input[0] = channel.of([ + [ id: 'FASTQC' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true), + file("https://raw.githubusercontent.com/nf-core/seqinspector/1.0.0/assets/multiqc_config.yml", checkIfExists: true), + [], + [], + [] + ]) + """ + } + } + + then { + assert process.success + assert snapshot( + sanitizeOutput(process.out).collectEntries { key, val -> + if (key == "data") { + return [key, val.collect { [path(it[1]).list().collect { file(it.toString()).name }] }] + } + else if (key == "plots") { + return [key, val.collect { [ + "pdf", + path("${it[1]}/pdf").list().collect { file(it.toString()).name }, + "png", + path("${it[1]}/png").list().collect { file(it.toString()).name }, + "svg", + path("${it[1]}/svg").list().collect { file(it.toString()).name }] }] + } + else if (key == "report") { + return [key, file(val[0][1].toString()).name] + } + return [key, val] + } + ).match() + } + } + + test("sarscov2 single-end [fastqc] [multiple configs]") { + + when { + process { + """ + input[0] = channel.of([ + [ id: 'FASTQC' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true), + [ + file("https://raw.githubusercontent.com/nf-core/seqinspector/1.0.0/assets/multiqc_config.yml", checkIfExists: true), + file("https://raw.githubusercontent.com/nf-core/seqinspector/1.0.0/assets/multiqc_config.yml", checkIfExists: true) + ], + [], + [], + [] + ]) + """ + } + } + + then { + assert process.success + assert snapshot( + sanitizeOutput(process.out).collectEntries { key, val -> + if (key == "data") { + return [key, val.collect { [path(it[1]).list().collect { file(it.toString()).name }] }] + } + else if (key == "plots") { + return [key, val.collect { [ + "pdf", + path("${it[1]}/pdf").list().collect { file(it.toString()).name }, + "png", + path("${it[1]}/png").list().collect { file(it.toString()).name }, + "svg", + path("${it[1]}/svg").list().collect { file(it.toString()).name }] }] + } + else if (key == "report") { + return [key, file(val[0][1].toString()).name] + } + return [key, val] + } + ).match() + } + } + + test("sarscov2 single-end [fastqc] - stub") { + + options "-stub" + + when { + process { + """ + input[0] = channel.of([ + [ id: 'FASTQC' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true), + [], + [], + [], + [] + ]) + """ + } + } + + then { + assert process.success + assertAll( + { assert snapshot(sanitizeOutput(process.out)).match() } + ) + } + } +} diff --git a/modules/nf-core/multiqc/tests/main.nf.test.snap b/modules/nf-core/multiqc/tests/main.nf.test.snap new file mode 100644 index 00000000..3bfc524f --- /dev/null +++ b/modules/nf-core/multiqc/tests/main.nf.test.snap @@ -0,0 +1,422 @@ +{ + "sarscov2 single-end [fastqc] [multiple configs]": { + "content": [ + { + "data": [ + [ + [ + "fastqc-status-check-heatmap.txt", + "fastqc_overrepresented_sequences_plot.txt", + "fastqc_per_base_n_content_plot.txt", + "fastqc_per_base_sequence_quality_plot.txt", + "fastqc_per_sequence_gc_content_plot_Counts.txt", + "fastqc_per_sequence_gc_content_plot_Percentages.txt", + "fastqc_per_sequence_quality_scores_plot.txt", + "fastqc_sequence_counts_plot.txt", + "fastqc_sequence_duplication_levels_plot.txt", + 
"fastqc_sequence_length_distribution_plot.txt", + "fastqc_top_overrepresented_sequences_table.txt", + "llms-full.txt", + "multiqc.log", + "multiqc.parquet", + "multiqc_citations.txt", + "multiqc_data.json", + "multiqc_fastqc.txt", + "multiqc_general_stats.txt", + "multiqc_sources.txt" + ] + ] + ], + "plots": [ + [ + "pdf", + [ + "fastqc-status-check-heatmap.pdf", + "fastqc_overrepresented_sequences_plot.pdf", + "fastqc_per_base_n_content_plot.pdf", + "fastqc_per_base_sequence_quality_plot.pdf", + "fastqc_per_sequence_gc_content_plot_Counts.pdf", + "fastqc_per_sequence_gc_content_plot_Percentages.pdf", + "fastqc_per_sequence_quality_scores_plot.pdf", + "fastqc_sequence_counts_plot-cnt.pdf", + "fastqc_sequence_counts_plot-pct.pdf", + "fastqc_sequence_duplication_levels_plot.pdf", + "fastqc_sequence_length_distribution_plot.pdf", + "fastqc_top_overrepresented_sequences_table.pdf" + ], + "png", + [ + "fastqc-status-check-heatmap.png", + "fastqc_overrepresented_sequences_plot.png", + "fastqc_per_base_n_content_plot.png", + "fastqc_per_base_sequence_quality_plot.png", + "fastqc_per_sequence_gc_content_plot_Counts.png", + "fastqc_per_sequence_gc_content_plot_Percentages.png", + "fastqc_per_sequence_quality_scores_plot.png", + "fastqc_sequence_counts_plot-cnt.png", + "fastqc_sequence_counts_plot-pct.png", + "fastqc_sequence_duplication_levels_plot.png", + "fastqc_sequence_length_distribution_plot.png", + "fastqc_top_overrepresented_sequences_table.png" + ], + "svg", + [ + "fastqc-status-check-heatmap.svg", + "fastqc_overrepresented_sequences_plot.svg", + "fastqc_per_base_n_content_plot.svg", + "fastqc_per_base_sequence_quality_plot.svg", + "fastqc_per_sequence_gc_content_plot_Counts.svg", + "fastqc_per_sequence_gc_content_plot_Percentages.svg", + "fastqc_per_sequence_quality_scores_plot.svg", + "fastqc_sequence_counts_plot-cnt.svg", + "fastqc_sequence_counts_plot-pct.svg", + "fastqc_sequence_duplication_levels_plot.svg", + "fastqc_sequence_length_distribution_plot.svg", + "fastqc_top_overrepresented_sequences_table.svg" + ] + ] + ], + "report": "multiqc_report.html", + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } + ], + "timestamp": "2026-03-17T16:15:42.577775492", + "meta": { + "nf-test": "0.9.4", + "nextflow": "25.10.4" + } + }, + "sarscov2 single-end [fastqc]": { + "content": [ + { + "data": [ + [ + [ + "fastqc-status-check-heatmap.txt", + "fastqc_overrepresented_sequences_plot.txt", + "fastqc_per_base_n_content_plot.txt", + "fastqc_per_base_sequence_quality_plot.txt", + "fastqc_per_sequence_gc_content_plot_Counts.txt", + "fastqc_per_sequence_gc_content_plot_Percentages.txt", + "fastqc_per_sequence_quality_scores_plot.txt", + "fastqc_sequence_counts_plot.txt", + "fastqc_sequence_duplication_levels_plot.txt", + "fastqc_sequence_length_distribution_plot.txt", + "fastqc_top_overrepresented_sequences_table.txt", + "llms-full.txt", + "multiqc.log", + "multiqc.parquet", + "multiqc_citations.txt", + "multiqc_data.json", + "multiqc_fastqc.txt", + "multiqc_general_stats.txt", + "multiqc_software_versions.txt", + "multiqc_sources.txt" + ] + ] + ], + "plots": [ + [ + "pdf", + [ + "fastqc-status-check-heatmap.pdf", + "fastqc_overrepresented_sequences_plot.pdf", + "fastqc_per_base_n_content_plot.pdf", + "fastqc_per_base_sequence_quality_plot.pdf", + "fastqc_per_sequence_gc_content_plot_Counts.pdf", + "fastqc_per_sequence_gc_content_plot_Percentages.pdf", + "fastqc_per_sequence_quality_scores_plot.pdf", + "fastqc_sequence_counts_plot-cnt.pdf", + "fastqc_sequence_counts_plot-pct.pdf", + 
"fastqc_sequence_duplication_levels_plot.pdf", + "fastqc_sequence_length_distribution_plot.pdf", + "fastqc_top_overrepresented_sequences_table.pdf" + ], + "png", + [ + "fastqc-status-check-heatmap.png", + "fastqc_overrepresented_sequences_plot.png", + "fastqc_per_base_n_content_plot.png", + "fastqc_per_base_sequence_quality_plot.png", + "fastqc_per_sequence_gc_content_plot_Counts.png", + "fastqc_per_sequence_gc_content_plot_Percentages.png", + "fastqc_per_sequence_quality_scores_plot.png", + "fastqc_sequence_counts_plot-cnt.png", + "fastqc_sequence_counts_plot-pct.png", + "fastqc_sequence_duplication_levels_plot.png", + "fastqc_sequence_length_distribution_plot.png", + "fastqc_top_overrepresented_sequences_table.png" + ], + "svg", + [ + "fastqc-status-check-heatmap.svg", + "fastqc_overrepresented_sequences_plot.svg", + "fastqc_per_base_n_content_plot.svg", + "fastqc_per_base_sequence_quality_plot.svg", + "fastqc_per_sequence_gc_content_plot_Counts.svg", + "fastqc_per_sequence_gc_content_plot_Percentages.svg", + "fastqc_per_sequence_quality_scores_plot.svg", + "fastqc_sequence_counts_plot-cnt.svg", + "fastqc_sequence_counts_plot-pct.svg", + "fastqc_sequence_duplication_levels_plot.svg", + "fastqc_sequence_length_distribution_plot.svg", + "fastqc_top_overrepresented_sequences_table.svg" + ] + ] + ], + "report": "multiqc_report.html", + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } + ], + "timestamp": "2026-03-17T16:21:17.072841555", + "meta": { + "nf-test": "0.9.4", + "nextflow": "25.10.4" + } + }, + "sarscov2 single-end [fastqc] - stub": { + "content": [ + { + "data": [ + [ + { + "id": "FASTQC" + }, + [ + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "plots": [ + [ + { + "id": "FASTQC" + }, + [ + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "report": [ + [ + { + "id": "FASTQC" + }, + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } + ], + "timestamp": "2026-02-26T15:14:39.789193051", + "meta": { + "nf-test": "0.9.4", + "nextflow": "25.10.4" + } + }, + "sarscov2 single-end [fastqc] [config]": { + "content": [ + { + "data": [ + [ + [ + "fastqc-status-check-heatmap.txt", + "fastqc_overrepresented_sequences_plot.txt", + "fastqc_per_base_n_content_plot.txt", + "fastqc_per_base_sequence_quality_plot.txt", + "fastqc_per_sequence_gc_content_plot_Counts.txt", + "fastqc_per_sequence_gc_content_plot_Percentages.txt", + "fastqc_per_sequence_quality_scores_plot.txt", + "fastqc_sequence_counts_plot.txt", + "fastqc_sequence_duplication_levels_plot.txt", + "fastqc_sequence_length_distribution_plot.txt", + "fastqc_top_overrepresented_sequences_table.txt", + "llms-full.txt", + "multiqc.log", + "multiqc.parquet", + "multiqc_citations.txt", + "multiqc_data.json", + "multiqc_fastqc.txt", + "multiqc_general_stats.txt", + "multiqc_sources.txt" + ] + ] + ], + "plots": [ + [ + "pdf", + [ + "fastqc-status-check-heatmap.pdf", + "fastqc_overrepresented_sequences_plot.pdf", + "fastqc_per_base_n_content_plot.pdf", + "fastqc_per_base_sequence_quality_plot.pdf", + "fastqc_per_sequence_gc_content_plot_Counts.pdf", + "fastqc_per_sequence_gc_content_plot_Percentages.pdf", + "fastqc_per_sequence_quality_scores_plot.pdf", + "fastqc_sequence_counts_plot-cnt.pdf", + "fastqc_sequence_counts_plot-pct.pdf", + "fastqc_sequence_duplication_levels_plot.pdf", + "fastqc_sequence_length_distribution_plot.pdf", + "fastqc_top_overrepresented_sequences_table.pdf" + ], + "png", + [ + 
"fastqc-status-check-heatmap.png", + "fastqc_overrepresented_sequences_plot.png", + "fastqc_per_base_n_content_plot.png", + "fastqc_per_base_sequence_quality_plot.png", + "fastqc_per_sequence_gc_content_plot_Counts.png", + "fastqc_per_sequence_gc_content_plot_Percentages.png", + "fastqc_per_sequence_quality_scores_plot.png", + "fastqc_sequence_counts_plot-cnt.png", + "fastqc_sequence_counts_plot-pct.png", + "fastqc_sequence_duplication_levels_plot.png", + "fastqc_sequence_length_distribution_plot.png", + "fastqc_top_overrepresented_sequences_table.png" + ], + "svg", + [ + "fastqc-status-check-heatmap.svg", + "fastqc_overrepresented_sequences_plot.svg", + "fastqc_per_base_n_content_plot.svg", + "fastqc_per_base_sequence_quality_plot.svg", + "fastqc_per_sequence_gc_content_plot_Counts.svg", + "fastqc_per_sequence_gc_content_plot_Percentages.svg", + "fastqc_per_sequence_quality_scores_plot.svg", + "fastqc_sequence_counts_plot-cnt.svg", + "fastqc_sequence_counts_plot-pct.svg", + "fastqc_sequence_duplication_levels_plot.svg", + "fastqc_sequence_length_distribution_plot.svg", + "fastqc_top_overrepresented_sequences_table.svg" + ] + ] + ], + "report": "multiqc_report.html", + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } + ], + "timestamp": "2026-03-17T16:15:30.372239611", + "meta": { + "nf-test": "0.9.4", + "nextflow": "25.10.4" + } + }, + "sarscov2 single-end [fastqc] - custom prefix": { + "content": [ + { + "data": [ + [ + [ + "fastqc-status-check-heatmap.txt", + "fastqc_overrepresented_sequences_plot.txt", + "fastqc_per_base_n_content_plot.txt", + "fastqc_per_base_sequence_quality_plot.txt", + "fastqc_per_sequence_gc_content_plot_Counts.txt", + "fastqc_per_sequence_gc_content_plot_Percentages.txt", + "fastqc_per_sequence_quality_scores_plot.txt", + "fastqc_sequence_counts_plot.txt", + "fastqc_sequence_duplication_levels_plot.txt", + "fastqc_sequence_length_distribution_plot.txt", + "fastqc_top_overrepresented_sequences_table.txt", + "llms-full.txt", + "multiqc.log", + "multiqc.parquet", + "multiqc_citations.txt", + "multiqc_data.json", + "multiqc_fastqc.txt", + "multiqc_general_stats.txt", + "multiqc_software_versions.txt", + "multiqc_sources.txt" + ] + ] + ], + "plots": [ + [ + "pdf", + [ + "fastqc-status-check-heatmap.pdf", + "fastqc_overrepresented_sequences_plot.pdf", + "fastqc_per_base_n_content_plot.pdf", + "fastqc_per_base_sequence_quality_plot.pdf", + "fastqc_per_sequence_gc_content_plot_Counts.pdf", + "fastqc_per_sequence_gc_content_plot_Percentages.pdf", + "fastqc_per_sequence_quality_scores_plot.pdf", + "fastqc_sequence_counts_plot-cnt.pdf", + "fastqc_sequence_counts_plot-pct.pdf", + "fastqc_sequence_duplication_levels_plot.pdf", + "fastqc_sequence_length_distribution_plot.pdf", + "fastqc_top_overrepresented_sequences_table.pdf" + ], + "png", + [ + "fastqc-status-check-heatmap.png", + "fastqc_overrepresented_sequences_plot.png", + "fastqc_per_base_n_content_plot.png", + "fastqc_per_base_sequence_quality_plot.png", + "fastqc_per_sequence_gc_content_plot_Counts.png", + "fastqc_per_sequence_gc_content_plot_Percentages.png", + "fastqc_per_sequence_quality_scores_plot.png", + "fastqc_sequence_counts_plot-cnt.png", + "fastqc_sequence_counts_plot-pct.png", + "fastqc_sequence_duplication_levels_plot.png", + "fastqc_sequence_length_distribution_plot.png", + "fastqc_top_overrepresented_sequences_table.png" + ], + "svg", + [ + "fastqc-status-check-heatmap.svg", + "fastqc_overrepresented_sequences_plot.svg", + "fastqc_per_base_n_content_plot.svg", + 
"fastqc_per_base_sequence_quality_plot.svg", + "fastqc_per_sequence_gc_content_plot_Counts.svg", + "fastqc_per_sequence_gc_content_plot_Percentages.svg", + "fastqc_per_sequence_quality_scores_plot.svg", + "fastqc_sequence_counts_plot-cnt.svg", + "fastqc_sequence_counts_plot-pct.svg", + "fastqc_sequence_duplication_levels_plot.svg", + "fastqc_sequence_length_distribution_plot.svg", + "fastqc_top_overrepresented_sequences_table.svg" + ] + ] + ], + "report": "custom_prefix.html", + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } + ], + "timestamp": "2026-03-17T16:15:18.189023981", + "meta": { + "nf-test": "0.9.4", + "nextflow": "25.10.4" + } + } +} \ No newline at end of file diff --git a/modules/nf-core/multiqc/tests/nextflow.config b/modules/nf-core/multiqc/tests/nextflow.config new file mode 100644 index 00000000..374dfef2 --- /dev/null +++ b/modules/nf-core/multiqc/tests/nextflow.config @@ -0,0 +1,6 @@ +process { + withName: 'MULTIQC' { + ext.prefix = null + ext.args = '-p' + } +} diff --git a/modules/nf-core/opt/flip/main.nf b/modules/nf-core/opt/flip/main.nf new file mode 100644 index 00000000..66be07d0 --- /dev/null +++ b/modules/nf-core/opt/flip/main.nf @@ -0,0 +1,60 @@ +process OPT_FLIP { + tag "$meta.id" + label 'process_high' + + container "khersameesh24/opt:v0.0.1" + + input: + tuple val(meta), path(probes_fasta) + tuple val(meta2), path(ref_annot_gff), path(ref_annot_fa) + + output: + tuple val(meta), path("${prefix}/fwd_oriented.fa"), emit: fwd_oriented_fa + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_FLIP module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + """ + opt \\ + -o ${prefix} \\ + -p ${task.cpus} \\ + flip \\ + -q ${probes_fasta} \\ + -a ${ref_annot_gff} \\ + -t ${ref_annot_fa} \\ + ${args} + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_FLIP module does not support Conda. Please use Docker / Singularity / Podman instead." 
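+ // (Sketch for illustration only, not part of the module: the guard above can
+ // equivalently be written with Groovy's any(), e.g.
+ // if (workflow.profile.tokenize(',').any { it in ['conda', 'mamba'] }) { error "OPT_FLIP module does not support Conda." }
+ // both forms fail fast before the stub script runs under conda/mamba.)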
+ } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/fwd_oriented.fa" + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ +} diff --git a/modules/nf-core/opt/flip/meta.yml b/modules/nf-core/opt/flip/meta.yml new file mode 100644 index 00000000..f0e4c57c --- /dev/null +++ b/modules/nf-core/opt/flip/meta.yml @@ -0,0 +1,68 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/meta-schema.json +name: "opt_flip" +description: "flip corrects probes that align to the opposite strand of their intended target genes by reverse-complementing them" +keywords: + - opt + - opt flip + - transcripts + - off-target probes + - align probes +tools: + - "opt": + description: "opt is a simple program that aligns probe sequences to transcript sequences to detect potential off-target probe activity" + homepage: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + documentation: "https://github.com/JEFworks-Lab/off-target-probe-tracker/blob/main/README.md" + tool_dev_url: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + licence: [GPL-3.0 license] + +input: + - - meta: + type: map + description: | + Groovy Map containing sample information of the probe panel sequences used for the Xenium experiment + e.g. `[ id:'breast_cancer_probe_panel_sequences' ]` + - probes_fasta: + type: file + description: Fasta file for the probe sequences used in the Xenium experiment + pattern: "*.fasta" + ontologies: [] + - - meta2: + type: map + description: | + Groovy Map containing the information of the genomic features and fasta files used as references + e.g. `[ id:'gencode_references' ]` + - ref_annot_gff: + type: file + description: Reference annotations in gff format + pattern: "*.gff" + ontologies: [] + - ref_annot_fa: + type: file + description: Reference annotations in fasta format + pattern: "*.fa" + ontologies: [] + +output: + fwd_oriented_fa: + - - meta: + type: map + description: | + Groovy Map containing information of the forward oriented fasta generated from the probe panel sequences with 'opt flip' + e.g. `[ id:'breast_cancer_probe_panel_sequences' ]` + - "${meta.id}/fwd_oriented.fa": + type: file + description: The forward oriented fasta file + pattern: "*.fa" + ontologies: [] + versions: + - "versions.yml": + type: file + description: File containing software versions + pattern: "versions.yml" + ontologies: + - edam: "http://edamontology.org/format_3750" # YAML + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/nf-core/opt/flip/opt-flip.diff b/modules/nf-core/opt/flip/opt-flip.diff new file mode 100644 index 00000000..09c6098d --- /dev/null +++ b/modules/nf-core/opt/flip/opt-flip.diff @@ -0,0 +1,48 @@ +Changes in component 'nf-core/opt/flip' +Changes in 'opt/flip/main.nf': +--- modules/nf-core/opt/flip/main.nf ++++ modules/nf-core/opt/flip/main.nf +@@ -9,8 +9,8 @@ + tuple val(meta2), path(ref_annot_gff), path(ref_annot_fa) + + output: +- tuple val(meta), path("${meta.id}/fwd_oriented.fa"), emit: fwd_oriented_fa +- path "versions.yml" , emit: versions ++ tuple val(meta), path("${prefix}/fwd_oriented.fa"), emit: fwd_oriented_fa ++ path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when +@@ -20,9 +20,10 @@ + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_FLIP module does not support Conda. Please use Docker / Singularity / Podman instead." 
+ } +- def args = task.ext.args ?: '' +- def prefix = task.ext.prefix ?: "${meta.id}" + ++ def args = task.ext.args ?: '' ++ prefix = task.ext.prefix ?: "${meta.id}" ++ + """ + opt \\ + -o ${prefix} \\ +@@ -40,8 +41,13 @@ + """ + + stub: +- def prefix = task.ext.prefix ?: "${meta.id}" ++ // Exit if running this module with -profile conda / -profile mamba ++ if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { ++ error "OPT_FLIP module does not support Conda. Please use Docker / Singularity / Podman instead." ++ } + ++ prefix = task.ext.prefix ?: "${meta.id}" ++ + """ + mkdir -p ${prefix} + touch "${prefix}/fwd_oriented.fa" + +'modules/nf-core/opt/flip/meta.yml' is unchanged +'modules/nf-core/opt/flip/tests/main.nf.test' is unchanged +'modules/nf-core/opt/flip/tests/main.nf.test.snap' is unchanged +************************************************************ diff --git a/modules/nf-core/opt/flip/tests/main.nf.test b/modules/nf-core/opt/flip/tests/main.nf.test new file mode 100644 index 00000000..77fd9ef4 --- /dev/null +++ b/modules/nf-core/opt/flip/tests/main.nf.test @@ -0,0 +1,61 @@ +nextflow_process { + + name "Test Process OPT_FLIP" + script "../main.nf" + process "OPT_FLIP" + + tag "modules" + tag "modules_nfcore" + tag "opt" + tag "opt/flip" + + test("testrun panel probe sequences") { + + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/panel_probe_sequences.fasta', checkIfExists: true) + ] + input[1] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.gtf', checkIfExists: true), + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.fasta', checkIfExists: true) + ] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } + + test("testrun panel probe sequences -stub") { + options "-stub" + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/panel_probe_sequences.fasta', checkIfExists: true) + ] + input[1] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.gtf', checkIfExists: true), + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.fasta', checkIfExists: true) + ] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } +} diff --git a/modules/nf-core/opt/flip/tests/main.nf.test.snap b/modules/nf-core/opt/flip/tests/main.nf.test.snap new file mode 100644 index 00000000..4bc5b35e --- /dev/null +++ b/modules/nf-core/opt/flip/tests/main.nf.test.snap @@ -0,0 +1,68 @@ +{ + "testrun panel probe sequences -stub": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "fwd_oriented.fa:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "1": [ + "versions.yml:md5,57dbc672cf0bf40854c18d241f2d7d2e" + ], + "fwd_oriented_fa": [ + [ + { + "id": "test_run" + }, + "fwd_oriented.fa:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions": [ + "versions.yml:md5,57dbc672cf0bf40854c18d241f2d7d2e" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:43:08.105988801" + }, + "testrun panel probe sequences": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "fwd_oriented.fa:md5,535289c04851ad94e091ec7c14ff6bcd" + ] + ], + "1": [ + 
"versions.yml:md5,57dbc672cf0bf40854c18d241f2d7d2e" + ], + "fwd_oriented_fa": [ + [ + { + "id": "test_run" + }, + "fwd_oriented.fa:md5,535289c04851ad94e091ec7c14ff6bcd" + ] + ], + "versions": [ + "versions.yml:md5,57dbc672cf0bf40854c18d241f2d7d2e" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:47:41.966183182" + } +} \ No newline at end of file diff --git a/modules/nf-core/opt/stat/main.nf b/modules/nf-core/opt/stat/main.nf new file mode 100644 index 00000000..e8de5860 --- /dev/null +++ b/modules/nf-core/opt/stat/main.nf @@ -0,0 +1,60 @@ +process OPT_STAT { + tag "$meta.id" + label 'process_high' + + container "khersameesh24/opt:v0.0.1" + + input: + tuple val(meta), path(probe_targets) + tuple val(meta2), path(fwd_oriented_probes) + path(gene_synonyms) + + output: + tuple val(meta), path("${prefix}/collapsed_summary.tsv"), emit: summary + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_STAT module does not support Conda. Please use Docker / Singularity / Podman instead." + } + def args = task.ext.args ?: '' + def synonyms = gene_synonyms ? "-s ${gene_synonyms}": "" + prefix = task.ext.prefix ?: "${meta.id}" + + """ + opt \\ + -o ${prefix} \\ + stat \\ + -i ${probe_targets} \\ + -q ${fwd_oriented_probes} \\ + ${synonyms} \\ + ${args} + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_STAT module does not support Conda. Please use Docker / Singularity / Podman instead." + } + + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/collapsed_summary.tsv" + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ +} diff --git a/modules/nf-core/opt/stat/meta.yml b/modules/nf-core/opt/stat/meta.yml new file mode 100644 index 00000000..ae2d712b --- /dev/null +++ b/modules/nf-core/opt/stat/meta.yml @@ -0,0 +1,72 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/meta-schema.json +name: "opt_stat" +description: "stat summarizes opt binding predictions" +keywords: + - opt + - opt stat + - transcripts + - binding predictions + - off-target probes + - align probes + - summary stats +tools: + - "opt": + description: "opt is a simple program that aligns probe sequences to transcript + sequences to detect potential off-target probe activity" + homepage: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + documentation: "https://github.com/JEFworks-Lab/off-target-probe-tracker/blob/main/README.md" + tool_dev_url: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + licence: [GPL-3.0 license] + identifier: "" + +input: + - - meta: + type: map + description: | + Groovy Map containing information of the probe targets generated from the panel sequences with `opt track` + e.g. 
`[ id:'breast_cancer_probe_panel_sequences' ]` + - probe_targets: + type: file + description: Generated probe targets + pattern: "*.tsv" + ontologies: [] + - - meta2: + type: map + description: | + Groovy Map containing information of the forward oriented fasta generated with the probes panel sequences 'opt flip' + e.g. `[ id:'breast_cancer_probe_panel_sequences' ]` + - fwd_oriented_probes: + type: file + description: The forward oriented fasta file + pattern: "*.fa" + ontologies: [] + - gene_synonyms: + type: file + description: Gene synonyms that may have been counted as off-targets but + simply differ in name (optional input) + pattern: "*.csv" + ontologies: [] +output: + summary: + - - meta: + type: map + description: | + Groovy Map containing summary of the forward oriented probes generated with the panel sequences 'opt flip and track' + e.g. `[ id:'breast_cancer_probe_panel_sequences' ]` + - "${meta.id}/collapsed_summary.tsv": + type: file + description: tsv file containing the summary stats + pattern: "*.tsv" + ontologies: [] + versions: + - versions.yml: + type: file + description: File containing software versions + pattern: "versions.yml" + ontologies: + - edam: "http://edamontology.org/format_3750" # YAML + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/nf-core/opt/stat/opt-stat.diff b/modules/nf-core/opt/stat/opt-stat.diff new file mode 100644 index 00000000..348e77b5 --- /dev/null +++ b/modules/nf-core/opt/stat/opt-stat.diff @@ -0,0 +1,44 @@ +Changes in component 'nf-core/opt/stat' +Changes in 'opt/stat/main.nf': +--- modules/nf-core/opt/stat/main.nf ++++ modules/nf-core/opt/stat/main.nf +@@ -10,8 +10,8 @@ + path(gene_synonyms) + + output: +- tuple val(meta), path("${meta.id}/collapsed_summary.tsv"), emit: summary +- path "versions.yml" , emit: versions ++ tuple val(meta), path("${prefix}/collapsed_summary.tsv"), emit: summary ++ path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when +@@ -22,8 +22,8 @@ + error "OPT_STAT module does not support Conda. Please use Docker / Singularity / Podman instead." + } + def args = task.ext.args ?: '' +- def prefix = task.ext.prefix ?: "${meta.id}" + def synonyms = gene_synonyms ? "-s ${gene_synonyms}": "" ++ prefix = task.ext.prefix ?: "${meta.id}" + + """ + opt \\ +@@ -41,7 +41,12 @@ + """ + + stub: +- def prefix = task.ext.prefix ?: "${meta.id}" ++ // Exit if running this module with -profile conda / -profile mamba ++ if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { ++ error "OPT_STAT module does not support Conda. Please use Docker / Singularity / Podman instead." 
++ } ++ ++ prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + +'modules/nf-core/opt/stat/meta.yml' is unchanged +'modules/nf-core/opt/stat/tests/main.nf.test' is unchanged +'modules/nf-core/opt/stat/tests/main.nf.test.snap' is unchanged +************************************************************ diff --git a/modules/nf-core/opt/stat/tests/main.nf.test b/modules/nf-core/opt/stat/tests/main.nf.test new file mode 100644 index 00000000..5204a81f --- /dev/null +++ b/modules/nf-core/opt/stat/tests/main.nf.test @@ -0,0 +1,63 @@ +nextflow_process { + + name "Test Process OPT_STAT" + script "../main.nf" + process "OPT_STAT" + + tag "modules" + tag "modules_nfcore" + tag "opt" + tag "opt/stat" + + test("testrun panel probe sequences") { + + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/probe2targets.tsv', checkIfExists: true) + ] + input[1] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/fwd_oriented.fa', checkIfExists: true) + ] + input[2] = [] + // input[2] = [file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/gene_synonyms.csv', checkIfExists: true)] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } + + test("testrun panel probe sequences -stub") { + options "-stub" + when { + process { + """ + input[0] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/probe2targets.tsv', checkIfExists: true) + ] + input[1] = [ + [ id:'test_run' ], + file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/fwd_oriented.fa', checkIfExists: true) + ] + input[2] = [] + // input[2] = [file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/gene_synonyms.csv', checkIfExists: true)] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } +} diff --git a/modules/nf-core/opt/stat/tests/main.nf.test.snap b/modules/nf-core/opt/stat/tests/main.nf.test.snap new file mode 100644 index 00000000..b14671fe --- /dev/null +++ b/modules/nf-core/opt/stat/tests/main.nf.test.snap @@ -0,0 +1,68 @@ +{ + "testrun panel probe sequences -stub": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "collapsed_summary.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "1": [ + "versions.yml:md5,a23b08ea3b2da18863a5611dd0adbaa1" + ], + "summary": [ + [ + { + "id": "test_run" + }, + "collapsed_summary.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions": [ + "versions.yml:md5,a23b08ea3b2da18863a5611dd0adbaa1" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:23:32.959930982" + }, + "testrun panel probe sequences": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "collapsed_summary.tsv:md5,b2884d9c8c89124d3cbbbc1223d81c99" + ] + ], + "1": [ + "versions.yml:md5,a23b08ea3b2da18863a5611dd0adbaa1" + ], + "summary": [ + [ + { + "id": "test_run" + }, + "collapsed_summary.tsv:md5,b2884d9c8c89124d3cbbbc1223d81c99" + ] + ], + "versions": [ + "versions.yml:md5,a23b08ea3b2da18863a5611dd0adbaa1" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:58:57.17786325" + } +} \ No newline at end of file diff --git a/modules/nf-core/opt/track/main.nf 
b/modules/nf-core/opt/track/main.nf new file mode 100644 index 00000000..ff92645e --- /dev/null +++ b/modules/nf-core/opt/track/main.nf @@ -0,0 +1,59 @@ +process OPT_TRACK { + tag "$meta.id" + label 'process_high' + + container "khersameesh24/opt:v0.0.1" + + input: + tuple val(meta), path(fwd_oriented_fa) + tuple val(meta2), path(ref_annot_gff), path(ref_annot_fa) + + output: + tuple val(meta), path("${prefix}/probe2targets.tsv"), emit: probes2target + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_TRACK module does not support Conda. Please use Docker / Singularity / Podman instead." + } + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + + """ + opt \\ + -o ${prefix} \\ + -p ${task.cpus} \\ + track \\ + -q ${fwd_oriented_fa} \\ + -a ${ref_annot_gff} \\ + -t ${ref_annot_fa} \\ + ${args} + + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ + + stub: + // Exit if running this module with -profile conda / -profile mamba + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { + error "OPT_TRACK module does not support Conda. Please use Docker / Singularity / Podman instead." + } + prefix = task.ext.prefix ?: "${meta.id}" + + """ + mkdir -p ${prefix} + touch "${prefix}/probe2targets.tsv" + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + opt: \$(opt --version) + END_VERSIONS + """ +} diff --git a/modules/nf-core/opt/track/meta.yml b/modules/nf-core/opt/track/meta.yml new file mode 100644 index 00000000..6024d773 --- /dev/null +++ b/modules/nf-core/opt/track/meta.yml @@ -0,0 +1,69 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/meta-schema.json +name: "opt_track" +description: "track aligns query probe sequences to any target transcriptome" +keywords: + - opt + - opt track + - transcripts + - off-target probes + - align probes + - target transcriptome +tools: + - "opt": + description: "opt is a simple program that aligns probe sequences to transcript sequences to detect potential off-target probe activity" + homepage: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + documentation: "https://github.com/JEFworks-Lab/off-target-probe-tracker/blob/main/README.md" + tool_dev_url: "https://github.com/JEFworks-Lab/off-target-probe-tracker" + licence: [GPL-3.0 license] + +input: + - - meta: + type: map + description: | + Groovy Map containing information of the forward oriented fasta generated from the probe panel sequences with `opt flip` + e.g. `[ id:'breast_cancer_probe_panel_sequences' ]` + - fwd_oriented_fa: + type: file + description: Forward oriented fasta file generated by the opt flip command + pattern: "*.fa" + ontologies: [] + - - meta2: + type: map + description: | + Groovy Map containing the information of the genomic features and fasta files used as references + e.g. `[ id:'gencode_references' ]` + - ref_annot_gff: + type: file + description: Reference annotation in gff format + pattern: "*.gff" + ontologies: [] + - ref_annot_fa: + type: file + description: Reference annotation in fasta format + pattern: "*.fa" + ontologies: [] + +output: + probes2target: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
`[ id:'sample1' ]` + - "${meta.id}/probe2targets.tsv": + type: file + description: TSV file containing the gene and transcript information to which each probe aligns + pattern: "*.tsv" + ontologies: [] + versions: + - "versions.yml": + type: file + description: File containing software versions + pattern: "versions.yml" + ontologies: + - edam: "http://edamontology.org/format_3750" # YAML + +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/modules/nf-core/opt/track/opt-track.diff b/modules/nf-core/opt/track/opt-track.diff new file mode 100644 index 00000000..6a0098ad --- /dev/null +++ b/modules/nf-core/opt/track/opt-track.diff @@ -0,0 +1,44 @@ +Changes in component 'nf-core/opt/track' +Changes in 'opt/track/main.nf': +--- modules/nf-core/opt/track/main.nf ++++ modules/nf-core/opt/track/main.nf +@@ -9,8 +9,8 @@ + tuple val(meta2), path(ref_annot_gff), path(ref_annot_fa) + + output: +- tuple val(meta), path("${meta.id}/probe2targets.tsv"), emit: probes2target +- path "versions.yml" , emit: versions ++ tuple val(meta), path("${prefix}/probe2targets.tsv"), emit: probes2target ++ path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when +@@ -21,7 +21,7 @@ + error "OPT_TRACK module does not support Conda. Please use Docker / Singularity / Podman instead." + } + def args = task.ext.args ?: '' +- def prefix = task.ext.prefix ?: "${meta.id}" ++ prefix = task.ext.prefix ?: "${meta.id}" + + """ + opt \\ +@@ -41,8 +41,12 @@ + """ + + stub: +- def prefix = task.ext.prefix ?: "${meta.id}" +- ++ // Exit if running this module with -profile conda / -profile mamba ++ if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { ++ error "OPT_TRACK module does not support Conda. Please use Docker / Singularity / Podman instead." 
++ } ++ prefix = task.ext.prefix ?: "${meta.id}" ++ + """ + mkdir -p ${prefix} + touch "${prefix}/probe2targets.tsv" + +'modules/nf-core/opt/track/meta.yml' is unchanged +'modules/nf-core/opt/track/tests/main.nf.test' is unchanged +'modules/nf-core/opt/track/tests/main.nf.test.snap' is unchanged +************************************************************ diff --git a/modules/nf-core/opt/track/tests/main.nf.test b/modules/nf-core/opt/track/tests/main.nf.test new file mode 100644 index 00000000..4fbf9a19 --- /dev/null +++ b/modules/nf-core/opt/track/tests/main.nf.test @@ -0,0 +1,60 @@ +nextflow_process { + + name "Test Process OPT_TRACK" + script "../main.nf" + process "OPT_TRACK" + + tag "modules" + tag "modules_nfcore" + tag "opt" + tag "opt/track" + + test("testrun panel probe sequences") { + + when { + process { + """ + input[0] = [ + [ id:'test_run' ], file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/fwd_oriented.fa', checkIfExists: true) + ] + input[1] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.gtf', checkIfExists: true), + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.fasta', checkIfExists: true) + ] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } + + test("testrun panel probe sequences -stub") { + options "-stub" + when { + process { + """ + input[0] = [ + [ id:'test_run' ], file('https://raw.githubusercontent.com/khersameesh24/test-datasets/opt/testdata/fwd_oriented.fa', checkIfExists: true) + ] + + input[1] = [ + [ id:'test_run' ], + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.gtf', checkIfExists: true), + file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/genome.fasta', checkIfExists: true) + ] + """ + } + } + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } +} diff --git a/modules/nf-core/opt/track/tests/main.nf.test.snap b/modules/nf-core/opt/track/tests/main.nf.test.snap new file mode 100644 index 00000000..3dda7a91 --- /dev/null +++ b/modules/nf-core/opt/track/tests/main.nf.test.snap @@ -0,0 +1,68 @@ +{ + "testrun panel probe sequences -stub": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "probe2targets.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "1": [ + "versions.yml:md5,daf660f286a817fa0eed7703a5f65706" + ], + "probes2target": [ + [ + { + "id": "test_run" + }, + "probe2targets.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions": [ + "versions.yml:md5,daf660f286a817fa0eed7703a5f65706" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:20:29.220245783" + }, + "testrun panel probe sequences": { + "content": [ + { + "0": [ + [ + { + "id": "test_run" + }, + "probe2targets.tsv:md5,e15465df3845d7a6acf64dd3be04391b" + ] + ], + "1": [ + "versions.yml:md5,daf660f286a817fa0eed7703a5f65706" + ], + "probes2target": [ + [ + { + "id": "test_run" + }, + "probe2targets.tsv:md5,e15465df3845d7a6acf64dd3be04391b" + ] + ], + "versions": [ + "versions.yml:md5,daf660f286a817fa0eed7703a5f65706" + ] + } + ], + "meta": { + "nf-test": "0.9.2", + "nextflow": "25.04.6" + }, + "timestamp": "2025-09-14T12:20:19.999359259" + } +} \ No newline at end of file diff --git a/modules/nf-core/stardist/environment.yml b/modules/nf-core/stardist/environment.yml new file mode 100644 index 00000000..ddba86ea --- /dev/null +++ 
b/modules/nf-core/stardist/environment.yml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json +channels: + - conda-forge + - bioconda +dependencies: + - conda-forge::python=3.12.12 + - pip: + - stardist==0.9.2 + - tifffile==2026.3.3 + - imagecodecs==2026.1.14 + # For conda GPU support, use tensorflow[and-cuda]==2.20.0 instead + # The following are needed for the Seqera Containers UI + - tensorflow==2.20.0 + - nvidia-cublas-cu12==12.9.1.4 + - nvidia-cuda-cupti-cu12==12.9.79 + - nvidia-cuda-nvcc-cu12==12.9.86 + - nvidia-cuda-nvrtc-cu12==12.9.86 + - nvidia-cuda-runtime-cu12==12.9.79 + - nvidia-cudnn-cu12==9.19.0.56 + - nvidia-cufft-cu12==11.4.1.4 + - nvidia-curand-cu12==10.3.10.19 + - nvidia-cusolver-cu12==11.7.5.82 + - nvidia-cusparse-cu12==12.5.10.65 + - nvidia-nccl-cu12==2.29.7 + - nvidia-nvjitlink-cu12==12.9.86 diff --git a/modules/nf-core/stardist/main.nf b/modules/nf-core/stardist/main.nf new file mode 100644 index 00000000..01f4638c --- /dev/null +++ b/modules/nf-core/stardist/main.nf @@ -0,0 +1,41 @@ +process STARDIST { + tag "$meta.id" + label 'process_high' + label 'process_gpu_single' + + conda "${moduleDir}/environment.yml" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/d9/d964e0bef867bb2ff1a309c9c087d8d83ac734ce3aa315dd8311d4c1bfdafd8e/data' : + 'community.wave.seqera.io/library/python_pip_imagecodecs_nvidia-cublas-cu12_pruned:b668bcb6d531d350' }" + + input: + tuple val(meta), path(image) + tuple val(model_name), path(model_path) + + output: + tuple val(meta), path("*.stardist.tif"), emit: mask + tuple val("${task.process}"), val('stardist'), eval("pip show stardist | sed -n 's/^Version: //p'"), topic: versions, emit: versions_stardist + tuple val("${task.process}"), val('python'), eval("python --version | sed 's/Python //'"), topic: versions, emit: versions_python + tuple val("${task.process}"), val('tensorflow'), eval("pip show tensorflow | sed -n 's/^Version: //p'"), topic: versions, emit: versions_tensorflow + tuple val("${task.process}"), val('tifffile'), eval("pip show tifffile | sed -n 's/^Version: //p'"), topic: versions, emit: versions_tifffile + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def model_command = model_path ? "-m ${model_path}" : model_name ? "-m ${model_name}" : "" + """ + stardist-predict2d \\ + -i $image \\ + -o . \\ + $model_command \\ + $args + """ + + stub: + def prefix = task.ext.prefix ?: "${meta.id}" + """ + touch ${prefix}.stardist.tif + """ +} diff --git a/modules/nf-core/stardist/meta.yml b/modules/nf-core/stardist/meta.yml new file mode 100644 index 00000000..a7ecb790 --- /dev/null +++ b/modules/nf-core/stardist/meta.yml @@ -0,0 +1,157 @@ +name: "stardist" +description: Cell and nuclear segmentation with star-convex shapes +keywords: + - stardist + - segmentation + - image + - gpu + - spatial-transcriptomics +tools: + - "stardist": + description: "Stardist is a cell segmentation tool developed in Python by Martin + Weigert and Uwe Schmidt" + homepage: "https://stardist.net/" + documentation: "https://stardist.net/faq/" + tool_dev_url: "https://github.com/stardist/stardist" + doi: "10.1109/ISBIC56247.2022.9854534" + licence: + - "BSD 3-Clause" + identifier: "" +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
`[ id:'sample1' ]` + - image: + type: file + description: Single channel nuclear image + pattern: "*.{tiff,tif}" + ontologies: [] + - - model_name: + type: string + description: | + Name of a pretrained StarDist model (e.g. '2D_versatile_fluo', + '2D_versatile_he'). Used when model_path is not provided. + Pass '' (empty string) if providing a custom model path or + passing -m via ext.args. + - model_path: + type: file + description: | + Optional path to a custom StarDist model directory. When provided, + takes precedence over model_name. Pass [] (empty list) to use a + pretrained model name instead. + pattern: "*" + ontologies: [] +output: + mask: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. `[ id:'sample1' ]` + - "*.stardist.tif": + type: file + description: labelled mask output from stardist in tif format. + pattern: "*.{tiff,tif}" + ontologies: [] + versions_stardist: + - - ${task.process}: + type: string + description: The name of the process + - stardist: + type: string + description: The name of the tool + - "pip show stardist | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool + versions_python: + - - ${task.process}: + type: string + description: The name of the process + - python: + type: string + description: The name of the tool + - python --version | sed 's/Python //': + type: eval + description: The expression to obtain the version of the tool + versions_tensorflow: + - - ${task.process}: + type: string + description: The name of the process + - tensorflow: + type: string + description: The name of the tool + - "pip show tensorflow | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool + versions_tifffile: + - - ${task.process}: + type: string + description: The name of the process + - tifffile: + type: string + description: The name of the tool + - "pip show tifffile | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool +topics: + versions: + - - ${task.process}: + type: string + description: The name of the process + - stardist: + type: string + description: The name of the tool + - "pip show stardist | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The name of the process + - python: + type: string + description: The name of the tool + - python --version | sed 's/Python //': + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The name of the process + - tensorflow: + type: string + description: The name of the tool + - "pip show tensorflow | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool + - - ${task.process}: + type: string + description: The name of the process + - tifffile: + type: string + description: The name of the tool + - "pip show tifffile | sed -n 's/^Version: //p'": + type: eval + description: The expression to obtain the version of the tool +authors: + - "@migueLib" + - "@dongzehe" +maintainers: + - "@migueLib" +notes: | + GPU support: The container (built via Seqera Containers) includes TensorFlow 2.20.0 + with CUDA support and falls back to CPU automatically when no GPU is available. + Use the `process_gpu` label to request GPU resources from your executor. 
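+ A minimal config sketch for doing so via the 'process_gpu_single' label this
+ pipeline's patched module carries (the accelerator count and queue name are
+ illustrative, adjust them for your executor):
+ process {
+ withLabel: 'process_gpu_single' {
+ accelerator = 1
+ queue = 'gpu'
+ }
+ }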
+ When running with conda/mamba, GPU support depends on having a CUDA-enabled + TensorFlow installation in your environment. + + Model selection via the model input channel [model_name, model_path]: + - Pretrained model: [ '2D_versatile_fluo', [] ] + - Custom model directory: [ '', file("/path/to/model") ] + - Via ext.args only: [ '', [] ] (then set ext.args = '-m ...') + + Additional stardist CLI arguments can be passed via `task.ext.args`: + ext.args = '--n_tiles 4,4 --prob_thresh 0.5 --nms_thresh 0.3' + + Model weights are not bundled in the container. StarDist downloads pretrained + models on first use to ~/.keras/models/. diff --git a/modules/nf-core/stardist/stardist.diff b/modules/nf-core/stardist/stardist.diff new file mode 100644 index 00000000..aa1a361b --- /dev/null +++ b/modules/nf-core/stardist/stardist.diff @@ -0,0 +1,13 @@ +Changes in module 'nf-core/stardist' +--- modules/nf-core/stardist/main.nf ++++ modules/nf-core/stardist/main.nf +@@ -1,7 +1,7 @@ + process STARDIST { + tag "$meta.id" +- label 'process_medium' +- label 'process_gpu' ++ label 'process_high' ++ label 'process_gpu_single' + + conda "${moduleDir}/environment.yml" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? diff --git a/modules/nf-core/stardist/tests/main.nf.test b/modules/nf-core/stardist/tests/main.nf.test new file mode 100644 index 00000000..5b3e2089 --- /dev/null +++ b/modules/nf-core/stardist/tests/main.nf.test @@ -0,0 +1,59 @@ +nextflow_process { + name "Test Process STARDIST" + script "../main.nf" + process "STARDIST" + config "./nextflow.config" + + tag "modules" + tag "modules_nfcore" + tag "stardist" + + test("stardist2d - tif") { + + when { + process { + """ + input[0] = [ + [ id:'test' ], // meta map + file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true) + ] + input[1] = [ '2D_versatile_fluo', [] ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + + } + + test("stardist2d - tif - stub") { + + options "-stub" + + when { + process { + """ + input[0] = [ + [ id:'test' ], // meta map + file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true) + ] + input[1] = [ '2D_versatile_fluo', [] ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + + } + +} diff --git a/modules/nf-core/stardist/tests/main.nf.test.snap b/modules/nf-core/stardist/tests/main.nf.test.snap new file mode 100644 index 00000000..acbbc472 --- /dev/null +++ b/modules/nf-core/stardist/tests/main.nf.test.snap @@ -0,0 +1,168 @@ +{ + "stardist2d - tif - stub": { + "content": [ + { + "0": [ + [ + { + "id": "test" + }, + "test.stardist.tif:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "1": [ + [ + "STARDIST", + "stardist", + "0.9.2" + ] + ], + "2": [ + [ + "STARDIST", + "python", + "3.12.12" + ] + ], + "3": [ + [ + "STARDIST", + "tensorflow", + "2.20.0" + ] + ], + "4": [ + [ + "STARDIST", + "tifffile", + "2026.3.3" + ] + ], + "mask": [ + [ + { + "id": "test" + }, + "test.stardist.tif:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "versions_python": [ + [ + "STARDIST", + "python", + "3.12.12" + ] + ], + "versions_stardist": [ + [ + "STARDIST", + "stardist", + "0.9.2" + ] + ], + "versions_tensorflow": [ + [ + "STARDIST", + "tensorflow", + "2.20.0" + ] + ], + "versions_tifffile": [ + [ + "STARDIST", + "tifffile", + "2026.3.3" + ] + ] + } + ], + 
"meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-04T22:33:02.205105908" + }, + "stardist2d - tif": { + "content": [ + { + "0": [ + [ + { + "id": "test" + }, + "nuclear_image.stardist.tif:md5,ca29a05346d9e0d4c15bb133122560ce" + ] + ], + "1": [ + [ + "STARDIST", + "stardist", + "0.9.2" + ] + ], + "2": [ + [ + "STARDIST", + "python", + "3.12.12" + ] + ], + "3": [ + [ + "STARDIST", + "tensorflow", + "2.20.0" + ] + ], + "4": [ + [ + "STARDIST", + "tifffile", + "2026.3.3" + ] + ], + "mask": [ + [ + { + "id": "test" + }, + "nuclear_image.stardist.tif:md5,ca29a05346d9e0d4c15bb133122560ce" + ] + ], + "versions_python": [ + [ + "STARDIST", + "python", + "3.12.12" + ] + ], + "versions_stardist": [ + [ + "STARDIST", + "stardist", + "0.9.2" + ] + ], + "versions_tensorflow": [ + [ + "STARDIST", + "tensorflow", + "2.20.0" + ] + ], + "versions_tifffile": [ + [ + "STARDIST", + "tifffile", + "2026.3.3" + ] + ] + } + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-06T15:21:31.45100204" + } +} diff --git a/modules/nf-core/stardist/tests/nextflow.config b/modules/nf-core/stardist/tests/nextflow.config new file mode 100644 index 00000000..5926218d --- /dev/null +++ b/modules/nf-core/stardist/tests/nextflow.config @@ -0,0 +1,3 @@ +docker { + fixOwnership = true +} diff --git a/modules/nf-core/untar/environment.yml b/modules/nf-core/untar/environment.yml new file mode 100644 index 00000000..9b926b1f --- /dev/null +++ b/modules/nf-core/untar/environment.yml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json +channels: + - conda-forge + - bioconda +dependencies: + - conda-forge::coreutils=9.5 + - conda-forge::grep=3.11 + - conda-forge::gzip=1.13 + - conda-forge::lbzip2=2.5 + - conda-forge::sed=4.8 + - conda-forge::tar=1.34 diff --git a/modules/nf-core/untar/main.nf b/modules/nf-core/untar/main.nf new file mode 100644 index 00000000..e42b2c56 --- /dev/null +++ b/modules/nf-core/untar/main.nf @@ -0,0 +1,80 @@ +process UNTAR { + tag "${archive}" + label 'process_single' + + conda "${moduleDir}/environment.yml" + container "${workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container + ? 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/52/52ccce28d2ab928ab862e25aae26314d69c8e38bd41ca9431c67ef05221348aa/data' + : 'community.wave.seqera.io/library/coreutils_grep_gzip_lbzip2_pruned:838ba80435a629f8'}" + + input: + tuple val(meta), path(archive) + + output: + tuple val(meta), path("${prefix}"), emit: untar + tuple val("${task.process}"), val('untar'), eval('tar --version 2>&1 | head -1 | sed "s/tar (GNU tar) //; s/ Copyright.*//"'), emit: versions_untar, topic: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def args2 = task.ext.args2 ?: '' + prefix = task.ext.prefix ?: (meta.id ? 
"${meta.id}" : archive.baseName.toString().replaceFirst(/\.tar$/, "")) + + """ + mkdir ${prefix} + + ## Ensures --strip-components only applied when top level of tar contents is a directory + ## If just files or multiple directories, place all in prefix + if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then + tar \\ + -C ${prefix} --strip-components 1 \\ + -xavf \\ + ${args} \\ + ${archive} \\ + ${args2} + else + tar \\ + -C ${prefix} \\ + -xavf \\ + ${args} \\ + ${archive} \\ + ${args2} + fi + + """ + + stub: + prefix = task.ext.prefix ?: (meta.id ? "${meta.id}" : archive.toString().replaceFirst(/\.[^\.]+(.gz)?$/, "")) + """ + mkdir ${prefix} + touch ${prefix}/morphology.ome.tif + touch ${prefix}/transcripts.parquet + touch ${prefix}/gene_panel.json + touch ${prefix}/experiment.xenium + + ## Dry-run untaring the archive to get the files and place all in prefix + if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then + for i in `tar -tf ${archive}`; + do + if [[ \$(echo "\${i}" | grep -E "/\$") == "" ]]; + then + touch \${i} + else + mkdir -p \${i} + fi + done + else + for i in `tar -tf ${archive}`; + do + if [[ \$(echo "\${i}" | grep -E "/\$") == "" ]]; + then + touch ${prefix}/\${i} + else + mkdir -p ${prefix}/\${i} + fi + done + fi + """ +} diff --git a/modules/nf-core/untar/meta.yml b/modules/nf-core/untar/meta.yml new file mode 100644 index 00000000..571d8078 --- /dev/null +++ b/modules/nf-core/untar/meta.yml @@ -0,0 +1,73 @@ +name: untar +description: Extract files from tar, tar.gz, tar.bz2, tar.xz archives +keywords: + - untar + - uncompress + - extract +tools: + - untar: + description: | + Extract tar, tar.gz, tar.bz2, tar.xz files. + documentation: https://www.gnu.org/software/tar/manual/ + licence: ["GPL-3.0-or-later"] + identifier: "" +input: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - archive: + type: file + description: File to be untarred + pattern: "*.{tar,tar.gz,tar.bz2,tar.xz}" + ontologies: + - edam: http://edamontology.org/format_3981 # TAR format + - edam: http://edamontology.org/format_3989 # GZIP format +output: + untar: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + pattern: "*/" + - ${prefix}: + type: map + description: | + Groovy Map containing sample information + e.g. 
[ id:'test', single_end:false ] + pattern: "*/" + versions_untar: + - - ${task.process}: + type: string + description: The name of the process + - untar: + type: string + description: The name of the tool + - tar --version 2>&1 | head -1 | sed "s/tar (GNU tar) //; s/ Copyright.*//": + type: eval + description: The expression to obtain the version of the tool + +topics: + versions: + - - ${task.process}: + type: string + description: The name of the process + - untar: + type: string + description: The name of the tool + - tar --version 2>&1 | head -1 | sed "s/tar (GNU tar) //; s/ Copyright.*//": + type: eval + description: The expression to obtain the version of the tool + +authors: + - "@joseespinosa" + - "@drpatelh" + - "@matthdsm" + - "@jfy133" +maintainers: + - "@joseespinosa" + - "@drpatelh" + - "@matthdsm" + - "@jfy133" diff --git a/modules/nf-core/untar/tests/main.nf.test b/modules/nf-core/untar/tests/main.nf.test new file mode 100644 index 00000000..c957517a --- /dev/null +++ b/modules/nf-core/untar/tests/main.nf.test @@ -0,0 +1,85 @@ +nextflow_process { + + name "Test Process UNTAR" + script "../main.nf" + process "UNTAR" + tag "modules" + tag "modules_nfcore" + tag "untar" + + test("test_untar") { + + when { + process { + """ + input[0] = [ [], file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/db/kraken2.tar.gz', checkIfExists: true) ] + """ + } + } + + then { + assertAll ( + { assert process.success }, + { assert snapshot(process.out).match() }, + ) + } + } + + test("test_untar_onlyfiles") { + + when { + process { + """ + input[0] = [ [], file(params.modules_testdata_base_path + 'generic/tar/hello.tar.gz', checkIfExists: true) ] + """ + } + } + + then { + assertAll ( + { assert process.success }, + { assert snapshot(process.out).match() }, + ) + } + } + + test("test_untar - stub") { + + options "-stub" + + when { + process { + """ + input[0] = [ [], file(params.modules_testdata_base_path + 'genomics/sarscov2/genome/db/kraken2.tar.gz', checkIfExists: true) ] + """ + } + } + + then { + assertAll ( + { assert process.success }, + { assert snapshot(process.out).match() }, + ) + } + } + + test("test_untar_onlyfiles - stub") { + + options "-stub" + + when { + process { + """ + input[0] = [ [], file(params.modules_testdata_base_path + 'generic/tar/hello.tar.gz', checkIfExists: true) ] + """ + } + } + + then { + assertAll ( + { assert process.success }, + { assert snapshot(process.out).match() }, + ) + } + } +} diff --git a/modules/nf-core/untar/tests/main.nf.test.snap b/modules/nf-core/untar/tests/main.nf.test.snap new file mode 100644 index 00000000..ceb91b79 --- /dev/null +++ b/modules/nf-core/untar/tests/main.nf.test.snap @@ -0,0 +1,158 @@ +{ + "test_untar_onlyfiles": { + "content": [ + { + "0": [ + [ + [ + + ], + [ + "hello.txt:md5,e59ff97941044f85df5297e1c302d260" + ] + ] + ], + "1": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ], + "untar": [ + [ + [ + + ], + [ + "hello.txt:md5,e59ff97941044f85df5297e1c302d260" + ] + ] + ], + "versions": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.3" + }, + "timestamp": "2024-07-10T12:04:28.231047" + }, + "test_untar_onlyfiles - stub": { + "content": [ + { + "0": [ + [ + [ + + ], + [ + "hello.txt:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "1": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ], + "untar": [ + [ + [ + + ], + [ + "hello.txt:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "versions": [ + 
"versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.3" + }, + "timestamp": "2024-07-10T12:04:45.773103" + }, + "test_untar - stub": { + "content": [ + { + "0": [ + [ + [ + + ], + [ + "hash.k2d:md5,d41d8cd98f00b204e9800998ecf8427e", + "opts.k2d:md5,d41d8cd98f00b204e9800998ecf8427e", + "taxo.k2d:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "1": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ], + "untar": [ + [ + [ + + ], + [ + "hash.k2d:md5,d41d8cd98f00b204e9800998ecf8427e", + "opts.k2d:md5,d41d8cd98f00b204e9800998ecf8427e", + "taxo.k2d:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ] + ], + "versions": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.3" + }, + "timestamp": "2024-07-10T12:04:36.777441" + }, + "test_untar": { + "content": [ + { + "0": [ + [ + [ + + ], + [ + "hash.k2d:md5,8b8598468f54a7087c203ad0190555d9", + "opts.k2d:md5,a033d00cf6759407010b21700938f543", + "taxo.k2d:md5,094d5891cdccf2f1468088855c214b2c" + ] + ] + ], + "1": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ], + "untar": [ + [ + [ + + ], + [ + "hash.k2d:md5,8b8598468f54a7087c203ad0190555d9", + "opts.k2d:md5,a033d00cf6759407010b21700938f543", + "taxo.k2d:md5,094d5891cdccf2f1468088855c214b2c" + ] + ] + ], + "versions": [ + "versions.yml:md5,6063247258c56fd271d076bb04dd7536" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.3" + }, + "timestamp": "2024-07-10T12:04:19.377674" + } +} \ No newline at end of file diff --git a/modules/nf-core/untar/untar.diff b/modules/nf-core/untar/untar.diff new file mode 100644 index 00000000..bfea8b3e --- /dev/null +++ b/modules/nf-core/untar/untar.diff @@ -0,0 +1,20 @@ +Changes in component 'nf-core/untar' +Upstream git_sha: 447f7bc0fa41dfc2400c8cad4c0291880dc060cf +'modules/nf-core/untar/meta.yml' is unchanged +Changes in 'untar/main.nf': +--- modules/nf-core/untar/main.nf ++++ modules/nf-core/untar/main.nf +@@ -49,6 +49,10 @@ + prefix = task.ext.prefix ?: (meta.id ? "${meta.id}" : archive.toString().replaceFirst(/\.[^\.]+(.gz)?$/, "")) + """ + mkdir ${prefix} ++ touch ${prefix}/morphology.ome.tif ++ touch ${prefix}/transcripts.parquet ++ touch ${prefix}/gene_panel.json ++ touch ${prefix}/experiment.xenium + + ## Dry-run untaring the archive to get the files and place all in prefix + if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then + +'modules/nf-core/untar/environment.yml' is unchanged +************************************************************ diff --git a/modules/nf-core/unzip/environment.yml b/modules/nf-core/unzip/environment.yml new file mode 100644 index 00000000..24615895 --- /dev/null +++ b/modules/nf-core/unzip/environment.yml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json +channels: + - conda-forge + - bioconda +dependencies: + - conda-forge::p7zip=16.02 diff --git a/modules/nf-core/unzip/main.nf b/modules/nf-core/unzip/main.nf new file mode 100644 index 00000000..d9417fb3 --- /dev/null +++ b/modules/nf-core/unzip/main.nf @@ -0,0 +1,50 @@ +process UNZIP { + tag "$archive" + label 'process_single' + + conda "${moduleDir}/environment.yml" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+        'https://depot.galaxyproject.org/singularity/p7zip:16.02' :
+        'biocontainers/p7zip:16.02' }"
+
+    input:
+    tuple val(meta), path(archive)
+
+    output:
+    tuple val(meta), path("${prefix}/"), emit: unzipped_archive
+    path "versions.yml"                , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    if ( archive instanceof List && archive.name.size > 1 ) { error "[UNZIP] error: 7za only accepts a single archive as input. Please check module input." }
+    prefix = task.ext.prefix ?: ( meta.id ? "${meta.id}" : archive.baseName)
+    """
+    7za \\
+        x \\
+        -o"${prefix}"/ \\
+        $args \\
+        $archive
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        7za: \$(echo \$(7za --help) | sed 's/.*p7zip Version //; s/(.*//')
+    END_VERSIONS
+    """
+
+    stub:
+    if ( archive instanceof List && archive.name.size > 1 ) { error "[UNZIP] error: 7za only accepts a single archive as input. Please check module input." }
+    prefix = task.ext.prefix ?: ( meta.id ? "${meta.id}" : archive.baseName)
+    """
+    mkdir -p "${prefix}/morphology_focus"
+    touch "${prefix}/transcripts.parquet"
+    touch "${prefix}/morphology_focus/morphology_focus_0000.ome.tif"
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        7za: \$(echo \$(7za --help) | sed 's/.*p7zip Version //; s/(.*//')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/unzip/meta.yml b/modules/nf-core/unzip/meta.yml
new file mode 100644
index 00000000..ba1eb912
--- /dev/null
+++ b/modules/nf-core/unzip/meta.yml
@@ -0,0 +1,50 @@
+name: unzip
+description: Unzip ZIP archive files
+keywords:
+  - unzip
+  - decompression
+  - zip
+  - archiving
+tools:
+  - unzip:
+      description: p7zip is a quick port of 7z.exe and 7za.exe (command line version
+        of 7zip, see www.7-zip.org) for Unix.
+      homepage: https://sourceforge.net/projects/p7zip/
+      documentation: https://sourceforge.net/projects/p7zip/
+      tool_dev_url: https://sourceforge.net/projects/p7zip/
+      licence: ["LGPL-2.1-or-later"]
+      identifier: ""
+input:
+  - - meta:
+        type: map
+        description: |
+          Groovy Map containing sample information
+          e.g. 
[ id:'test', single_end:false ] + - ${prefix}/: + type: directory + description: Directory contents of the unzipped archive + pattern: "${archive.baseName}/" + versions: + - versions.yml: + type: file + description: File containing software versions + pattern: "versions.yml" + ontologies: + - edam: http://edamontology.org/format_3750 # YAML +authors: + - "@jfy133" +maintainers: + - "@jfy133" diff --git a/modules/nf-core/unzip/tests/main.nf.test b/modules/nf-core/unzip/tests/main.nf.test new file mode 100644 index 00000000..238b68d8 --- /dev/null +++ b/modules/nf-core/unzip/tests/main.nf.test @@ -0,0 +1,54 @@ +nextflow_process { + + name "Test Process UNZIP" + script "../main.nf" + process "UNZIP" + + tag "modules" + tag "modules_nfcore" + tag "unzip" + + test("generic [tar] [tar_gz]") { + + when { + process { + """ + input[0] = [ + [ id: 'hello' ], + file(params.modules_testdata_base_path + 'generic/tar/hello.tar.gz', checkIfExists: true) + ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } + + test("generic [tar] [tar_gz] stub") { + + options "-stub" + + when { + process { + """ + input[0] = [ + [ id: 'hello' ], + file(params.modules_testdata_base_path + 'generic/tar/hello.tar.gz', checkIfExists: true) + ] + """ + } + } + + then { + assertAll( + { assert process.success }, + { assert snapshot(process.out).match() } + ) + } + } +} diff --git a/modules/nf-core/unzip/tests/main.nf.test.snap b/modules/nf-core/unzip/tests/main.nf.test.snap new file mode 100644 index 00000000..cdd2ab16 --- /dev/null +++ b/modules/nf-core/unzip/tests/main.nf.test.snap @@ -0,0 +1,76 @@ +{ + "generic [tar] [tar_gz] stub": { + "content": [ + { + "0": [ + [ + { + "id": "hello" + }, + [ + + ] + ] + ], + "1": [ + "versions.yml:md5,52c55ce814e8bc9edc5a6c625ed794b8" + ], + "unzipped_archive": [ + [ + { + "id": "hello" + }, + [ + + ] + ] + ], + "versions": [ + "versions.yml:md5,52c55ce814e8bc9edc5a6c625ed794b8" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.2" + }, + "timestamp": "2024-06-30T19:16:37.11550986" + }, + "generic [tar] [tar_gz]": { + "content": [ + { + "0": [ + [ + { + "id": "hello" + }, + [ + "hello.tar:md5,80c66db79a773bc87b3346035ff9593e" + ] + ] + ], + "1": [ + "versions.yml:md5,52c55ce814e8bc9edc5a6c625ed794b8" + ], + "unzipped_archive": [ + [ + { + "id": "hello" + }, + [ + "hello.tar:md5,80c66db79a773bc87b3346035ff9593e" + ] + ] + ], + "versions": [ + "versions.yml:md5,52c55ce814e8bc9edc5a6c625ed794b8" + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "24.04.2" + }, + "timestamp": "2024-06-30T19:16:25.120242571" + } +} \ No newline at end of file diff --git a/modules/nf-core/unzip/unzip.diff b/modules/nf-core/unzip/unzip.diff new file mode 100644 index 00000000..e4a293c0 --- /dev/null +++ b/modules/nf-core/unzip/unzip.diff @@ -0,0 +1,18 @@ +Changes in component 'nf-core/unzip' +'modules/nf-core/unzip/environment.yml' is unchanged +'modules/nf-core/unzip/meta.yml' is unchanged +Changes in 'unzip/main.nf': +--- modules/nf-core/unzip/main.nf ++++ modules/nf-core/unzip/main.nf +@@ -38,7 +38,9 @@ + if ( archive instanceof List && archive.name.size > 1 ) { error "[UNZIP] error: 7za only accepts a single archive as input. Please check module input." } + prefix = task.ext.prefix ?: ( meta.id ? 
"${meta.id}" : archive.baseName) + """ +- mkdir "${prefix}" ++ mkdir -p "${prefix}/morphology_focus" ++ touch "${prefix}/transcripts.parquet" ++ touch "${prefix}/morphology_focus/morphology_focus_0000.ome.tif" + + cat <<-END_VERSIONS > versions.yml + "${task.process}": +************************************************************ diff --git a/modules/nf-core/xeniumranger/import-segmentation/main.nf b/modules/nf-core/xeniumranger/import-segmentation/main.nf index 50b17272..49310531 100644 --- a/modules/nf-core/xeniumranger/import-segmentation/main.nf +++ b/modules/nf-core/xeniumranger/import-segmentation/main.nf @@ -2,80 +2,68 @@ process XENIUMRANGER_IMPORT_SEGMENTATION { tag "$meta.id" label 'process_high' - container "nf-core/xeniumranger:3.0.1" + container "nf-core/xeniumranger:4.0" input: - tuple val(meta), path(xenium_bundle) - val(expansion_distance) - path(coordinate_transform) - path(nuclei) - path(cells) - path(transcript_assignment) - path(viz_polygons) + tuple val(meta), path(xenium_bundle, stageAs: "bundle/"), path(transcript_assignment), path(viz_polygons), path(nuclei), path(cells), path(coordinate_transform), val(units) output: - tuple val(meta), path("**/outs/**"), emit: outs - path "versions.yml", emit: versions + tuple val(meta), path("${prefix}"), emit: outs + tuple val("${task.process}"), val("xeniumranger"), eval("xeniumranger -V | sed -e 's/.*xenium-//'"), emit: versions_xeniumranger, topic: versions when: task.ext.when == null || task.ext.when script: + // Exit if running this module with -profile conda / -profile mamba if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "XENIUMRANGER_IMPORT-SEGMENTATION module does not support Conda. Please use Docker / Singularity / Podman instead." + error "XENIUMRANGER_IMPORT_SEGMENTATION module does not support Conda. Please use Docker / Singularity / Podman instead." } - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" - // image based segmentation options - def expansion_distance = expansion_distance ? "--expansion-distance=\"${expansion_distance}\"": "" // expansion distance (default - 5, range - 0 - 100) - def coordinate_transform = coordinate_transform ? "--coordinate-transform=\"${coordinate_transform}\"": "" + prefix = task.ext.prefix ?: "${meta.id}" - def nuclei_detection = nuclei ? "--nuclei=\"${nuclei}\"": "" - def cells = cells ? "--cells=\"${cells}\"": "" + // nuclei and cells are for image segmentation results + // transcript_assignment and viz_polygons are for transcript assignment results + // they are mutually exclusive + if ((nuclei || cells) && (transcript_assignment || viz_polygons)) { + error "--nuclei and --cells are for image segmentation results, which are mutually exclusive with --transcript-assignment and --viz-polygons for transcript assignment results. Please use only one of them." + } - // transcript based segmentation - def transcript_assignment = transcript_assignment ? "--transcript-assignment=\"${transcript_assignment}\"": "" - def viz_polygons = viz_polygons ? 
"--viz-polygons=\"${viz_polygons}\"":"" + def assembled_args = [] + if (task.ext.args) { assembled_args << task.ext.args.trim() } + if (nuclei) { assembled_args << "--nuclei=\"${nuclei}\"" } + if (cells) { assembled_args << "--cells=\"${cells}\"" } + if (transcript_assignment) { assembled_args << "--transcript-assignment=\"${transcript_assignment}\"" } + if (viz_polygons) { assembled_args << "--viz-polygons=\"${viz_polygons}\"" } + if (nuclei) { assembled_args << "--expansion-distance=${params.expansion_distance}" } + if (coordinate_transform) { + assembled_args << "--coordinate-transform=\"${coordinate_transform}\"" + // if coordinate_transform is provided, units must be microns + assembled_args << "--units=\"microns\"" + } else if (units) { + assembled_args << "--units=\"${units}\"" + } - // shared argument - def units = coordinate_transform ? "--units=microns": "--units=pixels" + def args = assembled_args ? assembled_args.join(" \\\n ") : "" """ xeniumranger import-segmentation \\ - --id="${prefix}" \\ + --id="XENIUMRANGER_IMPORT_SEGMENTATION" \\ --xenium-bundle="${xenium_bundle}" \\ --localcores=${task.cpus} \\ --localmem=${task.memory.toGiga()} \\ - ${coordinate_transform} \\ - ${nuclei_detection} \\ - ${cells} \\ - ${expansion_distance} \\ - ${transcript_assignment} \\ - ${viz_polygons} \\ - ${units} \\ ${args} - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + rm -rf "${prefix}" + mv XENIUMRANGER_IMPORT_SEGMENTATION/outs "${prefix}" """ stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "XENIUMRANGER_IMPORT-SEGMENTATION module does not support Conda. Please use Docker / Singularity / Podman instead." - } - def prefix = task.ext.prefix ?: "${meta.id}" + prefix = task.ext.prefix ?: "${meta.id}" """ - mkdir -p "${prefix}/outs/" - touch "${prefix}/outs/fake_file.txt" - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + mkdir -p "${prefix}" + touch "${prefix}/experiment.xenium" """ + } diff --git a/modules/nf-core/xeniumranger/import-segmentation/meta.yml b/modules/nf-core/xeniumranger/import-segmentation/meta.yml index c3a34ec5..e222df58 100644 --- a/modules/nf-core/xeniumranger/import-segmentation/meta.yml +++ b/modules/nf-core/xeniumranger/import-segmentation/meta.yml @@ -1,9 +1,14 @@ name: xeniumranger_import_segmentation -description: The xeniumranger import-segmentation module allows you to specify 2D - nuclei and/or cell segmentation results for assigning transcripts to cells and recalculate - all Xenium Onboard Analysis (XOA) outputs that depend on segmentation. Segmentation - results can be generated by community-developed tools or prior Xenium segmentation - result. +description: | + The xeniumranger import-segmentation module runs `xeniumranger import-segmentation` + to recompute Xenium Onboard Analysis outputs using external segmentation results. + It supports two execution modes mirroring the Xenium Ranger CLI: an image-based + mode that accepts nuclei and/or cell masks (TIFF/NPY) or GeoJSON polygons together + with optional coordinate transforms and unit definitions, and a transcript-based + mode that ingests Baysor-style transcript assignment CSV files plus visualization + polygons. 
Use the image-based inputs when providing label masks or polygons, or + switch to the transcript-based inputs when supplying transcript-level assignments + so the appropriate command-line arguments are passed to Xenium Ranger. keywords: - spatial - segmentation @@ -28,57 +33,109 @@ input: type: map description: | Groovy Map containing run information - e.g. [id:'xenium_bundle_path'] + e.g. [ id:'xenium_sample' ] - xenium_bundle: type: directory - description: Path to the xenium output bundle generated by the Xenium Onboard - Analysis pipeline - - - expansion_distance: - type: integer - description: Nuclei boundary expansion distance in µm. Only for use when nucleus - segmentation provided as input. Default-5 (accepted range 0 - 100) - - - coordinate_transform: + description: Path to the Xenium output bundle generated by the Xenium + Onboard Analysis pipeline + - transcript_assignment: type: file - description: Image alignment file containing similarity transform matrix e.g., - the _imagealignment.csv file exported from Xenium Explorer - - - nuclei: + optional: true + description: | + Transcript assignment CSV with cell assignment, such as from Baysor v0.6, (transcript-based mode). + Mutually exclusive with image-based inputs (`nuclei`, `cells`). Required when using + transcript-based mode. Passed to `--transcript-assignment`. + pattern: "*.csv" + ontologies: + - edam: http://edamontology.org/format_3752 # CSV + - viz_polygons: type: file + optional: true description: | - Label mask (TIFF or NPY), polygons of nucleus segmentations (GeoJSON FeatureCollection), or Xenium Onboard Analysis cells.zarr.zip (the nucleus masks as input). - --nuclei will use nucleusGeometry polygon if it exists in the GeoJSON (i.e., for QuPath-like GeoJSON files), - or geometry if it does not. Error if --transcript-assignment argument is used. - - - cells: + Cell boundary polygons (GeoJSON) for visualization, such as from Baysor v0.6 (transcript-based mode). + Mutually exclusive with image-based inputs (`nuclei`, `cells`). Required when using + `transcript_assignment`. Passed to `--viz-polygons`. + pattern: "*.{json,geojson}" + ontologies: + - edam: http://edamontology.org/format_3464 # JSON + - nuclei: type: file + optional: true description: | - Label mask (TIFF or NPY), polygons of cell segmentations (GeoJSON FeatureCollection), or Xenium Onboard Analysis cells.zarr.zip (the cell masks as input). - Features with a non-cell objectType will be ignored. Error if --transcript-assignment argument is used. - In Xenium Ranger v2.0, --nuclei no longer needs to be used with --cells. - - - transcript_assignment: + Nucleus segmentation input as label mask (TIFF/NPY), polygons (GeoJSON), or Xenium Onboard + Analysis cells.zarr.zip (image-based mode). + Mutually exclusive with transcript-based inputs + (`transcript_assignment`, `viz_polygons`). Passed to `--nuclei`. + pattern: "*.{tif,tiff,npy,json,geojson,zarr.zip}" + ontologies: + - edam: http://edamontology.org/format_4003 # NumPy format + - edam: http://edamontology.org/format_3464 # JSON + - cells: type: file + optional: true description: | - Transcript CSV with cell assignment from Baysor v0.6. Error if --cells or --nuclei arguments are used. - - - viz_polygons: + Cell segmentation input as label mask (TIFF/NPY), polygons (GeoJSON), or Xenium Onboard + Analysis cells.zarr.zip (image-based mode). + Mutually exclusive with transcript-based inputs + (`transcript_assignment`, `viz_polygons`). Passed to `--cells`. 
+ pattern: "*.{tif,tiff,npy,json,geojson,zarr.zip}" + ontologies: + - edam: http://edamontology.org/format_4003 # NumPy format + - edam: http://edamontology.org/format_3464 # JSON + - coordinate_transform: type: file + optional: true + description: | + Image alignment file containing similarity transform matrix (e.g., `_imagealignment.csv` from + Xenium Explorer). Only used with image-based mode inputs (`nuclei`, `cells`). `units` will be automatically set to "microns". Passed to `--coordinate-transform`. + pattern: "*.csv" + ontologies: + - edam: http://edamontology.org/format_3752 # CSV + - units: + type: string + optional: true description: | - Cell boundary polygons (GeoJSON) for visualization from Baysor v0.6. Required if --transcript-assignment argument used. Error if --cells or --nuclei arguments used. + Units for segmentation results. Must be one of two options: "microns" (physical space) or + "pixels" (pixel space). Can be used with both image-based and transcript-based modes. + Default: "pixels". Must be "microns" if `coordinate_transform` is used. For Baysor v0.6 + inputs, must be "microns". Passed to `--units`. + enum: + - "microns" + - "pixels" output: - - outs: - - meta: - type: file - description: Files containing the outputs of Cell Ranger, see official 10X Genomics - documentation for a complete list - pattern: "${meta.id}/outs/*" - - "**/outs/**": - type: file - description: Files containing the outputs of xenium ranger, see official 10X - Genomics documentation for a complete list of outputs - pattern: "${meta.id}/outs/*" - - versions: - - versions.yml: - type: file - description: File containing software versions - pattern: "versions.yml" + outs: + - - meta: + type: map + description: Groovy Map containing sample information e.g. 
[ id:'test' ] + - ${prefix}: + type: directory + description: Directory containing the output xenium bundle of Xenium + Ranger + pattern: "${prefix}" + versions_xeniumranger: + - - ${task.process}: + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - xeniumranger -V | sed -e 's/.*xenium-//': + type: string + description: The command used to generate the version of the tool +topics: + versions: + - - ${task.process}: + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - xeniumranger -V | sed -e 's/.*xenium-//': + type: string + description: The command used to generate the version of the tool authors: - "@khersameesh24" + - "@dongzehe" maintainers: - "@khersameesh24" + - "@dongzehe" diff --git a/modules/nf-core/xeniumranger/import-segmentation/tests/main.nf.test b/modules/nf-core/xeniumranger/import-segmentation/tests/main.nf.test index 54d3ba00..e61cdc11 100644 --- a/modules/nf-core/xeniumranger/import-segmentation/tests/main.nf.test +++ b/modules/nf-core/xeniumranger/import-segmentation/tests/main.nf.test @@ -26,7 +26,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_import-segmentation"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = 0 @@ -93,7 +93,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_import-segmentation"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = 0 @@ -159,7 +159,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_import-segmentation"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = 0 @@ -225,7 +225,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_import-segmentation"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = 0 @@ -292,7 +292,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_import-segmentation"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = 0 diff --git a/modules/nf-core/xeniumranger/import-segmentation/xeniumranger-import-segmentation.diff b/modules/nf-core/xeniumranger/import-segmentation/xeniumranger-import-segmentation.diff new file mode 100644 index 00000000..ea2652a5 --- /dev/null +++ b/modules/nf-core/xeniumranger/import-segmentation/xeniumranger-import-segmentation.diff @@ -0,0 +1,14 @@ +Changes in component 'nf-core/xeniumranger/import-segmentation' +'modules/nf-core/xeniumranger/import-segmentation/meta.yml' is unchanged +Changes in 'xeniumranger/import-segmentation/main.nf': +--- modules/nf-core/xeniumranger/import-segmentation/main.nf ++++ modules/nf-core/xeniumranger/import-segmentation/main.nf +@@ -36,6 +36,7 @@ + if (cells) { assembled_args << "--cells=\"${cells}\"" } + if (transcript_assignment) { assembled_args << "--transcript-assignment=\"${transcript_assignment}\"" } + if (viz_polygons) { assembled_args << "--viz-polygons=\"${viz_polygons}\"" } ++ if (nuclei) { assembled_args << "--expansion-distance=${params.expansion_distance}" } + if (coordinate_transform) { + assembled_args << "--coordinate-transform=\"${coordinate_transform}\"" + // if coordinate_transform is provided, units must be microns 
+************************************************************ diff --git a/modules/nf-core/xeniumranger/relabel/main.nf b/modules/nf-core/xeniumranger/relabel/main.nf index b06fe9ea..bf04a971 100644 --- a/modules/nf-core/xeniumranger/relabel/main.nf +++ b/modules/nf-core/xeniumranger/relabel/main.nf @@ -2,55 +2,46 @@ process XENIUMRANGER_RELABEL { tag "$meta.id" label 'process_high' - container "nf-core/xeniumranger:3.0.1" + container "nf-core/xeniumranger:4.0" input: - tuple val(meta), path(xenium_bundle) - path(gene_panel) + tuple val(meta), path(xenium_bundle, stageAs: "bundle/"), path(panel) output: - tuple val(meta), path("**/outs/**"), emit: outs - path "versions.yml", emit: versions + tuple val(meta), path("${prefix}"), emit: outs + tuple val("${task.process}"), val("xeniumranger"), eval("xeniumranger -V | sed -e 's/.*xenium-//'"), emit: versions_xeniumranger, topic: versions when: task.ext.when == null || task.ext.when script: + // Exit if running this module with -profile conda / -profile mamba if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { error "XENIUMRANGER_RELABEL module does not support Conda. Please use Docker / Singularity / Podman instead." } - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" + + prefix = task.ext.prefix ?: "${meta.id}" + def args = task.ext.args ?: "" """ xeniumranger relabel \\ - --id="${prefix}" \\ + --id="XENIUMRANGER_RELABEL" \\ --xenium-bundle="${xenium_bundle}" \\ - --panel="${gene_panel}" \\ + --panel="${panel}" \\ --localcores=${task.cpus} \\ --localmem=${task.memory.toGiga()} \\ ${args} - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + rm -rf "${prefix}" + mv XENIUMRANGER_RELABEL/outs "${prefix}" """ stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "XENIUMRANGER_RELABEL module does not support Conda. Please use Docker / Singularity / Podman instead." - } - def prefix = task.ext.prefix ?: "${meta.id}" - """ - mkdir -p "${prefix}/outs/" - touch "${prefix}/outs/fake_file.txt" + prefix = task.ext.prefix ?: "${meta.id}" - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + """ + mkdir -p "${prefix}" + touch "${prefix}/experiment.xenium" """ } diff --git a/modules/nf-core/xeniumranger/relabel/meta.yml b/modules/nf-core/xeniumranger/relabel/meta.yml index 85c1dbfa..8b61e0bb 100644 --- a/modules/nf-core/xeniumranger/relabel/meta.yml +++ b/modules/nf-core/xeniumranger/relabel/meta.yml @@ -1,6 +1,6 @@ name: xeniumranger_relabel -description: The xeniumranger relabel module allows you to change the gene labels - applied to decoded transcripts. +description: The xeniumranger relabel module allows you to change the gene + labels applied to decoded transcripts. keywords: - spatial - relabel @@ -15,39 +15,58 @@ tools: documentation: "https://www.10xgenomics.com/support/software/xenium-ranger/latest/getting-started" tool_dev_url: "https://www.10xgenomics.com/support/software/xenium-ranger/latest/analysis" licence: - - 10x Genomics EULA + - "10x Genomics EULA" identifier: "" input: - - meta: type: map description: | Groovy Map containing run information - e.g. [id:'xenium_bundle_path'] + e.g. 
[ id:'xenium_bundle_path' ] - xenium_bundle: type: directory - description: Path to the xenium output bundle generated by the Xenium Onboard - Analysis pipeline - - - gene_panel: + description: Path to the xenium output bundle generated by the Xenium + Onboard Analysis pipeline + - panel: type: file - description: Gene panel JSON file to use for relabeling decoded transcripts + description: Path to the gene panel file + pattern: "*.json" + ontologies: + - edam: http://edamontology.org/format_3464 # JSON output: - - outs: - - meta: - type: file - description: Files containing the outputs of Cell Ranger, see official 10X Genomics - documentation for a complete list - pattern: "${meta.id}/outs/*" - - "**/outs/**": - type: file - description: Files containing the outputs of xenium ranger, see official 10X - Genomics documentation for a complete list of outputs - pattern: "${meta.id}/outs/*" - - versions: - - versions.yml: - type: file - description: File containing software versions - pattern: "versions.yml" + outs: + - - meta: + type: map + description: Groovy Map containing sample information e.g. [ id:'test' ] + - "${prefix}": + type: directory + description: Directory containing the output xenium bundle of Xenium + Ranger + pattern: "${prefix}" + versions_xeniumranger: + - - "${task.process}": + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - "xeniumranger -V | sed -e 's/.*xenium-//'": + type: string + description: The command used to generate the version of the tool +topics: + versions: + - - "${task.process}": + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - "xeniumranger -V | sed -e 's/.*xenium-//'": + type: string + description: The command used to generate the version of the tool authors: - "@khersameesh24" + - "@dongzehe" maintainers: - "@khersameesh24" + - "@dongzehe" diff --git a/modules/nf-core/xeniumranger/relabel/tests/main.nf.test b/modules/nf-core/xeniumranger/relabel/tests/main.nf.test index d0962738..7c37a32a 100644 --- a/modules/nf-core/xeniumranger/relabel/tests/main.nf.test +++ b/modules/nf-core/xeniumranger/relabel/tests/main.nf.test @@ -26,7 +26,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_relabel"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = UNZIP.out.unzipped_archive.map { it[1] } + "/gene_panel.json" @@ -76,7 +76,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_relabel"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = UNZIP.out.unzipped_archive.map { it[1] } + "/gene_panel.json" diff --git a/modules/nf-core/xeniumranger/resegment/main.nf b/modules/nf-core/xeniumranger/resegment/main.nf index 5d28fa69..df2b0ea7 100644 --- a/modules/nf-core/xeniumranger/resegment/main.nf +++ b/modules/nf-core/xeniumranger/resegment/main.nf @@ -2,69 +2,53 @@ process XENIUMRANGER_RESEGMENT { tag "$meta.id" label 'process_high' - container "nf-core/xeniumranger:3.0.1" + container "nf-core/xeniumranger:4.0" input: - tuple val(meta), path(xenium_bundle) - val(expansion_distance) - val(dapi_filter) - val(boundary_stain) - val(interior_stain) + tuple val(meta), path(xenium_bundle, stageAs: "bundle/") output: - tuple val(meta), path("**/outs/**"), emit: outs - path "versions.yml", emit: versions + tuple val(meta), 
path("${prefix}"), emit: outs + tuple val("${task.process}"), val("xeniumranger"), eval("xeniumranger -V | sed -e 's/.*xenium-//'"), emit: versions_xeniumranger, topic: versions when: task.ext.when == null || task.ext.when script: + // Exit if running this module with -profile conda / -profile mamba if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { error "XENIUMRANGER_RESEGMENT module does not support Conda. Please use Docker / Singularity / Podman instead." } - def args = task.ext.args ?: "" - def prefix = task.ext.prefix ?: "${meta.id}" - def expansion_distance = expansion_distance ? "--expansion-distance=\"${expansion_distance}\"": "" - def dapi_filter = dapi_filter ? "--dapi-filter=\"${dapi_filter}\"": "" + prefix = task.ext.prefix ?: "${meta.id}" + def args = task.ext.args ?: "" // Do not use boundary stain in analysis, but keep default interior stain and DAPI - def boundary_stain = boundary_stain ? "--boundary-stain=disable": "" + def boundary_stain = params.boundary_stain ? "" : "--boundary-stain=disable" // Do not use interior stain in analysis, but keep default boundary stain and DAPI - def interior_stain = interior_stain ? "--interior-stain=disable": "" + def interior_stain = params.interior_stain ? "" : "--interior-stain=disable" """ xeniumranger resegment \\ - --id="${prefix}" \\ + --id="XENIUMRANGER_RESEGMENT" \\ --xenium-bundle="${xenium_bundle}" \\ - ${expansion_distance} \\ - ${dapi_filter} \\ + --expansion-distance=${params.expansion_distance} \\ + --dapi-filter=${params.dapi_filter} \\ ${boundary_stain} \\ ${interior_stain} \\ --localcores=${task.cpus} \\ --localmem=${task.memory.toGiga()} \\ ${args} - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + rm -rf "${prefix}" + mv XENIUMRANGER_RESEGMENT/outs "${prefix}" """ stub: - // Exit if running this module with -profile conda / -profile mamba - if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { - error "XENIUMRANGER_RESEGMENT module does not support Conda. Please use Docker / Singularity / Podman instead." - } - def prefix = task.ext.prefix ?: "${meta.id}" + prefix = task.ext.prefix ?: "${meta.id}" """ - mkdir -p "${prefix}/outs/" - touch "${prefix}/outs/fake_file.txt" - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g") - END_VERSIONS + mkdir -p "${prefix}" + touch "${prefix}/experiment.xenium" """ } diff --git a/modules/nf-core/xeniumranger/resegment/meta.yml b/modules/nf-core/xeniumranger/resegment/meta.yml index af0e8882..687c6723 100644 --- a/modules/nf-core/xeniumranger/resegment/meta.yml +++ b/modules/nf-core/xeniumranger/resegment/meta.yml @@ -1,5 +1,7 @@ name: xeniumranger_resegment -description: The xeniumranger resegment module allows you to generate a new segmentation of the morphology image space by rerunning the Xenium Onboard Analysis (XOA) segmentation algorithms with modified parameters. +description: The xeniumranger resegment module allows you to generate a new + segmentation of the morphology image space by rerunning the Xenium Onboard + Analysis (XOA) segmentation algorithms with modified parameters. keywords: - spatial - resegment @@ -24,35 +26,44 @@ input: e.g. 
[ id:'xenium_experiment' ] - xenium_bundle: type: directory - description: Path to the xenium output bundle generated by the Xenium Onboard Analysis pipeline - - - expansion_distance: - type: integer - description: Nuclei boundary expansion distance in µm. Only for use when nucleus segmentation provided as input. Default-5 (accepted range 0 - 100) - - - dapi_filter: - type: integer - description: Minimum intensity in photoelectrons to filter nuclei default-100 range of values is 0 to 99th percentile of image stack or 1000, whichever is larger - - - boundary_stain: - type: string - description: Specify the name of the boundary stain to use or disable possible options are default-ATP1A1/CD45/E-Cadherin or disable - - - interior_stain: - type: string - description: Specify the name of the interior stain to use or disable possible options are default-18S or disable + description: Path to the xenium output bundle generated by the Xenium + Onboard Analysis pipeline output: - - outs: - - meta: - type: file - description: Files containing the outputs of Cell Ranger, see official 10X Genomics documentation for a complete list - pattern: "${meta.id}/outs/*" - - "**/outs/**": - type: file - description: Files containing the outputs of xenium ranger, see official 10X Genomics documentation for a complete list of outputs - pattern: "${meta.id}/outs/*" - - versions: - - versions.yml: - type: file - description: File containing software versions - pattern: "versions.yml" + outs: + - - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test' ] + - "${prefix}": + type: directory + description: Directory containing the output xenium bundle of Xenium + Ranger + pattern: "${prefix}" + versions_xeniumranger: + - - "${task.process}": + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - "xeniumranger -V | sed -e 's/.*xenium-//'": + type: string + description: The command used to generate the version of the tool +topics: + versions: + - - "${task.process}": + type: string + description: The process the versions were collected from + - xeniumranger: + type: string + description: The tool name + - "xeniumranger -V | sed -e 's/.*xenium-//'": + type: string + description: The command used to generate the version of the tool authors: - "@khersameesh24" + - "@dongzehe" maintainers: - "@khersameesh24" + - "@dongzehe" diff --git a/modules/nf-core/xeniumranger/resegment/tests/main.nf.test b/modules/nf-core/xeniumranger/resegment/tests/main.nf.test index 861c2414..36f26f46 100644 --- a/modules/nf-core/xeniumranger/resegment/tests/main.nf.test +++ b/modules/nf-core/xeniumranger/resegment/tests/main.nf.test @@ -26,7 +26,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_resegment"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = [] @@ -57,7 +57,7 @@ nextflow_process { when { process { """ - input[0] = Channel.of([ + input[0] = channel.of([ [id: "test_xeniumranger_resegment"], ]).combine(UNZIP.out.unzipped_archive.map { it[1] }) input[1] = [] diff --git a/modules/nf-core/xeniumranger/resegment/xeniumranger-resegment.diff b/modules/nf-core/xeniumranger/resegment/xeniumranger-resegment.diff new file mode 100644 index 00000000..afa09c26 --- /dev/null +++ b/modules/nf-core/xeniumranger/resegment/xeniumranger-resegment.diff @@ -0,0 +1,30 @@ +Changes in component 'nf-core/xeniumranger/resegment' 
+'modules/nf-core/xeniumranger/resegment/meta.yml' is unchanged
+Changes in 'xeniumranger/resegment/main.nf':
+--- modules/nf-core/xeniumranger/resegment/main.nf
++++ modules/nf-core/xeniumranger/resegment/main.nf
+@@ -24,10 +24,24 @@
+     prefix = task.ext.prefix ?: "${meta.id}"
+     def args = task.ext.args ?: ""
+
++    // Do not use boundary stain in analysis, but keep default interior stain and DAPI
++    def boundary_stain = params.boundary_stain ? "" : "--boundary-stain=disable"
++    // Do not use interior stain in analysis, but keep default boundary stain and DAPI
++    def interior_stain = params.interior_stain ? "" : "--interior-stain=disable"
++
+     """
+     xeniumranger resegment \\
+         --id="XENIUMRANGER_RESEGMENT" \\
+         --xenium-bundle="${xenium_bundle}" \\
++        --expansion-distance=${params.expansion_distance} \\
++        --dapi-filter=${params.dapi_filter} \\
++        ${boundary_stain} \\
++        ${interior_stain} \\
+         --localcores=${task.cpus} \\
+         --localmem=${task.memory.toGiga()} \\
+         ${args}
+'modules/nf-core/xeniumranger/resegment/tests/main.nf.test.snap' is unchanged
+'modules/nf-core/xeniumranger/resegment/tests/tags.yml' is unchanged
+'modules/nf-core/xeniumranger/resegment/tests/nextflow.config' is unchanged
+'modules/nf-core/xeniumranger/resegment/tests/main.nf.test' is unchanged
+************************************************************
diff --git a/nextflow.config b/nextflow.config
index 5048b3ff..73be717e 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -9,15 +9,114 @@
 // Global default params, used in configs
 params {
-    // TODO nf-core: Specify your pipeline's command line flags
     // Input options
-    input = null
+    input = null // path to the samplesheet.csv containing meta,bundle,image
+    outdir = null // path to generate pipeline results at
+    mode = null // please check nextflow_schema.json for the modes you can select
+    method = null // name of the method to run for the image, coordinate, or segfree approaches
+    gene_panel = null // path to the gene panel JSON file if `relabel_genes` is true
+    qupath_polygons = null // path to QuPath segmentation results in GeoJSON format
+    alignment_csv = null // image alignment file containing a 3x3 transformation matrix, where the last row is [0,0,1]
+    cellpose_model = null // custom cellpose model to use for running or starting training
+    stardist_model = '2D_versatile_fluo' // stardist pretrained model for cell segmentation
+    stardist_nuclei_model = '2D_versatile_fluo' // stardist pretrained model for nuclei segmentation
+    stardist_prob_thresh = null // stardist probability threshold
+    stardist_nms_thresh = null // stardist NMS threshold
+    stardist_n_tiles = "8 8" // tiling for large images (Xenium images are ~20K×25K)
+    segmentation_mask = null // prior segmentation mask
+    probes_fasta = null // FASTA file for the probe sequences used in the Xenium experiment
+    reference_annotations = null // path to the genomic features (.gff) and fasta (.fa) files used as reference annotations
+    gene_synonyms = null // gene synonyms that may have been counted as off-targets but simply differ in name
+    // GPU flag (set to true by the gpu profile)
+    use_gpu = false
+    gpu_queue = null // AWS Batch queue for GPU tasks (e.g., SEGGER, ProSeg)
+    cellpose_queue = null // AWS Batch queue for Cellpose (single large GPU)
+
+    // execution specific
+    sharpen_tiff = false // whether to sharpen the morphology-focus tiff
+    nucleus_segmentation_only = false // to only run nucleus segmentation while running segmentation methods & XR_IMP-SEG
+    cell_segmentation_only = true // to only run cell segmentation while running segmentation methods & XR_IMP-SEG
+    cellpose_downscale = false // pre-downscale morphology image to avoid cellpose OOM on large images
+
+    // Xeniumranger specific
+    xeniumranger_only = false // to generate a redefined bundle by changing only the xeniumranger-specific params
+    relabel_genes = false // whether to correct gene names with gene_panel.json
+    expansion_distance = 5 // default nuclear expansion distance in XOA v2.0 & later
+    dapi_filter = 100 // adjust the minimum peak intensity to use more nuclei
+    interior_stain = true // interior stain is enabled by default - false to disable
+    boundary_stain = true // boundary stain is enabled by default - false to disable
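+
+    // Illustrative mapping of the Xeniumranger params above onto the
+    // `xeniumranger resegment` command line (a sketch based on the module code,
+    // not an exhaustive list of flags):
+    //   expansion_distance = 5, dapi_filter = 100 -> --expansion-distance=5 --dapi-filter=100
+    //   boundary_stain = false                    -> --boundary-stain=disable
+    //   interior_stain = false                    -> --interior-stain=disable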
+
+    // Segger specific
+    segmentation_refinement = false // whether to run the segmentation refinement step (segger)
+    segger_accelerator = 'cpu' // either 'cuda' or 'cpu'
+    segger_knn_method = 'kd_tree' // 'cuda' - ensure your system has CUDA installed and configured properly
+    segger_num_workers = 4 // number of data-loader workers for segger
+    segger_model = null // path to a pre-trained segger model checkpoint
+
+    // Proseg specific
+    format = 'xenium' // preset value set to `xenium`
+
+    // Segmentation methods
+    image_seg_methods = ["cellpose", "xeniumranger", "baysor", "stardist"]
+    transcript_seg_methods = ["proseg", "segger", "baysor"]
+    segfree_methods = ["ficture", "baysor"]
+
+    // Ficture specific
+    negative_control_regex = null
+    features = null
+
+    // Baysor specific
+    filter_transcripts = false
+    min_qv = 20
+    max_x = 24000.0
+    min_x = 0.0
+    max_y = 24000.0
+    min_y = 0.0
+
+    // Generic tiling parameters (for proseg and other methods)
+    tiling = false // enable tiled segmentation (divide → parallel segmentation → stitch)
+    patch_grid = '3x3' // grid layout for tiling (rows x cols)
+    patch_overlap = 50 // overlap between patches in microns
+    patch_filter_method = null // post-stitch cell filtering: 'empirical', 'distribution', 'both', or null
+    patch_filter_iqr_multiplier = 3.0 // IQR multiplier for empirical cell size filtering
+    patch_filter_z_threshold = 4.0 // z-score threshold for distribution cell size filtering
+
+    // Baysor-specific parameters
+    baysor_scale = 30 // Baysor --scale for non-tiled runs
+    baysor_config = null // path to baysor config TOML (optional)
+    baysor_tiling = true // enable tiled Baysor (divide → per-patch Baysor → stitch)
+    baysor_tiling_micron = 1200 // tile width in microns for Baysor tiling
+    baysor_tiling_overlap = 200 // overlap between Baysor patches in microns
+    baysor_tiling_balanced = true // balance transcripts across tiles (merge sparse tiles)
+    baysor_tiling_scale = 39 // Baysor --scale for tiled runs (larger to compensate for EM on tiles)
+    baysor_tiling_min_mols_per_cell = 120 // --min-molecules-per-cell for tiled Baysor
+    baysor_tiling_min_transcripts_per_cell = 50 // post-stitch cell filtering threshold
+
+    // Baysor prior segmentation
+    // null — no prior (random EM init)
+    // 'cells' — Xenium bundle cell_id column (column-based, works with tiling)
+    // 'cellpose' — run Cellpose cell mask as image prior (non-tiled only)
+    baysor_prior = null
+    baysor_prior_confidence = 0.2 // prior-segmentation-confidence [0-1]
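+
+    // Illustrative Baysor prior setup (a sketch, not a shipped default):
+    //   baysor_prior = 'cells'; baysor_prior_confidence = 0.5
+    // would seed Baysor's EM from the Xenium bundle cell_id column and weight
+    // that prior at confidence 0.5 when assigning transcripts.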
+
+    // Segger specific
+    segmentation_refinement = false // whether to run segmentation refinement step (segger)
+    segger_accelerator = 'cpu' // either 'cuda' or 'cpu'
+    segger_knn_method = 'kd_tree' // 'cuda' - ensure your system has CUDA installed and configured properly
+    segger_num_workers = 4 // number of data-loader workers for segger
+    segger_model = null // path to a pre-trained segger model checkpoint
+
+    // Proseg specific
+    format = 'xenium' // preset value set as `xenium`
+
+    // Segmentation methods
+    image_seg_methods = ["cellpose", "xeniumranger", "baysor", "stardist"]
+    transcript_seg_methods = ["proseg", "segger", "baysor"]
+    segfree_methods = ["ficture", "baysor"]
+
+    // Ficture specific
+    negative_control_regex = null
+    features = null
+
+    // Baysor specific
+    filter_transcripts = false
+    min_qv = 20
+    max_x = 24000.0
+    min_x = 0.0
+    max_y = 24000.0
+    min_y = 0.0
+
+    // Generic tiling parameters (for proseg and other methods)
+    tiling = false // enable tiled segmentation (divide → parallel segmentation → stitch)
+    patch_grid = '3x3' // grid layout for tiling (rows x cols)
+    patch_overlap = 50 // overlap between patches in microns
+    patch_filter_method = null // post-stitch cell filtering: 'empirical', 'distribution', 'both', or null
+    patch_filter_iqr_multiplier = 3.0 // IQR multiplier for empirical cell size filtering
+    patch_filter_z_threshold = 4.0 // z-score threshold for distribution cell size filtering
+
+    // Baysor-specific parameters
+    baysor_scale = 30 // Baysor --scale for non-tiled runs
+    baysor_config = null // path to baysor config TOML (optional)
+    baysor_tiling = true // enable tiled Baysor (divide → per-patch Baysor → stitch)
+    baysor_tiling_micron = 1200 // tile width in microns for Baysor tiling
+    baysor_tiling_overlap = 200 // overlap between Baysor patches in microns
+    baysor_tiling_balanced = true // balance transcripts across tiles (merge sparse tiles)
+    baysor_tiling_scale = 39 // Baysor --scale for tiled runs (larger to compensate for EM on tiles)
+    baysor_tiling_min_mols_per_cell = 120 // --min-molecules-per-cell for tiled Baysor
+    baysor_tiling_min_transcripts_per_cell = 50 // post-stitch cell filtering threshold
+
+    // Baysor prior segmentation
+    // null       — no prior (random EM init)
+    // 'cells'    — Xenium bundle cell_id column (column-based, works with tiling)
+    // 'cellpose' — run Cellpose cell mask as image prior (non-tiled only)
+    baysor_prior = null
+    baysor_prior_confidence = 0.2 // prior-segmentation-confidence [0-1]
+
+    // Segger specific
+    tile_width = 120
+    tile_height = 120
+    batch_size_train = 4 // larger batch size can speed up training, but requires more memory
+    devices = 4 // Use multiple GPUs by increasing the devices parameter to further accelerate training
+    max_epochs = 200 // increasing #epochs can improve model performance with more learning cycles, but extends training time
+    batch_size_predict = 1 // larger batch size can speed up prediction, but requires more memory
+    cc_analysis = false // to control connected component analysis
+
+    // qc specific
+    run_qc = true // whether to run the qc layer of the pipeline
+    offtarget_probe_tracking = false // whether to run off-target probe tracking (provide probes_fasta, reference sequences, gene synonyms)
+
+    // utility modules
+    csplit_x_bins = 2 // number of tiles along the x axis (total number of bins is the product of x_bins * y_bins)
+    csplit_y_bins = 2 // number of tiles along the y axis
-
-    // References
-    genome = null
-    igenomes_base = 's3://ngi-igenomes/igenomes'
-    igenomes_ignore = false
     // MultiQC options
     multiqc_config = null
     multiqc_title = null
@@ -25,210 +124,281 @@ params {
     max_multiqc_email_size = '25.MB'
     multiqc_methods_description = null
-    // Boilerplate options
-    outdir = null
-    tracedir = "${params.outdir}/pipeline_info"
-    publish_dir_mode = 'copy'
-    email = null
-    email_on_fail = null
-    plaintext_email = false
-    monochrome_logs = false
-    hook_url = null
-    help = false
-    version = false
-    validate_params = true
-    show_hidden_params = false
-    schema_ignore_params = 'genomes'
+    // pipeline dev and testing options
+    buffer_samples = false // process one sample at a time from the multi-sample samplesheet
+    buffer_size = 1 // buffer size 0 means no buffering of samples
+    restrict_concurrency = false // restrict running certain processes in parallel
+    // Boilerplate options
+    publish_dir_mode = 'copy'
+    email = null
+    email_on_fail = null
+    plaintext_email = false
+    monochromeLogs = false
+    monochrome_logs = false
+    hook_url = System.getenv('HOOK_URL')
+    help = false
+    help_full = false
+    show_hidden = false
+    version = false
+    pipelines_testdata_base_path = 'https://raw.githubusercontent.com/nf-core/test-datasets/'
+    trace_report_suffix = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss')
     // Config options
+    config_profile_name = null
+    config_profile_description = null
+
     custom_config_version = 'master'
     custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
-    config_profile_description = null
     config_profile_contact = null
     config_profile_url = null
-    config_profile_name = null
+    test_data_base = 'https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe'
-    // Max resource options
-    // Defaults only, expecting to be overwritten
-    max_memory = '128.GB'
-    max_cpus = 16
-    max_time = '240.h'
-
+    // Schema validation default options
+    validate_params = true
 }

 // Load base.config by default for all pipelines
 includeConfig 'conf/base.config'

-// Load nf-core custom profiles from different Institutions
-try {
-    includeConfig "${params.custom_config_base}/nfcore_custom.config"
-} catch (Exception e) {
-    System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
-}
-
-// Load nf-core/spatialxe custom profiles from different institutions.
-// Warning: Uncomment only if a pipeline-specific instititutional config already exists on nf-core/configs!
-// try { -// includeConfig "${params.custom_config_base}/pipeline/spatialxe.config" -// } catch (Exception e) { -// System.err.println("WARNING: Could not load nf-core/config/spatialxe profiles: ${params.custom_config_base}/pipeline/spatialxe.config") -// } - - profiles { - debug { process.beforeScript = 'echo $HOSTNAME' } + debug { + dumpHashes = true + process.beforeScript = 'echo $HOSTNAME' + cleanup = false + nextflow.enable.configProcessNamesValidation = true + } conda { - conda.enabled = true - docker.enabled = false - singularity.enabled = false - podman.enabled = false - shifter.enabled = false - charliecloud.enabled = false + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false + conda.channels = ['conda-forge', 'bioconda'] + apptainer.enabled = false } mamba { - conda.enabled = true - conda.useMamba = true - docker.enabled = false - singularity.enabled = false - podman.enabled = false - shifter.enabled = false - charliecloud.enabled = false + conda.enabled = false + conda.useMamba = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false + apptainer.enabled = false } docker { docker.enabled = true - docker.userEmulation = true + docker.fixOwnership = true + conda.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false + docker.runOptions = '-u $(id -u):$(id -g)' } arm { - docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64' + docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64' } singularity { - singularity.enabled = true - singularity.autoMounts = true - docker.enabled = false - podman.enabled = false - shifter.enabled = false - charliecloud.enabled = false + singularity.enabled = true + singularity.autoMounts = true + singularity.pullTimeout = '100 min' + conda.enabled = false + docker.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false + apptainer.enabled = false } podman { - podman.enabled = true - docker.enabled = false - singularity.enabled = false - shifter.enabled = false - charliecloud.enabled = false + podman.enabled = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + shifter.enabled = false + charliecloud.enabled = false + apptainer.enabled = false } shifter { - shifter.enabled = true - docker.enabled = false - singularity.enabled = false - podman.enabled = false - charliecloud.enabled = false + shifter.enabled = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + charliecloud.enabled = false + apptainer.enabled = false } charliecloud { - charliecloud.enabled = true - docker.enabled = false - singularity.enabled = false - podman.enabled = false - shifter.enabled = false + charliecloud.enabled = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + apptainer.enabled = false + } + apptainer { + apptainer.enabled = true + apptainer.autoMounts = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false + } + wave { + apptainer.ociAutoPull = true + singularity.ociAutoPull = true + wave.enabled = true + wave.freeze = true + wave.strategy = 
'conda,container'
+    }
     gitpod {
-        executor.name = 'local'
-        executor.cpus = 16
-        executor.memory = 60.GB
+        executor.name = 'local'
+        executor.cpus = 8
+        executor.memory = 16.GB
+    }
+    gpu {
+        params.use_gpu = true
+        docker.runOptions = '-u $(id -u):$(id -g) --gpus all'
+        apptainer.runOptions = '--nv'
+        singularity.runOptions = '--nv'
+    }
+    aws {
+        process {
+            withLabel:process_gpu {
+                // Must repeat base.config label properties — profile withLabel replaces, not merges
+                ext.use_gpu = { params.use_gpu }
+                accelerator = { params.use_gpu ? 1 : null }
+                containerOptions = { "--shm-size ${task.memory.toGiga().intValue()}g" }
+                queue = { params.gpu_queue ?: null }
+            }
+            withLabel:process_gpu_single {
+                ext.use_gpu = { params.use_gpu }
+                accelerator = { params.use_gpu ? 1 : null }
+                containerOptions = { "--shm-size ${task.memory.toGiga().intValue()}g" }
+                queue = { params.cellpose_queue ?: params.gpu_queue ?: null }
+            }
+        }
+    }
     test      { includeConfig 'conf/test.config'      }
     test_full { includeConfig 'conf/test_full.config' }
 }

-// Load igenomes.config if required
-if (!params.igenomes_ignore) {
-    includeConfig 'conf/igenomes.config'
-} else {
-    params.genomes = [:]
-}
+// Load nf-core custom profiles from different institutions
+includeConfig params.custom_config_base && (!System.getenv('NXF_OFFLINE') || !params.custom_config_base.startsWith('http')) ? "${params.custom_config_base}/nfcore_custom.config" : "/dev/null"
+
+// Load nf-core/spatialxe custom profiles from different institutions.
+includeConfig !System.getenv('NXF_OFFLINE') && params.custom_config_base ? "${params.custom_config_base}/pipeline/spatialxe.config" : "/dev/null"

+// Set default registry for Apptainer, Docker, Podman, Charliecloud and Singularity independent of -profile
+// Will not be used unless Apptainer / Docker / Podman / Charliecloud / Singularity are enabled
+// Set to your registry if you have a mirror of containers
+apptainer.registry = 'quay.io'
+docker.registry = 'quay.io'
+podman.registry = 'quay.io'
+singularity.registry = 'quay.io'
+charliecloud.registry = 'quay.io'

 // Export these variables to prevent local Python/R libraries from conflicting with those in the container
 // The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container.
 // See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable.
 env {
+    PYTORCH_CUDA_ALLOC_CONF = "expandable_segments:True"
     PYTHONNOUSERSITE = 1
     R_PROFILE_USER = "/.Rprofile"
     R_ENVIRON_USER = "/.Renviron"
     JULIA_DEPOT_PATH = "/usr/local/share/julia"
+    MPLCONFIGDIR = "./tmp"
+    NUMBA_CACHE_DIR = "./tmp"
+    NUMBA_DISABLE_CACHE = 1
 }

-// Capture exit codes from upstream processes when piping
-process.shell = ['/bin/bash', '-euo', 'pipefail']
+// Set bash options
+process.shell = [
+    "bash",
+    "-C",         // No clobber - prevent output redirection from overwriting files.
+    "-e",         // Exit if a tool returns a non-zero status/exit code
+    "-u",         // Treat unset variables and parameters as an error
+    "-o",         // Returns the status of the last command to exit..
+    "pipefail"    //   ..with a non-zero status or zero if all successfully execute
+]
+
+// Disable process selector warnings by default. Use debug profile to enable warnings.
+nextflow.enable.configProcessNamesValidation = false +nextflow.enable.moduleBinaries = true -def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss') timeline { enabled = true - file = "${params.tracedir}/execution_timeline_${trace_timestamp}.html" + file = "${params.outdir}/pipeline_info/execution_timeline_${params.trace_report_suffix}.html" } report { enabled = true - file = "${params.tracedir}/execution_report_${trace_timestamp}.html" + file = "${params.outdir}/pipeline_info/execution_report_${params.trace_report_suffix}.html" } trace { - enabled = true - file = "${params.tracedir}/execution_trace_${trace_timestamp}.txt" + enabled = true + overwrite = true + file = "${params.outdir}/pipeline_info/execution_trace_${params.trace_report_suffix}.txt" } dag { enabled = true - file = "${params.tracedir}/pipeline_dag_${trace_timestamp}.html" + file = "${params.outdir}/pipeline_info/pipeline_dag_${params.trace_report_suffix}.html" } manifest { name = 'nf-core/spatialxe' - author = """Florian Heyl""" + contributors = [ + [ + name: 'Sameesh Kher', + affiliation: 'German Cancer Research Center (DKFZ), Heidelberg, DE', + email: 'sameesh.kher@dkfz-heidelberg.de', + github: '@khersameesh24', + contribution: ['author', 'maintainer'], + orcid: '0009-0008-2420-6464' + ], + [ + name: 'Florian Heyl', + affiliation: 'German Cancer Research Center (DKFZ), Heidelberg, DE', + email: 'florian.heyl@dkfz-heidelberg.de', + github: '@heylf', + contribution: ['author', 'maintainer'], + orcid: '' + ], + [ + name: 'Dongze He', + affiliation: 'Altos Labs, San Diego, USA', + email: 'dongzehe.zaza@gmail.com', + github: '@dongzehe', + contribution: ['contributor'], + orcid: '0000-0001-8259-7434' + ] + ] homePage = 'https://github.com/nf-core/spatialxe' - description = """A pipeline for spatialomics Xenium data.""" + description = """A pipeline for spatialomics 10x Xenium In Situ data.""" mainScript = 'main.nf' - nextflowVersion = '!>=22.10.1' - version = '1.0dev' + defaultBranch = 'master' + nextflowVersion = '!>=25.04.0' + version = '1.0.0' doi = '' } -// Load modules.config for DSL2 module specific options -includeConfig 'conf/modules.config' +// Nextflow plugins +plugins { + id 'nf-schema@2.5.1' // Validation of pipeline parameters and creation of an input channel from a sample sheet +} -// Function to ensure that resource requirements don't go beyond -// a maximum limit -def check_max(obj, type) { - if (type == 'memory') { - try { - if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1) - return params.max_memory as nextflow.util.MemoryUnit - else - return obj - } catch (all) { - println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj" - return obj - } - } else if (type == 'time') { - try { - if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1) - return params.max_time as nextflow.util.Duration - else - return obj - } catch (all) { - println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj" - return obj - } - } else if (type == 'cpus') { - try { - return Math.min( obj, params.max_cpus as int ) - } catch (all) { - println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! 
Using default value: $obj"
-            return obj
-        }
-    }
+validation {
+    defaultIgnoreParams = ["genomes"]
+    monochromeLogs = params.monochrome_logs
 }
+
+// Load modules.config for DSL2 module specific options
+includeConfig 'conf/modules.config'
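A note on the `aws` profile introduced above: as its inline comment says, a `withLabel` block in a profile (or in a `-c` custom config) replaces the matching block from `base.config` rather than merging with it, so every label property has to be restated. A minimal sketch of a site-specific override; the queue name is a hypothetical placeholder, not part of the pipeline:

```groovy
// custom.config -- illustrative only; 'my-gpu-queue' is not a real queue name
params.use_gpu = true

process {
    withLabel:process_gpu {
        // restate all label properties, since this block replaces the one in base.config
        accelerator      = 1
        containerOptions = { "--shm-size ${task.memory.toGiga().intValue()}g" }
        queue            = 'my-gpu-queue'
    }
}
```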
diff --git a/nextflow_schema.json b/nextflow_schema.json
index 5454acf0..0c47e336 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -1,25 +1,26 @@
 {
-    "$schema": "http://json-schema.org/draft-07/schema",
+    "$schema": "https://json-schema.org/draft/2020-12/schema",
     "$id": "https://raw.githubusercontent.com/nf-core/spatialxe/master/nextflow_schema.json",
     "title": "nf-core/spatialxe pipeline parameters",
-    "description": "A pipeline for spatialomics Xenium data.",
+    "description": "A pipeline for spatialomics 10x Xenium In Situ data.",
     "type": "object",
-    "definitions": {
+    "$defs": {
         "input_output_options": {
             "title": "Input/output options",
             "type": "object",
             "fa_icon": "fas fa-terminal",
             "description": "Define where the pipeline should find input data and save output data.",
-            "required": ["input", "outdir"],
+            "required": ["input", "outdir", "mode"],
             "properties": {
                 "input": {
                     "type": "string",
                     "format": "file-path",
+                    "exists": true,
+                    "schema": "assets/schema_input.json",
                     "mimetype": "text/csv",
                     "pattern": "^\\S+\\.csv$",
-                    "schema": "assets/schema_input.json",
-                    "description": "Path to comma-separated file containing information about the samples in the experiment.",
-                    "help_text": "You will need to create a design file with information about the samples in your experiment before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row. See [usage docs](https://nf-co.re/spatialxe/usage#samplesheet-input).",
+                    "description": "Path to comma-separated file containing information about the Xenium experiment (e.g., meta,path-to-xenium-bundle,path-to-morphology.ome.tif).",
+                    "help_text": "You will need to create a design file with information about the samples in your experiment before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row.",
                     "fa_icon": "fas fa-file-csv"
                 },
                 "outdir": {
@@ -28,6 +29,79 @@
                     "description": "The output directory where the results will be saved. You have to use absolute paths to storage on Cloud infrastructure.",
                     "fa_icon": "fas fa-folder-open"
                 },
+                "mode": {
+                    "type": "string",
+                    "description": "Mode in which the pipeline is to be run. Either image-based segmentation, coordinate-based segmentation, segmentation-free analysis, data preview, or quality control.",
+                    "enum": ["image", "coordinate", "segfree", "preview", "qc"]
+                },
+                "method": {
+                    "type": "string",
+                    "enum": ["cellpose", "xeniumranger", "baysor", "proseg", "segger", "ficture", "stardist"],
+                    "description": "Segmentation method to run."
+                },
+                "gene_panel": {
+                    "type": "string",
+                    "format": "file-path",
+                    "description": "Path to gene panel JSON file to use for relabeling transcripts with the correct gene."
+                },
+                "qupath_polygons": {
+                    "type": "string",
+                    "description": "Path to qupath segmentation file in GeoJSON format.",
+                    "format": "file-path"
+                },
+                "alignment_csv": {
+                    "type": "string",
+                    "description": "Image alignment file containing a similarity transform matrix (e.g., the _imagealignment.csv file exported from Xenium Explorer).",
+                    "format": "file-path"
+                },
+                "cellpose_model": {
+                    "type": "string",
+                    "description": "Model to use for running or starting training.",
+                    "format": "file-path"
+                },
+                "stardist_model": {
+                    "type": "string",
+                    "default": "2D_versatile_fluo",
+                    "description": "StarDist pretrained model for cell segmentation (e.g., '2D_versatile_fluo', '2D_versatile_he')."
+                },
+                "stardist_nuclei_model": {
+                    "type": "string",
+                    "default": "2D_versatile_fluo",
+                    "description": "StarDist pretrained model for nuclei segmentation."
+                },
+                "stardist_prob_thresh": {
+                    "type": "number",
+                    "description": "StarDist object probability threshold. Lower values detect more objects."
+                },
+                "stardist_nms_thresh": {
+                    "type": "number",
+                    "description": "StarDist non-maximum suppression threshold. Lower values reduce overlapping detections."
+                },
+                "stardist_n_tiles": {
+                    "type": "string",
+                    "default": "8 8",
+                    "description": "StarDist tiling for large images (e.g., '4 4'). Reduces memory usage."
+                },
+                "segmentation_mask": {
+                    "type": "string",
+                    "description": "Prior segmentation mask from other segmentation methods.",
+                    "format": "file-path"
+                },
+                "probes_fasta": {
+                    "type": "string",
+                    "description": "Fasta file for the probe sequences used in the xenium experiment.",
+                    "format": "file-path"
+                },
+                "reference_annotations": {
+                    "type": "string",
+                    "description": "Path to the directory containing genomic features (.gff) and fasta (.fa) files used as reference annotations.",
+                    "format": "file-path"
+                },
+                "gene_synonyms": {
+                    "type": "string",
+                    "description": "Gene synonyms that may have been counted as off-targets but simply differ in name.",
+                    "format": "file-path"
+                },
                 "email": {
                     "type": "string",
                     "description": "Email address for completion summary.",
@@ -42,41 +116,313 @@
             }
         }
     },
-        "reference_genome_options": {
-            "title": "Reference genome options",
+        "segmentation_options": {
+            "title": "Segmentation options",
             "type": "object",
-            "fa_icon": "fas fa-dna",
-            "description": "Reference genome related files and options required for the workflow.",
+            "description": "Options for the segmentation layer of the spatialxe pipeline",
+            "default": "",
             "properties": {
-                "genome": {
+                "run_qc": {
+                    "type": "boolean",
+                    "description": "Whether to run the qc layer in the pipeline.",
+                    "default": true
+                },
+                "offtarget_probe_tracking": {
+                    "type": "boolean",
+                    "description": "Whether to run the off-target probe tracking.",
+                    "default": false
+                },
+                "segmentation_refinement": {
+                    "type": "boolean",
+                    "description": "Whether to run refinement on the image-based segmentation methods. Runs coordinate-based methods after the initial image-based segmentation run."
+                },
+                "relabel_genes": {
+                    "type": "boolean",
+                    "description": "Whether to relabel genes with the gene_panel.json file. True when gene_panel is provided."
+                },
+                "xeniumranger_only": {
+                    "type": "boolean",
+                    "description": "Whether to run the vanilla xeniumranger workflow."
+                },
+                "cell_segmentation_only": {
+                    "type": "boolean",
+                    "description": "Whether to only run cell segmentation."
+                },
+                "nucleus_segmentation_only": {
+                    "type": "boolean",
+                    "description": "Whether to only run nucleus segmentation."
+                },
+                "expansion_distance": {
+                    "type": "integer",
+                    "default": 5,
+                    "description": "Nuclei boundary expansion distance in µm. Default: 5 (Min: 0, Max: 15 if either boundary-stain or interior-stain are enabled and 100 if nucleus-expansion only)"
+                },
+                "dapi_filter": {
+                    "type": "integer",
+                    "default": 100,
+                    "description": "Minimum intensity in photoelectrons (pe) to filter nuclei. Default: 100 (the appropriate range of values is 0 to the 99th percentile of the image stack or 1000, whichever is larger)."
+                },
+                "interior_stain": {
+                    "type": "boolean",
+                    "default": true,
+                    "description": "Whether to use the default interior stain (18S) for segmentation. Set to false to disable it (--interior-stain=disable) while keeping the boundary stain and DAPI. Supported for cell segmentation staining workflow output bundles."
+                },
+                "boundary_stain": {
+                    "type": "boolean",
+                    "default": true,
+                    "description": "Whether to use the default boundary stain (ATP1A1/CD45/E-Cadherin) for segmentation. Set to false to disable it (--boundary-stain=disable) while keeping the interior stain and DAPI. Supported for cell segmentation staining workflow output bundles."
+                },
+                "use_gpu": {
+                    "type": "boolean",
+                    "default": false,
+                    "description": "Enable GPU acceleration (set automatically by the gpu profile)."
+                },
+                "gpu_queue": {
+                    "type": "string",
+                    "description": "AWS Batch queue for GPU tasks (e.g., Segger, ProSeg)."
+                },
+                "cellpose_queue": {
+                    "type": "string",
+                    "description": "AWS Batch queue for Cellpose (single large GPU)."
+                },
+                "cellpose_downscale": {
+                    "type": "boolean",
+                    "default": false,
+                    "description": "Pre-downscale morphology image to avoid Cellpose OOM on large images."
+                },
+                "sharpen_tiff": {
+                    "type": "boolean",
+                    "description": "Whether to sharpen the morphology.ome.tif file."
+                },
+                "segger_accelerator": {
                    "type": "string",
-                    "description": "Name of iGenomes reference.",
-                    "fa_icon": "fas fa-book",
-                    "help_text": "If using a reference genome configured in the pipeline using iGenomes, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. `--genome GRCh38`. \n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details."
+                    "default": "cpu",
+                    "enum": ["cpu", "cuda"],
+                    "description": "Device used for training (e.g., cuda for GPU, or cpu)."
                },
-                "fasta": {
+                "segger_knn_method": {
+                    "type": "string",
+                    "default": "kd_tree",
+                    "enum": ["kd_tree", "cuda"],
+                    "description": "Method for KNN computation (e.g., cuda for GPU-based computation)."
+                },
+                "segger_num_workers": {
+                    "type": "integer",
+                    "default": 4,
+                    "description": "Number of data-loader workers for Segger."
+                },
+                "segger_model": {
                    "type": "string",
                    "format": "file-path",
-                    "mimetype": "text/plain",
-                    "pattern": "^\\S+\\.fn?a(sta)?(\\.gz)?$",
-                    "description": "Path to FASTA genome file.",
-                    "help_text": "This parameter is *mandatory* if `--genome` is not specified. If you don't have a BWA index available this will be generated for you automatically. Combine with `--save_reference` to save BWA index for future runs.",
-                    "fa_icon": "far fa-file-code"
+                    "description": "Path to a pre-trained Segger model checkpoint."
                },
-                "igenomes_base": {
+                "format": {
                    "type": "string",
-                    "format": "directory-path",
-                    "description": "Directory / URL base for iGenomes references.",
-                    "default": "s3://ngi-igenomes/igenomes",
-                    "fa_icon": "fas fa-cloud-download-alt",
-                    "hidden": true
+                    "default": "xenium",
+                    "description": "Preset value for the proseg segmentation method."
+                },
+                "image_seg_methods": {
+                    "type": "array",
+                    "items": {
+                        "type": "string",
+                        "enum": ["cellpose", "xeniumranger", "baysor", "stardist"]
+                    },
+                    "description": "List of image-based segmentation methods."
+                },
+                "transcript_seg_methods": {
+                    "type": "array",
+                    "items": {
+                        "type": "string",
+                        "enum": ["proseg", "segger", "baysor"]
+                    },
+                    "description": "List of transcript-based segmentation methods."
+                },
+                "segfree_methods": {
+                    "type": "array",
+                    "items": {
+                        "type": "string",
+                        "enum": ["ficture", "baysor"]
+                    },
+                    "description": "List of segmentation-free methods."
+                },
+                "negative_control_regex": {
+                    "type": "string",
+                    "description": "Regex used to identify or match negative controls in a dataset."
                },
-                "igenomes_ignore": {
+                "features": {
+                    "type": "string",
+                    "description": "List of features to be passed to the ficture method (e.g., TP53,OCIAD1,BCAS3,SOX)."
+                },
+                "filter_transcripts": {
                    "type": "boolean",
-                    "description": "Do not load the iGenomes reference config.",
-                    "fa_icon": "fas fa-ban",
-                    "hidden": true,
-                    "help_text": "Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`."
+                    "description": "Whether to filter the transcripts.parquet file before running Baysor segmentation.",
+                    "default": false
+                },
+                "baysor_scale": {
+                    "type": "integer",
+                    "default": 30,
+                    "description": "Baysor --scale parameter for non-tiled runs."
+                },
+                "baysor_config": {
+                    "type": "string",
+                    "format": "file-path",
+                    "description": "Path to Baysor config TOML file (optional)."
+                },
+                "baysor_tiling": {
+                    "type": "boolean",
+                    "default": true,
+                    "description": "Enable tiled Baysor segmentation (divide transcripts into patches, run Baysor per patch, stitch results)."
+                },
+                "baysor_tiling_micron": {
+                    "type": "integer",
+                    "default": 1200,
+                    "description": "Tile width in microns for Baysor tiling."
+                },
+                "baysor_tiling_overlap": {
+                    "type": "integer",
+                    "default": 200,
+                    "description": "Overlap between Baysor patches in microns."
+                },
+                "baysor_tiling_balanced": {
+                    "type": "boolean",
+                    "default": true,
+                    "description": "Balance transcripts across tiles by merging sparse tiles."
+                },
+                "baysor_tiling_scale": {
+                    "type": "integer",
+                    "default": 39,
+                    "description": "Baysor --scale for tiled runs (larger to compensate for EM on smaller tiles)."
+                },
+                "baysor_tiling_min_mols_per_cell": {
+                    "type": "integer",
+                    "default": 120,
+                    "description": "Minimum molecules per cell (--min-molecules-per-cell) for tiled Baysor."
+                },
+                "baysor_tiling_min_transcripts_per_cell": {
+                    "type": "integer",
+                    "default": 50,
+                    "description": "Post-stitch cell filtering threshold: minimum transcripts per cell."
+                },
+                "baysor_prior": {
+                    "type": "string",
+                    "enum": ["cells", "cellpose"],
+                    "description": "Prior segmentation type for Baysor. 'cells' uses the Xenium bundle cell_id column; 'cellpose' uses a Cellpose mask as image prior."
+                },
+                "baysor_prior_confidence": {
+                    "type": "number",
+                    "default": 0.2,
+                    "description": "Baysor prior-segmentation-confidence (0-1)."
+                },
+                "min_qv": {
+                    "type": "number",
+                    "default": 20,
+                    "description": "Minimum Q-Score to pass filtering."
+                },
+                "min_x": {
+                    "type": "number",
+                    "description": "Only keep transcripts whose x-coordinate is greater than the specified limit. If no limit is specified, the default minimum value is 0.0."
+                },
+                "max_x": {
+                    "type": "number",
+                    "description": "Only keep transcripts whose x-coordinate is less than the specified limit. If no limit is specified, the default (24000.0) retains all transcripts, since a Xenium slide is <24000 microns in x and y."
+                },
+                "min_y": {
+                    "type": "number",
+                    "description": "Only keep transcripts whose y-coordinate is greater than the specified limit. If no limit is specified, the default minimum value is 0.0."
+                },
+                "max_y": {
+                    "type": "number",
+                    "description": "Only keep transcripts whose y-coordinate is less than the specified limit. If no limit is specified, the default (24000.0) retains all transcripts, since a Xenium slide is <24000 microns in x and y."
+                },
+                "tiling": {
+                    "type": "boolean",
+                    "description": "Enable tiled segmentation for large datasets. Divides transcripts into overlapping patches, runs segmentation in parallel per patch, then stitches results.",
+                    "default": false
+                },
+                "patch_grid": {
+                    "type": "string",
+                    "description": "Grid layout for tiling (rows x cols), e.g. '3x3', '4x4'.",
+                    "default": "3x3"
+                },
+                "patch_overlap": {
+                    "type": "integer",
+                    "description": "Overlap between adjacent patches in microns.",
+                    "default": 50
+                },
+                "patch_filter_method": {
+                    "type": "string",
+                    "description": "Post-stitch cell size filtering method. Options: 'empirical' (IQR-based), 'distribution' (z-score), 'both', or null to disable.",
+                    "enum": ["empirical", "distribution", "both"]
+                },
+                "patch_filter_iqr_multiplier": {
+                    "type": "number",
+                    "description": "IQR multiplier for empirical cell size filtering during stitching.",
+                    "default": 3.0
+                },
+                "patch_filter_z_threshold": {
+                    "type": "number",
+                    "description": "Z-score threshold for distribution-based cell size filtering during stitching.",
+                    "default": 4.0
+                },
+                "csplit_x_bins": {
+                    "type": "integer",
+                    "default": 2,
+                    "description": "Number of tiles along the x axis for cell-type separability."
+                },
+                "csplit_y_bins": {
+                    "type": "integer",
+                    "default": 2,
+                    "description": "Number of tiles along the y axis for cell-type separability."
+                },
+                "tile_width": {
+                    "type": "integer",
+                    "description": "Width of the tiles in pixels",
+                    "default": 120
+                },
+                "tile_height": {
+                    "type": "integer",
+                    "description": "Height of the tiles in pixels",
+                    "default": 120
+                },
+                "batch_size_train": {
+                    "type": "integer",
+                    "description": "Number of samples to process per training batch",
+                    "default": 4
+                },
+                "devices": {
+                    "type": "integer",
+                    "description": "Number of devices (GPUs) to use during training",
+                    "default": 4
+                },
+                "max_epochs": {
+                    "type": "integer",
+                    "description": "Number of training epochs",
+                    "default": 200
+                },
+                "batch_size_predict": {
+                    "type": "integer",
+                    "description": "Number of samples to process per batch during prediction",
+                    "default": 1
+                },
+                "cc_analysis": {
+                    "type": "boolean",
+                    "description": "Whether to use connected components for grouping transcripts without direct nucleus association",
+                    "default": false
+                },
+                "buffer_samples": {
+                    "type": "boolean",
+                    "description": "Process only one sample at a time from a multi-sample samplesheet.",
+                    "default": false
+                },
+                "buffer_size": {
+                    "type": "integer",
+                    "description": "Number of sample(s) to process at a time from a multi-sample samplesheet. Works if buffer_samples is true.",
+                    "default": 1
+                },
+                "restrict_concurrency": {
+                    "type": "boolean",
+                    "description": "Restrict parallelizing a process, e.g., restrict running Cellpose cell and nuclei segmentation together if resources are limited.",
+                    "default": false
                }
            }
        },
@@ -125,41 +471,11 @@
                    "description": "Institutional config URL link.",
                    "hidden": true,
                    "fa_icon": "fas fa-users-cog"
-                }
-            }
-        },
-        "max_job_request_options": {
-            "title": "Max job request options",
-            "type": "object",
-            "fa_icon": "fab fa-acquisitions-incorporated",
-            "description": "Set the top limit for requested resources for any single job.",
-            "help_text": "If you are running on a smaller system, a pipeline step requesting more resources than are available may cause the Nextflow to stop the run with an error. These options allow you to cap the maximum resources requested by any single job so that the pipeline will run on your system.\n\nNote that you can not _increase_ the resources requested by any job using these options. For that you will need your own configuration file. See [the nf-core website](https://nf-co.re/usage/configuration) for details.",
-            "properties": {
-                "max_cpus": {
-                    "type": "integer",
-                    "description": "Maximum number of CPUs that can be requested for any single job.",
-                    "default": 16,
-                    "fa_icon": "fas fa-microchip",
-                    "hidden": true,
-                    "help_text": "Use to set an upper-limit for the CPU requirement for each process. Should be an integer e.g. `--max_cpus 1`"
                },
-                "max_memory": {
+                "test_data_base": {
                    "type": "string",
-                    "description": "Maximum amount of memory that can be requested for any single job.",
-                    "default": "128.GB",
-                    "fa_icon": "fas fa-memory",
-                    "pattern": "^\\d+(\\.\\d+)?\\.?\\s*(K|M|G|T)?B$",
-                    "hidden": true,
-                    "help_text": "Use to set an upper-limit for the memory requirement for each process. Should be a string in the format integer-unit e.g. `--max_memory '8.GB'`"
-                },
-                "max_time": {
-                    "type": "string",
-                    "description": "Maximum amount of time that can be requested for any single job.",
-                    "default": "240.h",
-                    "fa_icon": "far fa-clock",
-                    "pattern": "^(\\d+\\.?\\s*(s|m|h|day)\\s*)+$",
-                    "hidden": true,
-                    "help_text": "Use to set an upper-limit for the time requirement for each process. Should be a string in the format integer-unit e.g. `--max_time '2.h'`"
+                    "default": "https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe",
+                    "description": "Base path / URL for data used in the test profiles."
                }
            }
        },
@@ -228,6 +544,7 @@
            },
            "multiqc_config": {
                "type": "string",
+                "format": "file-path",
                "description": "Custom config file to supply to MultiQC.",
                "fa_icon": "fas fa-cog",
                "hidden": true
@@ -243,13 +560,6 @@
                "description": "Custom MultiQC yaml file containing HTML including a methods description.",
                "fa_icon": "fas fa-cog"
            },
-            "tracedir": {
-                "type": "string",
-                "description": "Directory to keep pipeline Nextflow logs and reports.",
-                "default": "${params.outdir}/pipeline_info",
-                "fa_icon": "fas fa-cogs",
-                "hidden": true
-            },
            "validate_params": {
                "type": "boolean",
                "description": "Boolean whether to validate parameters against the schema at runtime",
@@ -257,31 +567,46 @@
                "fa_icon": "fas fa-check-square",
                "hidden": true
            },
-            "show_hidden_params": {
+            "monochromeLogs": {
                "type": "boolean",
-                "fa_icon": "far fa-eye-slash",
-                "description": "Show all params when using `--help`",
-                "hidden": true,
-                "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
+                "description": "Do not use coloured log outputs",
+                "hidden": true
+            },
+            "pipelines_testdata_base_path": {
+                "type": "string",
+                "default": "https://raw.githubusercontent.com/nf-core/test-datasets/",
+                "description": "Base URL or local path to location of pipeline test dataset files",
+                "hidden": true
+            },
+            "trace_report_suffix": {
+                "type": "string",
+                "fa_icon": "far calendar",
+                "description": "Suffix to add to the trace report filename. Default is the date and time in the format yyyy-MM-dd_HH-mm-ss",
+                "hidden": true
+            },
+            "help_full": {
+                "type": "boolean",
+                "description": "Display the full detailed help message."
+            },
+            "show_hidden": {
+                "type": "boolean",
+                "description": "Display hidden parameters in the help message (only works when --help or --help_full are provided)."
            }
        }
    }
},
"allOf": [
    {
-        "$ref": "#/definitions/input_output_options"
-    },
-    {
-        "$ref": "#/definitions/reference_genome_options"
+        "$ref": "#/$defs/input_output_options"
    },
    {
-        "$ref": "#/definitions/institutional_config_options"
+        "$ref": "#/$defs/segmentation_options"
    },
    {
-        "$ref": "#/definitions/max_job_request_options"
+        "$ref": "#/$defs/institutional_config_options"
    },
    {
-        "$ref": "#/definitions/generic_options"
+        "$ref": "#/$defs/generic_options"
    }
]
}
diff --git a/nf-test.config b/nf-test.config
new file mode 100644
index 00000000..3a1fff59
--- /dev/null
+++ b/nf-test.config
@@ -0,0 +1,24 @@
+config {
+    // location for all nf-test tests
+    testsDir "."
+
+    // nf-test directory including temporary files for each test
+    workDir System.getenv("NFT_WORKDIR") ?: ".nf-test"
+
+    // location of an optional nextflow.config file specific for executing tests
+    configFile "tests/nextflow.config"
+
+    // ignore tests coming from the nf-core/modules repo
+    ignore 'modules/nf-core/**/tests/*', 'subworkflows/nf-core/**/tests/*'
+
+    // run all tests with defined profile(s) from the main nextflow.config
+    profile "test"
+
+    // list of filenames or patterns that should trigger a full test run
+    triggers 'nextflow.config', 'nf-test.config', 'conf/test.config', 'tests/nextflow.config', 'tests/.nftignore'
+
+    // load the necessary plugins
+    plugins {
+        load "nft-utils@0.0.3"
+    }
+}
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 0d62beb6..00000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,10 +0,0 @@
-# Config file for Python. Mostly used to configure linting of bin/check_samplesheet.py with Black.
-# Should be kept the same as nf-core/tools to avoid fighting with template synchronisation.
-[tool.black] -line-length = 120 -target_version = ["py37", "py38", "py39", "py310"] - -[tool.isort] -profile = "black" -known_first_party = ["nf_core"] -multi_line_output = 3 diff --git a/ro-crate-metadata.json b/ro-crate-metadata.json new file mode 100644 index 00000000..89f81f6a --- /dev/null +++ b/ro-crate-metadata.json @@ -0,0 +1,350 @@ +{ + "@context": [ + "https://w3id.org/ro/crate/1.1/context", + { + "GithubService": "https://w3id.org/ro/terms/test#GithubService", + "JenkinsService": "https://w3id.org/ro/terms/test#JenkinsService", + "PlanemoEngine": "https://w3id.org/ro/terms/test#PlanemoEngine", + "TestDefinition": "https://w3id.org/ro/terms/test#TestDefinition", + "TestInstance": "https://w3id.org/ro/terms/test#TestInstance", + "TestService": "https://w3id.org/ro/terms/test#TestService", + "TestSuite": "https://w3id.org/ro/terms/test#TestSuite", + "TravisService": "https://w3id.org/ro/terms/test#TravisService", + "definition": "https://w3id.org/ro/terms/test#definition", + "engineVersion": "https://w3id.org/ro/terms/test#engineVersion", + "instance": "https://w3id.org/ro/terms/test#instance", + "resource": "https://w3id.org/ro/terms/test#resource", + "runsOn": "https://w3id.org/ro/terms/test#runsOn" + } + ], + "@graph": [ + { + "@id": "./", + "@type": "Dataset", + "creativeWorkStatus": "Stable", + "datePublished": "2026-04-28T15:09:48+00:00", + "description": "

    \n \n \n \"nf-core/spatialxe\"\n \n

    \n\n[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new/nf-core/spatialxe)\n[![GitHub Actions CI Status](https://github.com/nf-core/spatialxe/actions/workflows/nf-test.yml/badge.svg)](https://github.com/nf-core/spatialxe/actions/workflows/nf-test.yml)\n[![GitHub Actions Linting Status](https://github.com/nf-core/spatialxe/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/spatialxe/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/spatialxe/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\n[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\n\n[![Nextflow](https://img.shields.io/badge/version-%E2%89%A525.04.0-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/)\n[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.4.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.4.1)\n[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\n[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\n[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/spatialxe)\n\n[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23spatialxe-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/spatialxe)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\n\n## Introduction\n\n**nf-core/spatialxe** is a bioinformatics best-practice processing and quality control pipeline for Xenium data. The current plan for the pipeline implementation is shown in the metromap below. 
**The pipeline is under active development and changes might occur frequently**.\n\n![nf-core/spatialxe-metromap](docs/images/spatialxe-metromap.png)\n\n> [!NOTE]\n> We are currently testing the pipeline for the [10x Atera system](https://www.10xgenomics.com/platforms/atera).\n\n## Tools supported\n\nThe pipeline supports the following tools:\n\n- Segmentation methods:\n - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)\n - [Cellpose](https://doi.org/10.1038/s41592-020-01018-x)\n - [Xenium ranger (XR)](https://www.10xgenomics.com/support/software/xenium-ranger/latest)\n - [StarDist](https://doi.org/10.48550/arXiv.2203.02284)\n- Segmentation-free methods:\n - [Ficture](https://doi.org/10.1038/s41592-024-02415-2)\n - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)\n- Transcript assignment methods:\n - [Segger](https://doi.org/10.1101/2025.03.14.643160)\n - [Proseg](https://doi.org/10.1038/s41592-025-02697-0)\n- Utility methods:\n - [SpatialData](https://doi.org/10.1038/s41592-024-02212-x)\n - [Baysor](https://doi.org/10.1038/s41587-021-01044-w)\n- QC methods:\n - [MultiQC Xenium Extra Plugin](https://github.com/MultiQC/xenium-extra)\n - [OPT](https://github.com/JEFworks-Lab/off-target-probe-tracker)\n\n## Usage\n\nOn release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/spatialxe/results).\n\n> [!NOTE]\n> The pipeline does not support conda currently. We are working on it.\n\n## Quick Start\n\n`samplesheet.csv`:\n\n```csv\nsample,bundle,image\ntest_sample,/path/to/xenium-bundle,/path/to/morphology.ome.tif\n```\n\nNow, you can run the pipeline using:\n\n### Run image-based segmentation mode
\n\n`CELLPOSE -> BAYSOR -> XR-IMPORT_SEGMENTATION -> SPATIALDATA -> QC`\n\n```bash\nnextflow run nf-core/spatialxe \\\n -profile <docker/singularity/.../institute> \\\n --input samplesheet.csv \\\n --outdir <OUTDIR> \\\n --mode image\n```\n\n### Run coordinate-based segmentation mode
\n\n`PROSEG -> PROSEG2BAYSOR -> XR-IMPORT_SEGMENTATION -> SPATIALDATA -> QC`\n\n```bash\nnextflow run nf-core/spatialxe \\\n -profile <docker/singularity/.../institute> \\\n --input samplesheet.csv \\\n --outdir <OUTDIR> \\\n --mode coordinate\n```\n\n### Run segfree mode
\n\n`BAYSOR_SEGFREE`\n\n```bash\nnextflow run nf-core/spatialxe \\\n -profile <docker/singularity/.../institute> \\\n --input samplesheet.csv \\\n --outdir <OUTDIR> \\\n --mode segfree\n```\n\n### Run preview mode
\n\n`BAYSOR_PREVIEW`\n\n```bash\nnextflow run nf-core/spatialxe \\\n -profile <docker/singularity/.../institute> \\\n --input samplesheet.csv \\\n --outdir <OUTDIR> \\\n --mode preview\n```\n\n### Run just the quality control
\n\n```bash\nnextflow run nf-core/spatialxe \\\n -profile <docker/singularity/.../institute> \\\n --input samplesheet.csv \\\n --outdir <OUTDIR> \\\n --mode qc\n```\n\n### Additional information\n\n> [!WARNING]\n> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).\n\nFor more details and further functionality, please refer to the [usage documentation](https://nf-co.re/spatialxe/usage) and the [parameter documentation](https://nf-co.re/spatialxe/parameters).\n\n## Pipeline output\n\nTo see the results of an example test run with a full-size dataset refer to the [results](https://nf-co.re/spatialxe/results) tab on the nf-core website pipeline page.\nFor more details about the output files and reports, please refer to the\n[output documentation](https://nf-co.re/spatialxe/output).\n\n## Runtime and resource estimations\n\n| Tool | Compute | Runtime (min / med / max) | Peak RSS (min / med / max) |\n| ------------------------- | ------- | ------------------------- | -------------------------- |\n| Cellpose | GPU | 1m / 4m / 1.4h | 10 GB / 26 GB / 554 GB |\n| Cellpose | CPU | 1.3h / 2.3h / 6.5h | 161 GB / 426 GB / 1115 GB |\n| StarDist | GPU | 1m / 4m / 7m | 5 GB / 12 GB / 18 GB |\n| StarDist | CPU | 5m / 6m / 7m | 18 GB / 18 GB / 18 GB |\n| Segger (create_dataset) | GPU | 2m / 9m / 31m | 1.7 GB / 14 GB / 50 GB |\n| Segger (create_dataset) | CPU | 13m / 21m / 46m | 13 GB / 19 GB / 49 GB |\n| Segger (train) | GPU | 10m / 43m / 2.9h | 30 GB / 33 GB / 60 GB |\n| Segger (predict) | GPU | 2m / 16m / 59m | 10 GB / 25 GB / 87 GB |\n| Baysor (whole-image) | CPU | 2m / 30m / 17h | 6 GB / 10 GB / 650 GB |\n| Baysor (tiled) | CPU | 1m / 18m / 13h | 0.2 GB / 34 GB / 530 GB |\n| Proseg | CPU | 1m / 18m / 6.8h | 279 MB / 3.8 GB / 136 GB |\n| XeniumRanger (resegment) | CPU | 18m / 39m / 3.7h | 28 GB / 54 GB / 60 GB |\n| XeniumRanger (import_seg) | CPU | 2m / 7m / 2.7h | 2.6 GB / 11 GB / 51 GB |\n| Ficture (preprocess) | CPU | 3m / 4m / 13m | 331 MB / 357 MB / 21 GB |\n\n- Cellpose GPU vs CPU: 35x faster on GPU (4m median vs 2.3h), 16x less memory (26 GB vs 426 GB)\n- Segger: Only tool that truly requires GPU for all 3 steps (create_dataset, train, predict)\n- StarDist: Very fast on CPU, GPU is not necessary to run its default model\n\n## Credits\n\nnf-core/spatialxe is mainly developed by [Sameesh Kher](https://github.com/khersameesh24), [Dongze He](https://github.com/dongzehe), and [Florian Heyl](https://github.com/heylf).\n\nWe thank the following people for their extensive assistance in the development of this pipeline:\n\n- Tobias Krause\n- Kre\u0161imir Be\u0161tak (kbestak)\n- Matthias H\u00f6rtenhuber (mashehu)\n- Maxime Garcia (maxulysse)\n- K\u00fcbra Narc\u0131 (kubranarci)\n\n## Contributions and Support\n\nIf you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\n\nFor further information or help, don't hesitate to get in touch on the [Slack `#spatialxe` channel](https://nfcore.slack.com/channels/spatialxe) (you can join with [this invite](https://nf-co.re/join/slack)).\n\n## Citations\n\n\n\nAn extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\n\nYou can cite the `nf-core` publication as follows:\n\n> **The nf-core framework for 
community-curated bioinformatics pipelines.**\n>\n> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\n>\n> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\n", + "hasPart": [ + { + "@id": "main.nf" + }, + { + "@id": "assets/" + }, + { + "@id": "bin/" + }, + { + "@id": "conf/" + }, + { + "@id": "docs/" + }, + { + "@id": "docs/images/" + }, + { + "@id": "modules/" + }, + { + "@id": "modules/local/" + }, + { + "@id": "modules/nf-core/" + }, + { + "@id": "workflows/" + }, + { + "@id": "subworkflows/" + }, + { + "@id": "nextflow.config" + }, + { + "@id": "README.md" + }, + { + "@id": "nextflow_schema.json" + }, + { + "@id": "CHANGELOG.md" + }, + { + "@id": "LICENSE" + }, + { + "@id": "CODE_OF_CONDUCT.md" + }, + { + "@id": "CITATIONS.md" + }, + { + "@id": "modules.json" + }, + { + "@id": "docs/usage.md" + }, + { + "@id": "docs/output.md" + }, + { + "@id": ".nf-core.yml" + }, + { + "@id": ".pre-commit-config.yaml" + }, + { + "@id": ".prettierignore" + } + ], + "isBasedOn": "https://github.com/nf-core/spatialxe", + "license": "MIT", + "mainEntity": { + "@id": "main.nf" + }, + "mentions": [ + { + "@id": "#52641598-65b9-4e1a-96ba-d6c73f464012" + } + ], + "name": "nf-core/spatialxe" + }, + { + "@id": "ro-crate-metadata.json", + "@type": "CreativeWork", + "about": { + "@id": "./" + }, + "conformsTo": [ + { + "@id": "https://w3id.org/ro/crate/1.1" + }, + { + "@id": "https://w3id.org/workflowhub/workflow-ro-crate/1.0" + } + ] + }, + { + "@id": "main.nf", + "@type": [ + "File", + "SoftwareSourceCode", + "ComputationalWorkflow" + ], + "creator": [ + { + "@id": "https://orcid.org/0009-0008-2420-6464" + } + ], + "dateCreated": "", + "dateModified": "2026-04-28T17:09:48Z", + "dct:conformsTo": "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/", + "keywords": [ + "nf-core", + "nextflow", + "10x-genomics", + "image-processing", + "spatial", + "spatial-data-analysis", + "spatial-transcriptomics", + "transcriptomics", + "xenium" + ], + "license": [ + "MIT" + ], + "maintainer": [ + { + "@id": "https://orcid.org/0009-0008-2420-6464" + } + ], + "name": [ + "nf-core/spatialxe" + ], + "programmingLanguage": { + "@id": "https://w3id.org/workflowhub/workflow-ro-crate#nextflow" + }, + "sdPublisher": { + "@id": "https://nf-co.re/" + }, + "url": [ + "https://github.com/nf-core/spatialxe", + "https://nf-co.re/spatialxe/1.0.0/" + ], + "version": [ + "1.0.0" + ] + }, + { + "@id": "https://w3id.org/workflowhub/workflow-ro-crate#nextflow", + "@type": "ComputerLanguage", + "identifier": { + "@id": "https://www.nextflow.io/" + }, + "name": "Nextflow", + "url": { + "@id": "https://www.nextflow.io/" + }, + "version": "!>=25.04.0" + }, + { + "@id": "#52641598-65b9-4e1a-96ba-d6c73f464012", + "@type": "TestSuite", + "instance": [ + { + "@id": "#8f995e93-3115-404e-8f6b-5d8a5db77b5e" + } + ], + "mainEntity": { + "@id": "main.nf" + }, + "name": "Test suite for nf-core/spatialxe" + }, + { + "@id": "#8f995e93-3115-404e-8f6b-5d8a5db77b5e", + "@type": "TestInstance", + "name": "GitHub Actions workflow for testing nf-core/spatialxe", + "resource": "repos/nf-core/spatialxe/actions/workflows/nf-test.yml", + "runsOn": { + "@id": "https://w3id.org/ro/terms/test#GithubService" + }, + "url": "https://api.github.com" + }, + { + "@id": "https://w3id.org/ro/terms/test#GithubService", + "@type": "TestService", + "name": "Github Actions", + "url": { + "@id": 
"https://github.com" + } + }, + { + "@id": "assets/", + "@type": "Dataset", + "description": "Additional files" + }, + { + "@id": "bin/", + "@type": "Dataset", + "description": "Scripts that must be callable from a pipeline process" + }, + { + "@id": "conf/", + "@type": "Dataset", + "description": "Configuration files" + }, + { + "@id": "docs/", + "@type": "Dataset", + "description": "Markdown files for documenting the pipeline" + }, + { + "@id": "docs/images/", + "@type": "Dataset", + "description": "Images for the documentation files" + }, + { + "@id": "modules/", + "@type": "Dataset", + "description": "Modules used by the pipeline" + }, + { + "@id": "modules/local/", + "@type": "Dataset", + "description": "Pipeline-specific modules" + }, + { + "@id": "modules/nf-core/", + "@type": "Dataset", + "description": "nf-core modules" + }, + { + "@id": "workflows/", + "@type": "Dataset", + "description": "Main pipeline workflows to be executed in main.nf" + }, + { + "@id": "subworkflows/", + "@type": "Dataset", + "description": "Smaller subworkflows" + }, + { + "@id": "nextflow.config", + "@type": "File", + "description": "Main Nextflow configuration file" + }, + { + "@id": "README.md", + "@type": "File", + "description": "Basic pipeline usage information" + }, + { + "@id": "nextflow_schema.json", + "@type": "File", + "description": "JSON schema for pipeline parameter specification" + }, + { + "@id": "CHANGELOG.md", + "@type": "File", + "description": "Information on changes made to the pipeline" + }, + { + "@id": "LICENSE", + "@type": "File", + "description": "The license - should be MIT" + }, + { + "@id": "CODE_OF_CONDUCT.md", + "@type": "File", + "description": "The nf-core code of conduct" + }, + { + "@id": "CITATIONS.md", + "@type": "File", + "description": "Citations needed when using the pipeline" + }, + { + "@id": "modules.json", + "@type": "File", + "description": "Version information for modules from nf-core/modules" + }, + { + "@id": "docs/usage.md", + "@type": "File", + "description": "Usage documentation" + }, + { + "@id": "docs/output.md", + "@type": "File", + "description": "Output documentation" + }, + { + "@id": ".nf-core.yml", + "@type": "File", + "description": "nf-core configuration file, configuring template features and linting rules" + }, + { + "@id": ".pre-commit-config.yaml", + "@type": "File", + "description": "Configuration file for pre-commit hooks" + }, + { + "@id": ".prettierignore", + "@type": "File", + "description": "Ignore file for prettier" + }, + { + "@id": "https://nf-co.re/", + "@type": "Organization", + "name": "nf-core", + "url": "https://nf-co.re/" + }, + { + "@id": "https://orcid.org/0009-0008-2420-6464", + "@type": "Person", + "email": "khersameesh24@gmail.com", + "name": "Sameesh Kher" + } + ] +} \ No newline at end of file diff --git a/subworkflows/local/baysor_generate_preview/main.nf b/subworkflows/local/baysor_generate_preview/main.nf new file mode 100644 index 00000000..2494fcbd --- /dev/null +++ b/subworkflows/local/baysor_generate_preview/main.nf @@ -0,0 +1,49 @@ +// +// Run baysor create_dataset & preview +// + +include { BAYSOR_PREVIEW } from '../../../modules/local/baysor/preview/main' +include { BAYSOR_CREATE_DATASET } from '../../../modules/local/baysor/create_dataset/main' +include { EXTRACT_PREVIEW_DATA } from '../../../modules/local/utility/extract_preview_data/main' +include { PARQUET_TO_CSV } from '../../../modules/local/utility/parquet_to_csv/main' + +workflow BAYSOR_GENERATE_PREVIEW { + take: + ch_transcripts_file // channel: [ 
val(meta), ["path-to-transcripts.parquet"] ] + ch_config // channel: ["path-to-xenium.toml"] + + main: + + ch_preview_mqc_html = channel.empty() + ch_preview_mqc_png = channel.empty() + + + // run parquet to csv + PARQUET_TO_CSV(ch_transcripts_file, ".csv") + + // generate randomised sample data + BAYSOR_CREATE_DATASET(PARQUET_TO_CSV.out.transcripts_csv, 0.3) + + // run baysor preview if param - generate_preview is true + ch_sampled_transcripts = BAYSOR_CREATE_DATASET.out.sampled_transcripts + ch_baysor_preview_input = ch_sampled_transcripts + .combine(ch_config) + .map { meta, transcripts, config -> + tuple( + meta, + transcripts, + config + ) + } + BAYSOR_PREVIEW(ch_baysor_preview_input) + + // clean the preview html file generated + EXTRACT_PREVIEW_DATA(BAYSOR_PREVIEW.out.preview_html) + + ch_preview_mqc_html = EXTRACT_PREVIEW_DATA.out.mqc_data + ch_preview_mqc_png = EXTRACT_PREVIEW_DATA.out.mqc_img + + emit: + preview_html = ch_preview_mqc_html // channel: [ val(meta), ["*_mqc.tsv"] ] + preview_img = ch_preview_mqc_png // channel: [ val(meta), ["*_mqc.png"] ] +} diff --git a/subworkflows/local/baysor_generate_preview/meta.yml b/subworkflows/local/baysor_generate_preview/meta.yml new file mode 100644 index 00000000..3ca9fa8e --- /dev/null +++ b/subworkflows/local/baysor_generate_preview/meta.yml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "baysor_generate_preview" +description: quick preview to get meaning from the data and to get some guesses about the parameters of the full baysor run +keywords: + - baysor + - preview + - transcripts + - report + - preview_html + - html_report + - visualization +components: + - baysor/preview + - baysor/create/dataset + - parquet/to/csv + - extract/preview/data +input: + - ch_transcripts_parquet: + description: | + input parquet file from the xenium bundle + Structure: [ val(meta), path("path-to-transcripts.parquet") ] + - ch_config: + description: | + config file for the xenium baysor run (stored in assets/config/xenium.toml) + Structure: [ path("path-to-xenium.toml") ] +output: + - preview_html: + description: | + Preview html file generated with the baysor preview command + Structure: [ val(meta), path("path-to-preview.html") ] + - versions: + description: | + Files containing software versions + Structure: [ path(versions.yml) ] +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/subworkflows/local/baysor_generate_segfree/main.nf b/subworkflows/local/baysor_generate_segfree/main.nf new file mode 100644 index 00000000..74a160f0 --- /dev/null +++ b/subworkflows/local/baysor_generate_segfree/main.nf @@ -0,0 +1,52 @@ +// +// Run baysor segfree +// + +include { BAYSOR_PREPROCESS_TRANSCRIPTS } from '../../../modules/local/baysor/preprocess/main' +include { BAYSOR_SEGFREE } from '../../../modules/local/baysor/segfree/main' +// include a module to process the output loom file with scapny or anndata + +workflow BAYSOR_GENERATE_SEGFREE { + take: + ch_transcripts_file // channel: [ val(meta), ["transcripts.parquet"] ] + ch_config // channel: [ ["path-to-xenium.toml"] ] + max_x // value: spatial filter upper x bound + max_y // value: spatial filter upper y bound + min_qv // value: minimum transcript QV + min_x // value: spatial filter lower x bound + min_y // value: spatial filter lower y bound + + main: + + ch_transcripts = channel.empty() + + // Always preprocess transcripts.parquet to CSV for Baysor 0.7.1 compatibility. 
+    // Baysor's Julia Parquet.jl cannot read zstd-compressed parquet files from Xenium bundles.
+    // Also applies optional spatial/QV filtering when filter_transcripts is true.
+    BAYSOR_PREPROCESS_TRANSCRIPTS(
+        ch_transcripts_file,
+        min_qv,
+        max_x,
+        min_x,
+        max_y,
+        min_y,
+    )
+    ch_transcripts = BAYSOR_PREPROCESS_TRANSCRIPTS.out.transcripts_file
+
+    // run baysor segfree
+    ch_baysor_segfree_input = ch_transcripts
+        .combine(ch_config)
+        .map { meta, transcripts, config ->
+            tuple(
+                meta,
+                transcripts,
+                config
+            )
+        }
+    BAYSOR_SEGFREE(
+        ch_baysor_segfree_input
+    )
+
+    emit:
+    ncvs = BAYSOR_SEGFREE.out.ncvs // channel: [ val(meta), ["ncvs.loom"] ]
+}
diff --git a/subworkflows/local/baysor_generate_segfree/meta.yml b/subworkflows/local/baysor_generate_segfree/meta.yml
new file mode 100644
index 00000000..f8c951c9
--- /dev/null
+++ b/subworkflows/local/baysor_generate_segfree/meta.yml
@@ -0,0 +1,32 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "baysor_generate_segfree"
+description: with segfree, analyses that do not require segmentation can instead be run on local neighborhoods (Neighborhood Composition Vectors, NCVs)
+keywords:
+  - baysor
+  - segfree
+  - neighborhoods
+  - loom
+components:
+  - baysor/segfree
+input:
+  - ch_transcripts_parquet:
+      description: |
+        input parquet file from the xenium bundle
+        Structure: [ val(meta), path("path-to-transcripts.parquet") ]
+  - ch_config:
+      description: |
+        config file for the xenium baysor run (stored in assets/config/xenium.toml)
+        Structure: [ path("path-to-xenium.toml") ]
+output:
+  - ncvs:
+      description: |
+        loom file generated with the baysor segfree command
+        Structure: [ val(meta), path("path-to-ncvs.loom") ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/baysor_run_prior_segmentation_mask/main.nf b/subworkflows/local/baysor_run_prior_segmentation_mask/main.nf
new file mode 100644
index 00000000..d5acc0a1
--- /dev/null
+++ b/subworkflows/local/baysor_run_prior_segmentation_mask/main.nf
@@ -0,0 +1,83 @@
+//
+// Run baysor run & import-segmentation
+//
+
+include { BAYSOR_PREPROCESS_TRANSCRIPTS } from '../../../modules/local/baysor/preprocess/main'
+include { BAYSOR_RUN } from '../../../modules/local/baysor/run/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+
+workflow BAYSOR_RUN_PRIOR_SEGMENTATION_MASK {
+    take:
+    ch_bundle_path       // channel: [ val(meta), ["path-to-xenium-bundle"] ]
+    ch_transcripts_file  // channel: [ val(meta), ["path-to-transcripts.parquet"] ]
+    ch_segmentation_mask // channel: [ ["path-to-prior-segmentation-mask"] ]
+    ch_config            // channel: [ "path-to-xenium.toml" ]
+    max_x                // value: spatial filter upper x bound
+    max_y                // value: spatial filter upper y bound
+    min_qv               // value: minimum transcript QV
+    min_x                // value: spatial filter lower x bound
+    min_y                // value: spatial filter lower y bound
+
+    main:
+
+    ch_transcripts = channel.empty()
+
+    ch_redefined_bundle = channel.empty()
+    ch_coordinate_space = channel.value("pixels")
+
+    // Always preprocess transcripts.parquet to CSV for Baysor 0.7.1 compatibility.
+    // Baysor's Julia Parquet.jl cannot read zstd-compressed parquet files from Xenium bundles.
+    // Also applies optional spatial/QV filtering when filter_transcripts is true.
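+    // For reference, a minimal sketch of what such a parquet -> CSV conversion
+    // could look like as a standalone process. The process name and the pandas
+    // one-liner are illustrative assumptions, not the actual module code:
+    //
+    //   process PARQUET_TO_CSV_SKETCH {
+    //       input:
+    //       tuple val(meta), path(parquet)
+    //
+    //       output:
+    //       tuple val(meta), path("${meta.id}.csv")
+    //
+    //       script:
+    //       """
+    //       python -c "import pandas as pd; pd.read_parquet('${parquet}').to_csv('${meta.id}.csv', index=False)"
+    //       """
+    //   }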
+    BAYSOR_PREPROCESS_TRANSCRIPTS(
+        ch_transcripts_file,
+        min_qv,
+        max_x,
+        min_x,
+        max_y,
+        min_y,
+    )
+    ch_transcripts = BAYSOR_PREPROCESS_TRANSCRIPTS.out.transcripts_file
+
+
+    // run baysor with prior segmentation mask
+    ch_baysor_input = ch_transcripts
+        .combine(ch_segmentation_mask)
+        .combine(ch_config)
+        .map { meta, transcripts, mask, config ->
+            tuple(
+                meta,
+                transcripts,
+                mask,
+                config,
+                30,
+            )
+        }
+    BAYSOR_RUN(ch_baysor_input)
+
+
+    // run import-segmentation with baysor outs
+    ch_imp_seg_inputs = ch_bundle_path
+        .combine(BAYSOR_RUN.out.segmentation, by: 0)
+        .map { meta, bundle, _segmentation_csv, polygons2d ->
+            tuple(
+                meta,
+                bundle,
+                [],
+                [],
+                polygons2d,
+                polygons2d,
+                [],
+                ch_coordinate_space.val,
+            )
+        }
+    XENIUMRANGER_IMPORT_SEGMENTATION(
+        ch_imp_seg_inputs
+    )
+
+    ch_redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs
+
+    emit:
+    coordinate_space = ch_coordinate_space // channel: [ "pixels" ]
+    redefined_bundle = ch_redefined_bundle // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+}
diff --git a/subworkflows/local/baysor_run_prior_segmentation_mask/meta.yml b/subworkflows/local/baysor_run_prior_segmentation_mask/meta.yml
new file mode 100644
index 00000000..0ce3b51c
--- /dev/null
+++ b/subworkflows/local/baysor_run_prior_segmentation_mask/meta.yml
@@ -0,0 +1,62 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "baysor_run_prior_segmentation_mask"
+description: to run the `baysor run` command if a prior segmentation mask is available
+keywords:
+  - baysor
+  - baysor run
+  - segmentation
+  - segmentation mask
+  - xeniumranger import-segmentation
+  - image-based segmentation
+  - transcript filtering
+  - polygons
+components:
+  - baysor/preprocess/transcripts
+  - baysor/run
+  - xeniumranger/import/segmentation
+input:
+  - ch_bundle_path:
+      description: |
+        path to the xenium bundle
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+  - ch_transcripts_parquet:
+      description: |
+        input parquet file from the xenium bundle
+        Structure: [ val(meta), path("path-to-transcripts.parquet") ]
+  - ch_segmentation_mask:
+      description: |
+        prior segmentation mask filepath
+        Structure: [ path("path-to-prior-segmentation-mask.tif") ]
+  - ch_config:
+      description: |
+        config file for the xenium baysor run (stored in assets/config/xenium.toml)
+        Structure: [ path("path-to-xenium.toml") ]
+output:
+  - segmentation:
+      description: |
+        the segmentation.csv file generated from the baysor run command
+        Structure: [ val(meta), path("segmentation.csv") ]
+  - polygons2d:
+      description: |
+        the segmentation_polygons_2d.json file generated from the baysor run command
+        Structure: [ val(meta), path("segmentation_polygons_2d.json") ]
+  - htmls:
+      description: |
+        the html files generated from the baysor run command
+        Structure: [ val(meta), path("*.html") ]
+  - coordinate_space:
+      description: |
+        the coordinate space in which xeniumranger import-segmentation was run
+        Structure: [ val("pixels") ]
+  - redefined_bundle:
+      description: |
+        the redefined xenium bundle generated with the segmentation results from baysor
+        Structure: [ val(meta), ["redefined-xenium-bundle"] ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/baysor_run_transcripts_parquet/main.nf b/subworkflows/local/baysor_run_transcripts_parquet/main.nf
new file mode 
100644 index 00000000..23637669 --- /dev/null +++ b/subworkflows/local/baysor_run_transcripts_parquet/main.nf @@ -0,0 +1,166 @@ +// +// Unified Baysor subworkflow: handles both tiled and non-tiled paths. +// +// When baysor_tiling=true: divide → per-patch Baysor → stitch → xeniumranger +// When baysor_tiling=false: preprocess → Baysor → xeniumranger +// +// Prior segmentation support: +// Column-based (cells): works with both tiled and non-tiled +// Image-based (cellpose): non-tiled only (mask passed to Baysor) +// + +include { XENIUM_PATCH_DIVIDE } from '../../../modules/local/xenium_patch/divide/main' +include { PARQUET_TO_CSV } from '../../../modules/local/parquet_to_csv/main' +include { BAYSOR_RUN } from '../../../modules/local/baysor/run/main' +include { BAYSOR_PREPROCESS_TRANSCRIPTS } from '../../../modules/local/baysor/preprocess/main' +include { XENIUM_PATCH_STITCH } from '../../../modules/local/xenium_patch/stitch/main' +include { RECONSTRUCT_PATCHES } from '../../../modules/local/utility/reconstruct_patches/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main' + + +workflow BAYSOR_RUN_TRANSCRIPTS_PARQUET { + + take: + ch_bundle_path // channel: [ val(meta), ["path-to-xenium-bundle"] ] + ch_transcripts_file // channel: [ val(meta), ["transcripts.parquet"] ] + ch_morphology_image // channel: [ val(meta), ["morphology_focus.ome.tif"] ] + ch_config // channel: ["path-to-xenium.toml"] + ch_prior_mask // channel: [ val(meta), ["resized_mask.tif"] ] or empty (cellpose) + baysor_config // value: path to baysor config TOML (or null) + baysor_scale // value: Baysor --scale for non-tiled runs + baysor_tiling // value: bool — enable tiling + baysor_tiling_scale // value: Baysor --scale for tiled runs + max_x // value: spatial filter upper x bound + max_y // value: spatial filter upper y bound + min_qv // value: minimum transcript QV + min_x // value: spatial filter lower x bound + min_y // value: spatial filter lower y bound + + main: + + ch_coordinate_space = channel.value("microns") + + if ( baysor_tiling ) { + + // ── TILED PATH ────────────────────────────────────────────────── + + // Step 1: Divide transcripts into overlapping patches + ch_divide_input = ch_transcripts_file + .join(ch_morphology_image, by: 0) + + XENIUM_PATCH_DIVIDE ( ch_divide_input ) + + // Step 2: Fan out patches for parallel processing + ch_patches = XENIUM_PATCH_DIVIDE.out.patch_transcripts + .transpose() + .map { meta, parquet_file -> + def patch_id = parquet_file.parent.name + def patch_meta = meta.clone() + patch_meta.sample_id = meta.id + patch_meta.patch_id = patch_id + patch_meta.id = "${meta.id}_${patch_id}" + tuple(patch_meta, parquet_file) + } + + // Step 2b: Convert parquet to CSV (Baysor Julia Parquet.jl incompatibility) + PARQUET_TO_CSV ( ch_patches ) + + // Step 3: Run Baysor on each patch independently + // Use baysor_tiling_scale (larger than baysor_scale) to compensate for EM + // convergence producing smaller cells on tile-sized datasets. + BAYSOR_RUN ( + PARQUET_TO_CSV.out.csv.map { meta, transcripts -> + tuple(meta, transcripts, [], baysor_config ? 
file(baysor_config) : [], baysor_tiling_scale) + } + ) + + // Step 4: Gather patch results per sample and reconstruct patches directory + ch_baysor_results = BAYSOR_RUN.out.segmentation + .map { patch_meta, csv, polygons -> + tuple(patch_meta.sample_id, [patch_meta.patch_id, csv, polygons]) + } + .groupTuple(by: 0) + .map { sample_id, patch_data -> + def sorted = patch_data.sort { it[0] } + def patch_ids = sorted.collect { it[0] } + def csvs = sorted.collect { it[1] } + def geojsons = sorted.collect { it[2] } + tuple(sample_id, patch_ids, csvs, geojsons) + } + + ch_stitch_input = ch_baysor_results + .join( + XENIUM_PATCH_DIVIDE.out.grid + .map { meta, grid -> tuple(meta.id, grid) } + ) + .map { sample_id, patch_ids, csvs, geojsons, grid_json -> + def meta = [id: sample_id] + tuple(meta, grid_json, patch_ids, csvs, geojsons) + } + + // Step 5: Stitch patch results + RECONSTRUCT_PATCHES ( ch_stitch_input ) + XENIUM_PATCH_STITCH ( RECONSTRUCT_PATCHES.out.patches_dir ) + + // Step 6: xeniumranger import-segmentation (tiled) + // spatialxe signature: meta, bundle, transcript_assignment, viz_polygons, nuclei, cells, coordinate_transform, units + ch_xr = ch_bundle_path + .combine(XENIUM_PATCH_STITCH.out.xr_polygons_transcript, by: 0) + .map { + meta, bundle, xr_cell_polygons, xr_transcript_metadata -> tuple( + meta, bundle, + xr_transcript_metadata, + xr_cell_polygons, + [], [], [], + "microns" + ) + } + + XENIUMRANGER_IMPORT_SEGMENTATION (ch_xr) + + } else { + + // ── NON-TILED PATH ────────────────────────────────────────────── + + // Preprocess: parquet → CSV with optional spatial/QV filtering + BAYSOR_PREPROCESS_TRANSCRIPTS( + ch_transcripts_file, + min_qv, + max_x, + min_x, + max_y, + min_y, + ) + + // Run Baysor on full transcripts (with optional image-based prior mask) + ch_csv_with_mask = BAYSOR_PREPROCESS_TRANSCRIPTS.out.transcripts_file + .join(ch_prior_mask, by: 0, remainder: true) + .map { meta, transcripts, mask -> + tuple(meta, transcripts, mask ?: []) + } + ch_baysor_input = ch_csv_with_mask + .combine(ch_config) + .map { meta, transcripts, mask, config -> + tuple(meta, transcripts, mask, config, baysor_scale) + } + BAYSOR_RUN(ch_baysor_input) + + // xeniumranger import-segmentation (non-tiled) + // spatialxe signature: meta, bundle, transcript_assignment, viz_polygons, nuclei, cells, coordinate_transform, units + ch_xr = ch_bundle_path + .combine(BAYSOR_RUN.out.segmentation, by: 0) + .map { meta, bundle, segmentation_csv, polygons2d -> + tuple(meta, bundle, + segmentation_csv, + polygons2d, + [], [], [], + ch_coordinate_space.val) + } + + XENIUMRANGER_IMPORT_SEGMENTATION(ch_xr) + } + + emit: + redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs + coordinate_space = ch_coordinate_space +} diff --git a/subworkflows/local/baysor_run_transcripts_parquet/meta.yml b/subworkflows/local/baysor_run_transcripts_parquet/meta.yml new file mode 100644 index 00000000..839cb2ce --- /dev/null +++ b/subworkflows/local/baysor_run_transcripts_parquet/meta.yml @@ -0,0 +1,58 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "baysor_run_transcripts_parquet" +description: to run the `baysor run` command with the transcripts.parquet file as a coordinate-based segmentation run +keywords: + - baysor + - baysor run + - segmentation + - xeniumranger import-segmentation + - coordinate-based segmentation + - transcript filtering + - polygons +components: + - baysor/preprocess/transcripts + - baysor/run + - 
xeniumranger/import/segmentation + - split/transcripts +input: + - ch_bundle_path: + description: | + path to the xenium bundle + Structure: [ val(meta), path("path-to-xenium-bundle") ] + - ch_transcripts_parquet: + description: | + input parquet file from the xenium bundle + Structure: [ val(meta), path("path-to-transcripts.parquet") ] + - ch_config: + description: | + config file for the xenium baysor run (stored in assets/config/xenium.toml) + Structure: [ path("path-to-xenium.toml") ] +output: + - segmentation: + description: | + the segmentation.csv file generated from the baysor run command + Structure: [ val(meta), path("segmentation.csv") ] + - polygons2d: + description: | + the segmentation_polygons_2d.json file generated from the baysor run command + Structure: [ val(meta), path("segmentation_polygons_2d.json") ] + - htmls: + description: | + the html files generated from the baysor run command + Structure: [ val(meta), path("*.html") ] + - coordinate_space: + description: | + the coordinate space in which xeniumranger import-segmentation was run + Structure: [ val("microns") ] + - redefined_bundle: + description: | + the redefined xenium bundle generated with the segmentation results from baysor + Structure: [ val(meta), ["redefined-xenium-bundle"] ] + - versions: + description: | + Files containing software versions + Structure: [ path(versions.yml) ] +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/subworkflows/local/baysor_run_transcripts_parquet_tiled/main.nf b/subworkflows/local/baysor_run_transcripts_parquet_tiled/main.nf new file mode 100644 index 00000000..0815bbe9 --- /dev/null +++ b/subworkflows/local/baysor_run_transcripts_parquet_tiled/main.nf @@ -0,0 +1,104 @@ +// +// Runs baysor with tiling: divide transcripts -> preprocess per patch -> baysor per patch -> stitch -> xeniumranger +// + +include { XENIUM_PATCH_DIVIDE } from '../../../modules/local/xenium_patch/divide/main' +include { BAYSOR_PREPROCESS_TRANSCRIPTS } from '../../../modules/local/baysor/preprocess/main' +include { BAYSOR_RUN } from '../../../modules/local/baysor/run/main' +include { XENIUM_PATCH_STITCH } from '../../../modules/local/xenium_patch/stitch/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main' + +workflow BAYSOR_RUN_TRANSCRIPTS_PARQUET_TILED { + + take: + ch_bundle_path // channel: [ val(meta), ["xenium-bundle"] ] + ch_transcripts_file // channel: [ val(meta), ["transcripts.parquet"] ] + ch_config // channel: ["path-to-xenium.toml"] + max_x // value: spatial filter upper x bound + max_y // value: spatial filter upper y bound + min_qv // value: minimum transcript QV + min_x // value: spatial filter lower x bound + min_y // value: spatial filter lower y bound + + main: + + ch_coordinate_space = channel.value("microns") + + // Step 1: Divide transcripts into overlapping patches + XENIUM_PATCH_DIVIDE ( ch_transcripts_file ) + + // Step 2: Fan out patches for parallel processing + ch_patches = XENIUM_PATCH_DIVIDE.out.patch_transcripts + .transpose() + .map { meta, parquet_file -> + def patch_id = parquet_file.parent.name + def patch_meta = meta.clone() + patch_meta.sample_id = meta.id + patch_meta.patch_id = patch_id + patch_meta.id = "${meta.id}_${patch_id}" + tuple(patch_meta, parquet_file) + } + + // Step 3: Preprocess each patch's parquet to CSV for Baysor 0.7.1 compatibility + // Baysor's Julia Parquet.jl cannot read zstd-compressed parquet files + BAYSOR_PREPROCESS_TRANSCRIPTS ( + ch_patches, + 
min_qv, + max_x, + min_x, + max_y, + min_y, + ) + + // Step 4: Run Baysor on each patch independently + ch_baysor_input = BAYSOR_PREPROCESS_TRANSCRIPTS.out.transcripts_file + .combine(ch_config) + .map { meta, transcripts, config -> + tuple(meta, transcripts, [], config, 30) + } + + BAYSOR_RUN ( ch_baysor_input ) + + // Step 5: Gather patch results per sample for stitching + ch_for_stitch = BAYSOR_RUN.out.segmentation + .map { patch_meta, csv, polygons -> + tuple(patch_meta.sample_id, [patch_meta.patch_id, csv, polygons]) + } + .groupTuple(by: 0) + .map { sample_id, patch_data -> + def sorted = patch_data.sort { it[0] } + def patch_ids = sorted.collect { it[0] } + def csvs = sorted.collect { it[1] } + def geojsons = sorted.collect { it[2] } + tuple(sample_id, patch_ids, csvs, geojsons) + } + + // Combine with grid metadata from DIVIDE + ch_stitch_input = ch_for_stitch + .join( + XENIUM_PATCH_DIVIDE.out.grid + .map { meta, grid -> tuple(meta.id, grid) } + ) + .map { sample_id, patch_ids, csvs, geojsons, grid_json -> + def meta = [id: sample_id] + tuple(meta, grid_json, patch_ids, csvs, geojsons) + } + + // Step 6: Stitch patch results into unified segmentation output + XENIUM_PATCH_STITCH ( ch_stitch_input ) + + // Step 7: Run xeniumranger import-segmentation + // Note: Cell size filtering is handled inline by STITCH via --filter-method + ch_xr = ch_bundle_path + .combine(XENIUM_PATCH_STITCH.out.xr_polygons_transcript, by: 0) + .combine(ch_coordinate_space) + .map { meta, bundle, geojson, csv, coord_space -> + tuple(meta, bundle, csv, geojson, [], [], [], coord_space) + } + + XENIUMRANGER_IMPORT_SEGMENTATION ( ch_xr ) + + emit: + coordinate_space = ch_coordinate_space // channel: [ "microns" ] + redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ] +} diff --git a/subworkflows/local/cellpose_baysor_import_segmentation/main.nf b/subworkflows/local/cellpose_baysor_import_segmentation/main.nf new file mode 100644 index 00000000..38bbcc74 --- /dev/null +++ b/subworkflows/local/cellpose_baysor_import_segmentation/main.nf @@ -0,0 +1,191 @@ +// +// Run the cellpose, baysor and import-segmentation flow +// + +include { RESOLIFT } from '../../../modules/local/resolift/main' +include { BAYSOR_RUN } from '../../../modules/local/baysor/run/main' +include { CELLPOSE as CELLPOSE_CELLS } from '../../../modules/nf-core/cellpose/main' +include { EXTRACT_DAPI } from '../../../modules/local/utility/extract_dapi/main' +include { STARDIST as STARDIST_NUCLEI } from '../../../modules/nf-core/stardist/main' +include { CONVERT_MASK_UINT32 } from '../../../modules/local/utility/convert_mask_uint32/main' +include { BAYSOR_PREPROCESS_TRANSCRIPTS } from '../../../modules/local/baysor/preprocess/main' +include { RESIZE_TIF } from '../../../modules/local/utility/resize_tif/main' +include { GET_TRANSCRIPTS_COORDINATES } from '../../../modules/local/utility/get_coordinates/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main' + +workflow CELLPOSE_BAYSOR_IMPORT_SEGMENTATION { + take: + ch_morphology_image // channel: [ val(meta), ["path-to-morphology.ome.tif"] ] + ch_bundle_path // channel: [ val(meta), ["path-to-xenium-bundle"] ] + ch_transcripts_file // channel: [ val(meta), ["path-to-transcripts.parquet"] ] + ch_experiment_metadata // channel: [ val(meta), ["path-to-experiment.xenium"] ] + ch_config // channel: ["path-to-xenium.toml"] + cell_segmentation_only // value: bool + cellpose_model // 
value: path to cellpose model (or null) + max_x // value: spatial filter upper x bound + max_y // value: spatial filter upper y bound + min_qv // value: minimum transcript QV + min_x // value: spatial filter lower x bound + min_y // value: spatial filter lower y bound + nucleus_segmentation_only // value: bool + sharpen_tiff // value: bool + stardist_nuclei_model // value: stardist pretrained model name + + main: + + ch_transcripts = channel.empty() + ch_imp_seg_inputs = channel.empty() + ch_coordinate_space = channel.value("microns") + + + // Use empty list when no model is provided; path input for official cellpose module + cellpose_model_path = cellpose_model ? file(cellpose_model) : [] + stardist_model = stardist_nuclei_model ?: '2D_versatile_fluo' + + // sharpen morphology tiff if param - sharpen_tiff is true + if (sharpen_tiff) { + + RESOLIFT(ch_morphology_image) + + ch_image = RESOLIFT.out.enhanced_tiff + } + else { + + ch_image = ch_morphology_image + } + + + // run cellpose on the morphology (enhanced) tiff + if (cell_segmentation_only) { + + CELLPOSE_CELLS(ch_image, cellpose_model_path) + } + + if (nucleus_segmentation_only) { + + // Extract DAPI channel, run StarDist, convert to uint32 + EXTRACT_DAPI(ch_image) + + STARDIST_NUCLEI(EXTRACT_DAPI.out.dapi, [stardist_model, []]) + + CONVERT_MASK_UINT32(STARDIST_NUCLEI.out.mask) + } + + + // Always preprocess transcripts.parquet to CSV for Baysor 0.7.1 compatibility. + // Baysor's Julia Parquet.jl cannot read zstd-compressed parquet files from Xenium bundles. + // Also applies optional spatial/QV filtering when filter_transcripts is true. + BAYSOR_PREPROCESS_TRANSCRIPTS( + ch_transcripts_file, + min_qv, + max_x, + min_x, + max_y, + min_y, + ) + ch_transcripts = BAYSOR_PREPROCESS_TRANSCRIPTS.out.transcripts_file + + + // run baysor with cellpose results + if (nucleus_segmentation_only) { + + // check if the size of the segmentation mask matches the max transcripts coordinate range + ch_resizetif_input = ch_transcripts + .combine(CONVERT_MASK_UINT32.out.mask, by: 0) + .combine(ch_experiment_metadata, by: 0) + .map { meta, transcripts, mask, exp_meta -> + tuple( + meta, + transcripts, + mask, + exp_meta, + ) + } + RESIZE_TIF(ch_resizetif_input) + + // run baysor with nuclei mask + ch_baysor_input = ch_transcripts + .combine(RESIZE_TIF.out.resized_mask, by: 0) + .combine(ch_config) + .map { meta, transcripts, mask, config -> + tuple( + meta, + transcripts, + mask, + config, + 30, + ) + } + BAYSOR_RUN(ch_baysor_input) + } + else if (cell_segmentation_only) { + + // check if the size of the segmentation mask matches the max transcripts coordinate range + ch_resizetif_input = ch_transcripts + .combine(CELLPOSE_CELLS.out.mask, by: 0) + .combine(ch_experiment_metadata, by: 0) + .map { meta, transcripts, mask, exp_meta -> + tuple( + meta, + transcripts, + mask, + exp_meta, + ) + } + RESIZE_TIF(ch_resizetif_input) + + // run baysor with cell mask + ch_baysor_input = ch_transcripts + .combine(RESIZE_TIF.out.resized_mask, by: 0) + .combine(ch_config) + .map { meta, transcripts, mask, config -> + tuple( + meta, + transcripts, + mask, + config, + 30, + ) + } + BAYSOR_RUN(ch_baysor_input) + } + else { + + // run baysor without cell/nuclei mask + ch_baysor_input = ch_transcripts + .combine(ch_config) + .map { meta, transcripts, config -> + tuple( + meta, + transcripts, + [], + config, + 30, + ) + } + BAYSOR_RUN(ch_baysor_input) + } + + + // run import-segmentation with baysor outs + ch_imp_seg_inputs = ch_bundle_path + 
.combine(BAYSOR_RUN.out.segmentation, by: 0)
+        .map { meta, bundle, segmentation_csv, polygons2d ->
+            tuple(
+                meta,
+                bundle,
+                segmentation_csv,
+                polygons2d,
+                [],
+                [],
+                [],
+                ch_coordinate_space.val,
+            )
+        }
+
+    XENIUMRANGER_IMPORT_SEGMENTATION(ch_imp_seg_inputs)
+
+    emit:
+    coordinate_space = ch_coordinate_space // channel: [ val("microns") ]
+    redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+}
diff --git a/subworkflows/local/cellpose_baysor_import_segmentation/meta.yml b/subworkflows/local/cellpose_baysor_import_segmentation/meta.yml
new file mode 100644
index 00000000..8db892d7
--- /dev/null
+++ b/subworkflows/local/cellpose_baysor_import_segmentation/meta.yml
@@ -0,0 +1,90 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "cellpose_baysor_import_segmentation"
+description: |
+  combine image-based segmentation approach with cellpose and integrate results from coordinate-based segmentation
+  through baysor to run import-segmentation in micron coordinate space
+keywords:
+  - baysor
+  - cellpose
+  - baysor run
+  - segmentation
+  - xeniumranger import-segmentation
+  - image-based segmentation
+  - coordinate-based segmentation
+  - transcript filtering
+  - polygons
+components:
+  - cellpose
+  - resolift
+  - resize/tif
+  - get/transcripts/coordinates
+  - baysor/preprocess/transcripts
+  - baysor/run
+  - xeniumranger/import/segmentation
+  - split/transcripts
+input:
+  - ch_morphology_image:
+      description: |
+        path to the morphology.ome.tif file
+        Structure: [ val(meta), path("path-to-morphology.ome.tif") ]
+  - ch_bundle_path:
+      description: |
+        path to the xenium bundle
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+  - ch_transcripts_parquet:
+      description: |
+        input parquet file from the xenium bundle
+        Structure: [ val(meta), path("path-to-transcripts.parquet") ]
+  - ch_config:
+      description: |
+        config file for the xenium baysor run (stored in assets/config/xenium.toml)
+        Structure: [ path("path-to-xenium.toml") ]
+output:
+  - cells_mask:
+      description: |
+        cell segmentation mask generated by running Cellpose with the cpsam algorithm
+        Structure: [ val(meta), path("*masks.tif") ]
+  - cells_flows:
+      description: |
+        cell flows generated by running Cellpose with the cpsam algorithm
+        Structure: [ val(meta), path("*flows.tif") ]
+  - cells_cells:
+      description: |
+        cell segmentation mask as a numpy array generated by running Cellpose with the cpsam algorithm
+        Structure: [ val(meta), path("*seg.npy") ]
+  - nuclei_mask:
+      description: |
+        nuclei segmentation mask generated by running StarDist on the extracted DAPI channel
+        Structure: [ val(meta), path("*masks.tif") ]
+  - nuclei_flows:
+      description: |
+        nuclei flows generated by the nuclei segmentation step
+        Structure: [ val(meta), path("*flows.tif") ]
+  - nuclei_cells:
+      description: |
+        nuclei segmentation mask as a numpy array generated by the nuclei segmentation step
+        Structure: [ val(meta), path("*seg.npy") ]
+  - segmentation:
+      description: |
+        the segmentation.csv file generated from the baysor run command
+        Structure: [ val(meta), path("*segmentation.csv") ]
+  - polygons2d:
+      description: |
+        the segmentation_polygons_2d.json file generated from the baysor run command
+        Structure: [ val(meta), path("*segmentation_polygons_2d.json") ]
+  - coordinate_space:
+      description: |
+        the coordinate space in which xeniumranger import-segmentation was run
Structure: [ val("microns") ] + - redefined_bundle: + description: | + the redefined xenium bundle generated with the segmentation results from baysor + Structure: [ val(meta), ["redefined-xenium-bundle"] ] + - versions: + description: | + Files containing software versions + Structure: [ path(versions.yml) ] +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/subworkflows/local/cellpose_resolift_morphology_ome_tif/main.nf b/subworkflows/local/cellpose_resolift_morphology_ome_tif/main.nf new file mode 100644 index 00000000..6bb38ded --- /dev/null +++ b/subworkflows/local/cellpose_resolift_morphology_ome_tif/main.nf @@ -0,0 +1,141 @@ +// +// Run cellpose on the morphology tiff +// + +include { RESOLIFT } from '../../../modules/local/resolift/main' +include { DOWNSCALE_MORPHOLOGY } from '../../../modules/local/utility/downscale_morphology/main' +include { UPSCALE_MASK as UPSCALE_CELLS } from '../../../modules/local/utility/upscale_mask/main' +include { CELLPOSE as CELLPOSE_CELLS } from '../../../modules/nf-core/cellpose/main' +include { EXTRACT_DAPI } from '../../../modules/local/utility/extract_dapi/main' +include { STARDIST as STARDIST_NUCLEI } from '../../../modules/nf-core/stardist/main' +include { CONVERT_MASK_UINT32 } from '../../../modules/local/utility/convert_mask_uint32/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main' + +workflow CELLPOSE_RESOLIFT_MORPHOLOGY_OME_TIF { + take: + ch_morphology_image // channel: [ val(meta), ["path-to-morphology.ome.tiff"] ] + ch_bundle_path // channel: [ val(meta), ["path-to-xenium-bundle"] ] + cellpose_downscale // value: bool + cellpose_model // value: path to cellpose model (or null) + nucleus_segmentation_only // value: bool + sharpen_tiff // value: bool + stardist_nuclei_model // value: stardist pretrained model name + + main: + + ch_imp_seg_inputs = channel.empty() + ch_coordinate_space = channel.value("pixels") + + // Use empty list when no model is provided; path input for official cellpose module + cellpose_model_path = cellpose_model ? 
file(cellpose_model) : [] + stardist_model = stardist_nuclei_model ?: '2D_versatile_fluo' + + // sharpen morphology tiff if param - sharpen_tiff is true + if (sharpen_tiff) { + + RESOLIFT(ch_morphology_image) + + ch_image = RESOLIFT.out.enhanced_tiff + } + else { + + ch_image = ch_morphology_image + } + + // Optional pre-downscale for large images to avoid cellpose OOM + // Only needed when running cellpose for cells (not nucleus_segmentation_only) + if (cellpose_downscale && !nucleus_segmentation_only) { + + DOWNSCALE_MORPHOLOGY(ch_image) + + ch_cellpose_input = DOWNSCALE_MORPHOLOGY.out.downscaled + ch_scale_info = DOWNSCALE_MORPHOLOGY.out.scale_info + } + else { + + ch_cellpose_input = ch_image + ch_scale_info = channel.empty() + } + + // run cellpose on morphology tiff (or downscaled version) + if (!nucleus_segmentation_only) { + CELLPOSE_CELLS(ch_cellpose_input, cellpose_model_path) + } + + // StarDist for nuclei — extract DAPI first, then run on original resolution + EXTRACT_DAPI(ch_image) + + STARDIST_NUCLEI(EXTRACT_DAPI.out.dapi, [stardist_model, []]) + + // Convert StarDist mask to uint32 for XeniumRanger compatibility + CONVERT_MASK_UINT32(STARDIST_NUCLEI.out.mask) + + ch_nuclei_mask = CONVERT_MASK_UINT32.out.mask + + // Upscale cellpose cells mask back to original resolution if downscaled + // StarDist nuclei mask is already at original resolution (no upscale needed) + if (cellpose_downscale) { + + if (!nucleus_segmentation_only) { + ch_cells_for_upscale = CELLPOSE_CELLS.out.mask + .combine(ch_scale_info, by: 0) + UPSCALE_CELLS(ch_cells_for_upscale) + ch_cells_mask = UPSCALE_CELLS.out.upscaled_mask + } + } + else { + + if (!nucleus_segmentation_only) { + ch_cells_mask = CELLPOSE_CELLS.out.mask + } + } + + // run import-segmentation with cellpose results + if (nucleus_segmentation_only) { + + ch_imp_seg_inputs = ch_bundle_path + .combine(ch_nuclei_mask, by: 0) + .combine(ch_coordinate_space) + .map { meta, bundle, nuclei_seg, coord_space -> + tuple( + meta, + bundle, + [], + [], + nuclei_seg, + [], + [], + coord_space, + ) + } + XENIUMRANGER_IMPORT_SEGMENTATION( + ch_imp_seg_inputs + ) + } + else { + + ch_imp_seg_inputs = ch_bundle_path + .combine(ch_cells_mask, by: 0) + .combine(ch_nuclei_mask, by: 0) + .combine(ch_coordinate_space) + .map { meta, bundle, cells_seg, nuclei_seg, coord_space -> + tuple( + meta, + bundle, + [], + [], + nuclei_seg, + cells_seg, + [], + coord_space, + ) + } + XENIUMRANGER_IMPORT_SEGMENTATION( + ch_imp_seg_inputs + ) + } + + emit: + coordinate_space = ch_coordinate_space // channel: [ ["pixels"] ] + redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ] +} diff --git a/subworkflows/local/cellpose_resolift_morphology_ome_tif/meta.yml b/subworkflows/local/cellpose_resolift_morphology_ome_tif/meta.yml new file mode 100644 index 00000000..9dfdc52d --- /dev/null +++ b/subworkflows/local/cellpose_resolift_morphology_ome_tif/meta.yml @@ -0,0 +1,63 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "cellpose_resolift_morphology_ome_tif" +description: | + image-based segmentation approach with cellpose to run import-segmentation in pixel coordinate space +keywords: + - cellpose + - segmentation + - xeniumranger import-segmentation + - image-based segmentation +components: + - cellpose + - resolift + - xeniumranger/import/segmentation +input: + - ch_morphology_image: + description: | + path to the morphology.ome.tif file + 
Structure: [ val(meta), path("path-to-morphology.ome.tif") ] + - ch_bundle_path: + description: | + path to the xenium bundle + Structure: [ val(meta), path("path-to-xenium-bundle") ] +output: + - cells_mask: + description: | + cell segmentation mask generated by running Cellpose with the cpsam algorithm + Structure: [ val(meta), path("*masks.tif") ] + - cells_flows: + description: | + cell flows generated by running Cellpose with the cpsam algorithm + Structure: [ val(meta), path("*flows.tif") ] + - cells_cells: + description: | + cell segmentation mask as a numpy array generated by running Cellpose with the cpsam algorithm + Structure: [ val(meta), path("*seg.npy") ] + - nuclei_mask: + description: | + nuclei segmentation mask generated by running Cellpose with the nuclei algorithm + Structure: [ val(meta), path("*masks.tif") ] + - nuclei_flows: + description: | + nuclei flows generated by running Cellpose with the nuclei algorithm + Structure: [ val(meta), path("*masks.tif") ] + - nuclei_cells: + description: | + nuclei segmentation mask as a numpy array generated by running Cellpose with the nuclei algorithm + Structure: [ val(meta), path("*seg.npy") ] + - coordinate_space: + description: | + the coordinate space in which xeniumranger import-segmentation was run + Structure: [ val("pixels") ] + - redefined_bundle: + description: | + the redefined xenium bundle generated with the segmentation results from baysor + Structure: [ val(meta), ["redefined-xenium-bundle"] ] + - versions: + description: | + Files containing software versions + Structure: [ path(versions.yml) ] +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/subworkflows/local/ficture_preprocess_model/main.nf b/subworkflows/local/ficture_preprocess_model/main.nf new file mode 100644 index 00000000..c45713ec --- /dev/null +++ b/subworkflows/local/ficture_preprocess_model/main.nf @@ -0,0 +1,39 @@ +// +// Run ficture preprocess and model modules +// + +include { FICTURE_PREPROCESS } from '../../../modules/local/ficture/preprocess/main' +include { FICTURE } from '../../../modules/local/ficture/model/main' +include { PARQUET_TO_CSV } from '../../../modules/local/utility/parquet_to_csv/main' + + + +workflow FICTURE_PREPROCESS_MODEL { + take: + ch_transcripts_file // channel: [ val(meta), [ "transcripts.parquet" ] ] + ch_features // channel: [ ["features"] ] + features // value: path to features list (or null) + + main: + + // convert parquet to csv + PARQUET_TO_CSV(ch_transcripts_file, ".csv") + + // run ficture preprocessing + ch_transcripts = PARQUET_TO_CSV.out.transcripts_csv + + FICTURE_PREPROCESS(ch_transcripts, ch_features) + + // run the ficture wrapper pipeline + ch_features_clean = features ? 
FICTURE_PREPROCESS.out.features : channel.value([])
+    FICTURE(
+        FICTURE_PREPROCESS.out.transcripts,
+        FICTURE_PREPROCESS.out.coordinate_minmax,
+        ch_features_clean,
+    )
+    emit:
+    transcripts = FICTURE_PREPROCESS.out.transcripts // channel: [ val(meta), [ "*processed_transcripts.tsv.gz" ] ]
+    coordinate_minmax = FICTURE_PREPROCESS.out.coordinate_minmax // channel: [ "*coordinate_minmax.tsv" ]
+    features = FICTURE_PREPROCESS.out.features // channel: [ "*feature.clean.tsv.gz" ]
+    results = FICTURE.out.results // channel: [ val(meta), [ "results/**" ] ]
+}
diff --git a/subworkflows/local/ficture_preprocess_model/meta.yml b/subworkflows/local/ficture_preprocess_model/meta.yml
new file mode 100644
index 00000000..0a970613
--- /dev/null
+++ b/subworkflows/local/ficture_preprocess_model/meta.yml
@@ -0,0 +1,38 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "ficture_preprocess_model"
+description: Scalable segmentation-free analysis of sub-micron resolution spatial transcriptomics
+keywords:
+  - ficture
+  - ficture preprocess
+  - segmentation-free analysis
+  - pixel level factor analysis
+components:
+  - ficture/preprocess
+  - ficture
+  - parquet/to/csv
+input:
+  - ch_transcripts_parquet:
+      description: |
+        file containing the molecular or pixel level information, the required columns are X, Y, gene, and Count
+        Structure: [ val(meta), path("transcripts.parquet") ]
+  - ch_features:
+      description: |
+        unique names of genes that should be used in analysis
+        Structure: [ [gene1,gene2,gene3,gene4] ]
+output:
+  - transcripts:
+      description: |
+        preprocessed transcripts file generated by ficture preprocess
+        Structure: [ val(meta), path("*processed_transcripts.tsv.gz") ]
+  - coordinate_minmax:
+      description: |
+        file containing the min/max X and Y coordinates of the transcripts
+        Structure: [ path("*coordinate_minmax.tsv") ]
+  - features:
+      description: |
+        cleaned features file with the genes used in the analysis
+        Structure: [ path("*feature.clean.tsv.gz") ]
+  - results:
+      description: |
+        directory containing the results of the ficture model run
+        Structure: [ val(meta), path("results/**") ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/input_check.nf b/subworkflows/local/input_check.nf
deleted file mode 100644
index 0aecf87f..00000000
--- a/subworkflows/local/input_check.nf
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Check input samplesheet and get read channels
-//
-
-include { SAMPLESHEET_CHECK } from '../../modules/local/samplesheet_check'
-
-workflow INPUT_CHECK {
-    take:
-    samplesheet // file: /path/to/samplesheet.csv
-
-    main:
-    SAMPLESHEET_CHECK ( samplesheet )
-        .csv
-        .splitCsv ( header:true, sep:',' )
-        .map { create_fastq_channel(it) }
-        .set { reads }
-
-    emit:
-    reads // channel: [ val(meta), [ reads ] ]
-    versions = SAMPLESHEET_CHECK.out.versions // channel: [ versions.yml ]
-}
-
-// Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
-def create_fastq_channel(LinkedHashMap row) {
-    // create meta map
-    def meta = [:]
-    meta.id = row.sample
-    meta.single_end = row.single_end.toBoolean()
-
-    // add path(s) of the fastq file(s) to the meta map
-    def fastq_meta = []
-    if (!file(row.fastq_1).exists()) {
-        exit 1, "ERROR: Please check input samplesheet -> Read 1 FastQ file does not exist!\n${row.fastq_1}"
-    }
-    if (meta.single_end) {
-        fastq_meta = [ meta, [ file(row.fastq_1) ] ]
-    } else {
-        if (!file(row.fastq_2).exists()) {
-            exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.fastq_2}"
-        }
-        fastq_meta = [ meta, [ file(row.fastq_1), file(row.fastq_2) ] ]
-    }
-    return fastq_meta
-}
diff --git a/subworkflows/local/opt_flip_track_stat/main.nf b/subworkflows/local/opt_flip_track_stat/main.nf
new file mode 100644
index 00000000..839768d2
--- /dev/null
+++ 
b/subworkflows/local/opt_flip_track_stat/main.nf
@@ -0,0 +1,34 @@
+include { OPT_FLIP } from '../../../modules/nf-core/opt/flip/main'
+include { OPT_TRACK } from '../../../modules/nf-core/opt/track/main'
+include { OPT_STAT } from '../../../modules/nf-core/opt/stat/main'
+
+
+workflow OPT_FLIP_TRACK_STAT {
+    take:
+    ch_probe_fasta   // channel: [ val(meta), [ "panel_probes_sequences.fasta" ] ]
+    ch_references    // channel: [ val(meta), ["reference_annotations.gff"], ["reference_annotations.fa"] ]
+    ch_gene_synonyms // channel: [ "path-to-gene-synonyms" ]
+
+    main:
+
+    ch_versions = channel.empty()
+    ch_summary = channel.empty()
+
+    // correct probes that align to the opposite strand with `flip`
+    OPT_FLIP(ch_probe_fasta, ch_references)
+    ch_versions = ch_versions.mix(OPT_FLIP.out.versions)
+
+    // align query probe sequences to target transcriptome
+    OPT_TRACK(OPT_FLIP.out.fwd_oriented_fa, ch_references)
+    ch_versions = ch_versions.mix(OPT_TRACK.out.versions)
+
+    // summarize opt binding predictions
+    OPT_STAT(OPT_TRACK.out.probes2target, OPT_FLIP.out.fwd_oriented_fa, ch_gene_synonyms)
+    ch_versions = ch_versions.mix(OPT_STAT.out.versions)
+
+    ch_summary = OPT_STAT.out.summary
+
+    emit:
+    summary = ch_summary // channel: [ val(meta), ["collapsed_summary.tsv", "other-summary-files"]]
+    versions = ch_versions // channel: [ versions.yml ]
+}
diff --git a/subworkflows/local/opt_flip_track_stat/meta.yml b/subworkflows/local/opt_flip_track_stat/meta.yml
new file mode 100644
index 00000000..07ccc8b7
--- /dev/null
+++ b/subworkflows/local/opt_flip_track_stat/meta.yml
@@ -0,0 +1,51 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "opt_flip_track_stat"
+description: opt is a simple program that aligns probe sequences to transcript sequences
+  to detect potential off-target probe activity
+keywords:
+  - opt
+  - flip
+  - track
+  - stat
+  - off-target probes
+components:
+  - opt/flip
+  - opt/track
+  - opt/stat
+input:
+  - ch_probe_fasta:
+      type: file
+      description: |
+        Input channel containing the sample information and the associated probe panel sequences fasta file
+        Structure: [ val(meta), path("probe_panel_sequences.fasta") ]
+      pattern: "*.fasta"
+  - ch_references:
+      type: file
+      description: |
+        Input channel containing the sample information
and the references to be used
+        Structure: [ val(meta), path("reference_annotations.gff"), path("reference_annotations.fa") ]
+      pattern: "*.{fa,gff}"
+  - ch_gene_synonyms:
+      type: file
+      description: |
+        Input channel containing gene synonyms that may have been counted as off-targets but
+        simply differ in name (optional input)
+        Structure: [ val(meta), path("gene_synonyms.csv") ]
+      pattern: "*.csv"
+output:
+  - summary:
+      type: file
+      description: |
+        Summary of the forward-oriented probes generated by running opt flip and track on the panel sequences
+        Structure: [ val(meta), path("collapsed_summary.tsv") ]
+      pattern: "*.tsv"
+  - versions:
+      type: file
+      description: |
+        File containing software versions
+        Structure: [ path(versions.yml) ]
+      pattern: "versions.yml"
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/proseg_preset_proseg2baysor/main.nf b/subworkflows/local/proseg_preset_proseg2baysor/main.nf
new file mode 100644
index 00000000..3f9d8c99
--- /dev/null
+++ b/subworkflows/local/proseg_preset_proseg2baysor/main.nf
@@ -0,0 +1,50 @@
+//
+// Runs proseg for the xenium format and proseg2baysor to generate cell polygons
+//
+
+include { PROSEG } from '../../../modules/local/proseg/preset/main'
+include { PROSEG2BAYSOR } from '../../../modules/local/proseg/proseg2baysor/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+workflow PROSEG_PRESET_PROSEG2BAYSOR {
+    take:
+    ch_bundle_path      // channel: [ val(meta), ["path-to-xenium-bundle"] ]
+    ch_transcripts_file // channel: [ val(meta), [ "transcripts.parquet" ] ]
+
+    main:
+
+    ch_coordinate_space = channel.value("microns")
+
+    // run proseg with the xenium format
+    PROSEG(ch_transcripts_file)
+
+
+    // run proseg-to-baysor on the zarr output from proseg v3
+    PROSEG2BAYSOR(PROSEG.out.zarr)
+
+
+    // run xeniumranger import-segmentation
+    ch_imp_seg_inputs = ch_bundle_path
+        .combine(PROSEG2BAYSOR.out.xr_metadata, by: 0)
+        .combine(PROSEG2BAYSOR.out.xr_polygons, by: 0)
+        .map { meta, bundle, metadata, polygons2d ->
+            tuple(
+                meta,
+                bundle,
+                metadata,
+                polygons2d,
+                [],
+                [],
+                [],
+                ch_coordinate_space.val,
+            )
+        }
+
+    XENIUMRANGER_IMPORT_SEGMENTATION(
+        ch_imp_seg_inputs
+    )
+
+    emit:
+    coordinate_space = ch_coordinate_space // channel: [ "microns" ]
+    redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+}
diff --git a/subworkflows/local/proseg_preset_proseg2baysor/meta.yml b/subworkflows/local/proseg_preset_proseg2baysor/meta.yml
new file mode 100644
index 00000000..ec8ffbbb
--- /dev/null
+++ b/subworkflows/local/proseg_preset_proseg2baysor/meta.yml
@@ -0,0 +1,52 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "proseg_preset_proseg2baysor"
+description: to run proseg with the transcripts.parquet file as a coordinate-based segmentation run
+keywords:
+  - proseg
+  - segmentation
+  - xeniumranger import-segmentation
+  - coordinate-based segmentation
+  - polygons
+  - metadata
+components:
+  - proseg
+  - proseg2baysor
+  - xeniumranger/import/segmentation
+input:
+  - ch_bundle_path:
+      description: |
+        path to the xenium bundle
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+  - ch_transcripts_parquet:
+      description: |
+        input parquet file from the xenium bundle
+        Structure: [ val(meta), path("path-to-transcripts.parquet") ]
+output:
+  - 
cell_polygons_2d:
+      description: |
+        the cell-polygons.geojson.gz file generated from proseg
+        Structure: [ val(meta), path("cell-polygons.geojson.gz") ]
+  - xr_polygons:
+      description: |
+        xeniumranger-compatible polygon file generated from the proseg command
+        Structure: [ val(meta), path("xr-cell-polygons.geojson") ]
+  - xr_metadata:
+      description: |
+        xeniumranger-compatible metadata file generated from the proseg command
+        Structure: [ val(meta), path("xr-transcript-metadata.csv") ]
+  - coordinate_space:
+      description: |
+        the coordinate space in which xeniumranger import-segmentation was run
+        Structure: [ val("microns") ]
+  - redefined_bundle:
+      description: |
+        the redefined xenium bundle generated with the segmentation results from proseg
+        Structure: [ val(meta), ["redefined-xenium-bundle"] ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/proseg_preset_proseg2baysor_tiled/main.nf b/subworkflows/local/proseg_preset_proseg2baysor_tiled/main.nf
new file mode 100644
index 00000000..afeca1a0
--- /dev/null
+++ b/subworkflows/local/proseg_preset_proseg2baysor_tiled/main.nf
@@ -0,0 +1,86 @@
+//
+// Runs proseg with tiling: divide transcripts -> proseg per patch -> proseg2baysor -> stitch -> xeniumranger
+//
+
+include { XENIUM_PATCH_DIVIDE } from '../../../modules/local/xenium_patch/divide/main'
+include { PROSEG } from '../../../modules/local/proseg/preset/main'
+include { PROSEG2BAYSOR } from '../../../modules/local/proseg/proseg2baysor/main'
+include { XENIUM_PATCH_STITCH } from '../../../modules/local/xenium_patch/stitch/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+workflow PROSEG_PRESET_PROSEG2BAYSOR_TILED {
+
+    take:
+    ch_bundle_path      // channel: [ val(meta), ["path-to-xenium-bundle"] ]
+    ch_transcripts_file // channel: [ val(meta), [ "transcripts.parquet" ] ]
+
+    main:
+
+    ch_coordinate_space = channel.value("microns")
+
+    // Step 1: Divide transcripts into overlapping patches
+    XENIUM_PATCH_DIVIDE ( ch_transcripts_file )
+
+    // Step 2: Fan out patches for parallel processing
+    // transpose() emits one item per patch file: [meta, parquet_path]
+    ch_patches = XENIUM_PATCH_DIVIDE.out.patch_transcripts
+        .transpose()
+        .map { meta, parquet_file ->
+            def patch_id = parquet_file.parent.name
+            def patch_meta = meta.clone()
+            patch_meta.sample_id = meta.id
+            patch_meta.patch_id = patch_id
+            patch_meta.id = "${meta.id}_${patch_id}"
+            tuple(patch_meta, parquet_file)
+        }
+
+    // Step 3: Run proseg on each patch independently
+    PROSEG ( ch_patches )
+
+    // Step 4: Convert proseg output to baysor format per patch
+    PROSEG2BAYSOR ( PROSEG.out.zarr )
+
+    // Step 5: Gather patch results per sample for stitching
+    ch_for_stitch = PROSEG2BAYSOR.out.xr_polygons
+        .join(PROSEG2BAYSOR.out.xr_metadata, by: 0)
+        .map { patch_meta, geojson, csv ->
+            tuple(patch_meta.sample_id, [patch_meta.patch_id, csv, geojson])
+        }
+        .groupTuple(by: 0)
+        .map { sample_id, patch_data ->
+            def sorted = patch_data.sort { it[0] }
+            def patch_ids = sorted.collect { it[0] }
+            def csvs = sorted.collect { it[1] }
+            def geojsons = sorted.collect { it[2] }
+            tuple(sample_id, patch_ids, csvs, geojsons)
+        }
+
+    // Combine with grid metadata from DIVIDE
+    ch_stitch_input = ch_for_stitch
+        .join(
+            XENIUM_PATCH_DIVIDE.out.grid
+                .map { meta, grid -> tuple(meta.id, grid) }
+        )
+        .map { sample_id, 
patch_ids, csvs, geojsons, grid_json -> + def meta = [id: sample_id] + tuple(meta, grid_json, patch_ids, csvs, geojsons) + } + + // Step 6: Stitch patch results into unified segmentation output + XENIUM_PATCH_STITCH ( ch_stitch_input ) + + // Step 7: Run xeniumranger import-segmentation + // Note: Cell size filtering is handled inline by STITCH via --filter-method + ch_xr = ch_bundle_path + .combine(XENIUM_PATCH_STITCH.out.xr_polygons_transcript, by: 0) + .combine(ch_coordinate_space) + .map { meta, bundle, geojson, csv, coord_space -> + tuple(meta, bundle, csv, geojson, [], [], [], coord_space) + } + + XENIUMRANGER_IMPORT_SEGMENTATION ( ch_xr ) + + emit: + coordinate_space = ch_coordinate_space // channel: [ "microns" ] + redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ] +} diff --git a/subworkflows/local/segger_create_train_predict/main.nf b/subworkflows/local/segger_create_train_predict/main.nf new file mode 100644 index 00000000..e2486150 --- /dev/null +++ b/subworkflows/local/segger_create_train_predict/main.nf @@ -0,0 +1,77 @@ +// +// Run segger create_dataset, train and predict modules & parquet_to_csv +// + +include { SEGGER2XR } from '../../../modules/local/utility/segger2xr/main' +include { SEGGER_TRAIN } from '../../../modules/local/segger/train/main' +include { SEGGER_PREDICT } from '../../../modules/local/segger/predict/main' +include { SEGGER_CREATE_DATASET } from '../../../modules/local/segger/create_dataset/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main' + +workflow SEGGER_CREATE_TRAIN_PREDICT { + take: + ch_bundle // channel: [ val(meta), ["path-to-xenium-bundle"] ] + ch_transcripts_file // channel: [ val(meta), [bundle + "/transcripts.parquet"]] + segger_model // value: path to a pre-trained segger model checkpoint (or null) + + main: + + // Note: spatialxe uses "pixels" but per 10x docs, transcript-based segmentation + // (like Baysor/Segger) must use "microns" since Xenium coordinates are in microns + ch_coordinate_space = channel.value("microns") + + // create dataset (always needed for predict step) + SEGGER_CREATE_DATASET(ch_bundle) + + // Determine model source and join all PREDICT inputs by meta. + // Without meta-based join, queue channels align by emission order, + // which is non-deterministic and causes cross-sample input mispairing. 
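+    // Illustrative example (assumed inputs, not part of the workflow): `join`
+    // matches items by their first element, so tuples keyed on the same meta map
+    // re-pair correctly even when the channels emit in different orders:
+    //
+    //   channel.of([[id: 's1'], 'a.ckpt'], [[id: 's2'], 'b.ckpt'])
+    //       .join(channel.of([[id: 's2'], 'b.parquet'], [[id: 's1'], 'a.parquet']))
+    //       .view()
+    //   // -> [[id: 's1'], 'a.ckpt', 'a.parquet'], [[id: 's2'], 'b.ckpt', 'b.parquet']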
+ if (segger_model) { + // Use pre-trained model - skip training + def model_path = file(segger_model) + ch_predict_paired = SEGGER_CREATE_DATASET.out.datasetdir + .join(ch_transcripts_file) + .map { meta, dataset, tx -> [meta, dataset, model_path, tx] } + } else { + // Train a new model per sample, join all inputs by meta + SEGGER_TRAIN(SEGGER_CREATE_DATASET.out.datasetdir) + ch_predict_paired = SEGGER_CREATE_DATASET.out.datasetdir + .join(SEGGER_TRAIN.out.trained_models) + .join(ch_transcripts_file) + } + // ch_predict_paired: [meta, dataset_dir, models_dir, transcripts] + + SEGGER_PREDICT( + ch_predict_paired.map { meta, dataset, _m, _tx -> [meta, dataset] }, + ch_predict_paired.map { _meta, _dataset, models, _tx -> models }, + ch_predict_paired.map { _meta, _dataset, _m, tx -> [tx] }, + ) + // convert parquet to XR compatible form + SEGGER2XR(SEGGER_PREDICT.out.transcripts) + + // run xeniumranger import-segmentation with Baysor-format CSV + viz polygons + // xeniumranger 4.0 expects Baysor CSV (with is_noise column) for --transcript-assignment + ch_imp_seg_inputs = ch_bundle + .combine(SEGGER2XR.out.segmentation_csv, by: 0) + .combine(SEGGER2XR.out.viz_polygons, by: 0) + .map { meta, bundle, segmentation_csv, polygons -> + tuple( + meta, + bundle, + segmentation_csv, // transcript_assignment (Baysor-format CSV) + polygons, // viz_polygons (GeoJSON cell boundaries) + [], // nuclei + [], // cells + [], // coordinate_transform + ch_coordinate_space.val, + ) + } + + XENIUMRANGER_IMPORT_SEGMENTATION( + ch_imp_seg_inputs + ) + + emit: + coordinate_space = ch_coordinate_space // channel: [ "microns" ] + redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ] +} diff --git a/subworkflows/local/segger_create_train_predict/meta.yml b/subworkflows/local/segger_create_train_predict/meta.yml new file mode 100644 index 00000000..7db69d19 --- /dev/null +++ b/subworkflows/local/segger_create_train_predict/meta.yml @@ -0,0 +1,59 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "segger_create_train_predict" +description: | + segger is a cutting-edge tool for cell segmentation in single-molecule spatial omics datasets, + subworkflow is the implementation of segger modules run in the recommended sequence +keywords: + - segger + - segmentation + - xeniumranger import-segmentation + - coordinate-based segmentation +components: + - segger + - segger/create/dataset + - segger/train + - segger/predict + - segger2xr + - xeniumranger/import/segmentation +input: + - ch_bundle_path: + description: | + Directory containing the raw dataset - xenium bundle (e.g., transcripts, boundaries). 
+ Structure: [ val(meta), path("path-to-xenium-bundle") ] + - ch_transcripts_parquet: + description: | + input parquet file from the xenium bundle + Structure: [ val(meta), path("path-to-transcripts.parquet") ] +output: + - datasetdir: + description: | + Directory generated by the segger create dataset module + Structure: [ val(meta), path(datasetdir) ] + - trained_models: + description: | + The model trained on the data by the segger training module + Structure: [ val(meta), path(trained_models) ] + - benchmarks: + description: | + benchmarks generated from the segger training and prediction steps + Structure: [ val(meta), path(benchmarks) ] + - segger_transcripts: + description: | + transcripts parquet file generated after segger prediction conatining the segger ids + Structure: [ path(transcripts.parquet) ] + - coordinate_space: + description: | + the coordinate space in which xeniumranger import-segmentation was run + Structure: [ val("pixels") ] + - redefined_bundle: + description: | + the redefined xenium bundle generated with the segmentation results from baysor + Structure: [ val(meta), ["redefined-xenium-bundle"] ] + - versions: + description: | + Files containing software versions + Structure: [ path(versions.yml) ] +authors: + - "@khersameesh24" +maintainers: + - "@khersameesh24" diff --git a/subworkflows/local/spatialdata_write_meta_merge/main.nf b/subworkflows/local/spatialdata_write_meta_merge/main.nf new file mode 100644 index 00000000..7a407c88 --- /dev/null +++ b/subworkflows/local/spatialdata_write_meta_merge/main.nf @@ -0,0 +1,81 @@ +// +// generate spatialdata object from the spatialxe layers +// + +include { SPATIALDATA_META } from '../../../modules/local/spatialdata/meta/main' +include { SPATIALDATA_WRITE as SPATIALDATA_WRITE_RAW_BUNDLE } from '../../../modules/local/spatialdata/write/main' +include { SPATIALDATA_MERGE as SPATIALDATA_MERGE_RAW_REDEFINED } from '../../../modules/local/spatialdata/merge/main' +include { SPATIALDATA_WRITE as SPATIALDATA_WRITE_REDEFINED_BUNDLE } from '../../../modules/local/spatialdata/write/main' + +workflow SPATIALDATA_WRITE_META_MERGE { + take: + ch_bundle_path // channel: [ val(meta), [ "path-to-xenium-bundle" ] ] + ch_redefined_bundle // channel: [ val(meta), [ "redefined-xenium-bundle" ] ] + ch_coordinate_space // channel: [ "pixels" or "microns" ] + cell_segmentation_only // value: bool + mode // value: pipeline mode (image/coordinate/...) 
+    nucleus_segmentation_only // value: bool
+
+    main:
+
+    ch_segmented_object = channel.empty()
+
+    // check segmentation - only nuclei, cells or both cells & nuclei
+    if (mode == 'image') {
+
+        if (nucleus_segmentation_only && cell_segmentation_only) {
+            ch_segmented_object = channel.value('cells_and_nuclei')
+        }
+        else if (nucleus_segmentation_only) {
+            ch_segmented_object = channel.value('nuclei')
+        }
+        else if (cell_segmentation_only) {
+            ch_segmented_object = channel.value('cells')
+        }
+        else {
+            ch_segmented_object = channel.value([])
+        }
+    }
+
+    // coordinate mode - no image-derived boundaries to select (default)
+    if (mode == 'coordinate') {
+        ch_segmented_object = channel.value([])
+    }
+
+    // write spatialdata object from the raw xenium bundle
+    SPATIALDATA_WRITE_RAW_BUNDLE(
+        ch_bundle_path,
+        'raw_bundle',
+        ch_segmented_object,
+        ch_coordinate_space,
+    )
+
+
+    // write spatialdata object from the redefined bundle after running import-segmentation
+    SPATIALDATA_WRITE_REDEFINED_BUNDLE(
+        ch_redefined_bundle,
+        'redefined_bundle',
+        ch_segmented_object,
+        ch_coordinate_space,
+    )
+
+
+    // merge raw & redefined spatialdata objects
+    SPATIALDATA_MERGE_RAW_REDEFINED(
+        SPATIALDATA_WRITE_RAW_BUNDLE.out.spatialdata.combine(ch_redefined_bundle, by: 0),
+        'merged_bundle'
+    )
+
+
+    // write metadata with spatialdata object
+    SPATIALDATA_META(
+        SPATIALDATA_MERGE_RAW_REDEFINED.out.merged_bundle.combine(ch_bundle_path, by: 0),
+        'metadata'
+    )
+
+    emit:
+    sd_raw_bundle       = SPATIALDATA_WRITE_RAW_BUNDLE.out.spatialdata       // channel: [ val(meta), "spatialdata_raw" ]
+    sd_redefined_bundle = SPATIALDATA_WRITE_REDEFINED_BUNDLE.out.spatialdata // channel: [ val(meta), "spatialdata_redefined" ]
+    sd_merged_bundle    = SPATIALDATA_MERGE_RAW_REDEFINED.out.merged_bundle  // channel: [ val(meta), "spatialdata_merged" ]
+    sd_metadata         = SPATIALDATA_META.out.metadata                      // channel: [ val(meta), "spatialdata_meta" ]
+}
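The merge and metadata steps above pair per-sample channels with `combine(..., by: 0)`, which keys the combination on the shared `meta` map (unlike a plain `combine`, which would form a full cross product). A minimal sketch with toy values:

```nextflow
// combine(by: 0) matches tuples on their first element and concatenates
// the remaining fields, yielding one [meta, raw, redefined] per sample.
workflow {
    ch_raw       = channel.of([[id: 's1'], 'spatialdata_raw_s1'])
    ch_redefined = channel.of([[id: 's1'], 'redefined_bundle_s1'])

    ch_raw.combine(ch_redefined, by: 0).view()
    // -> [[id:s1], spatialdata_raw_s1, redefined_bundle_s1]
}
```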
diff --git a/subworkflows/local/spatialdata_write_meta_merge/meta.yml b/subworkflows/local/spatialdata_write_meta_merge/meta.yml
new file mode 100644
index 00000000..f8bfa2c4
--- /dev/null
+++ b/subworkflows/local/spatialdata_write_meta_merge/meta.yml
@@ -0,0 +1,54 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "spatialdata_write_meta_merge"
+description: |
+  SpatialData is a data framework that comprises a FAIR storage format and a collection of python libraries for
+  performant access, alignment, and processing of uni- and multi-modal spatial omics datasets
+keywords:
+  - spatialdata
+  - xenium
+  - xeniumranger import-segmentation
+  - spatialdata object
+  - metadata
+  - merge spatialdata objects
+components:
+  - spatialdata/write
+  - spatialdata/meta
+  - spatialdata/merge
+input:
+  - ch_bundle_path:
+      description: |
+        Directory containing the raw dataset
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+  - ch_redefined_bundle:
+      description: |
+        Directory containing the redefined xenium bundle after running xeniumranger import-segmentation
+        Structure: [ val(meta), path("redefined-xenium-bundle") ]
+  - ch_coordinate_space:
+      description: |
+        The coordinate space in which xeniumranger import-segmentation was run
+        Structure: [ val("pixels" or "microns") ]
+output:
+  - sd_raw_bundle:
+      description: |
+        spatialdata object generated from the raw xenium bundle
+        Structure: [ val(meta), path(spatialdata_raw) ]
+  - sd_redefined_bundle:
+      description: |
+        spatialdata object generated from the redefined xenium bundle
+        Structure: [ val(meta), path(spatialdata_redefined) ]
+  - sd_merged_bundle:
+      description: |
+        spatialdata object generated from merging the spatialdata objects from raw and redefined xenium bundles
+        Structure: [ val(meta), path(spatialdata_merged) ]
+  - sd_metadata:
+      description: |
+        spatialdata object containing the metadata info
+        Structure: [ val(meta), path(spatialdata_meta) ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/stardist_resolift_morphology_ome_tif/main.nf b/subworkflows/local/stardist_resolift_morphology_ome_tif/main.nf
new file mode 100644
index 00000000..bc255409
--- /dev/null
+++ b/subworkflows/local/stardist_resolift_morphology_ome_tif/main.nf
@@ -0,0 +1,70 @@
+//
+// Run stardist nuclei segmentation on the morphology tiff
+//
+
+include { RESOLIFT } from '../../../modules/local/resolift/main'
+include { EXTRACT_DAPI } from '../../../modules/local/utility/extract_dapi/main'
+include { STARDIST as STARDIST_NUCLEI } from '../../../modules/nf-core/stardist/main'
+include { CONVERT_MASK_UINT32 } from '../../../modules/local/utility/convert_mask_uint32/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+workflow STARDIST_RESOLIFT_MORPHOLOGY_OME_TIF {
+    take:
+    ch_morphology_image   // channel: [ val(meta), ["path-to-morphology.ome.tiff"] ]
+    ch_bundle_path        // channel: [ val(meta), ["path-to-xenium-bundle"] ]
+    sharpen_tiff          // value: bool
+    stardist_nuclei_model // value: stardist pretrained model name
+
+    main:
+
+    ch_imp_seg_inputs = channel.empty()
+    ch_coordinate_space = channel.value("pixels")
+
+    // Use default model when no model is provided
+    stardist_model = stardist_nuclei_model ?: '2D_versatile_fluo'
+
+    // sharpen the morphology tiff when --sharpen_tiff is enabled
+    if (sharpen_tiff) {
+
+        RESOLIFT(ch_morphology_image)
+
+        ch_image = RESOLIFT.out.enhanced_tiff
+    }
+    else {
+
+        ch_image = ch_morphology_image
+    }
+
+    // Extract DAPI channel for StarDist (expects single-channel input)
+    EXTRACT_DAPI(ch_image)
+
+    // Run StarDist nuclei segmentation on DAPI channel
+    STARDIST_NUCLEI(EXTRACT_DAPI.out.dapi, [stardist_model, []])
+
+    // Convert mask to uint32 for XeniumRanger compatibility
+    CONVERT_MASK_UINT32(STARDIST_NUCLEI.out.mask)
+
+    // Run import-segmentation with nuclei only
+    // XeniumRanger expands nuclei by expansion_distance to create cell boundaries
+    ch_imp_seg_inputs = ch_bundle_path
+        .combine(CONVERT_MASK_UINT32.out.mask, by: 0)
+        .map { meta, bundle, nuclei_seg ->
+            tuple(
+                meta,
+                bundle,
+                [],
+                [],
+                nuclei_seg,
+                [],
+                [],
+                ch_coordinate_space.val,
+            )
+        }
+    XENIUMRANGER_IMPORT_SEGMENTATION(
+        ch_imp_seg_inputs
+    )
+
+    emit:
+    coordinate_space = ch_coordinate_space                        // channel: [ ["pixels"] ]
+    redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs  // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+}
diff --git a/subworkflows/local/utils_nfcore_spatialxe_pipeline/main.nf b/subworkflows/local/utils_nfcore_spatialxe_pipeline/main.nf
new file mode 100644
index 00000000..6439211b
--- /dev/null
+++ b/subworkflows/local/utils_nfcore_spatialxe_pipeline/main.nf
@@ -0,0 +1,429 @@
+//
+// Subworkflow with functionality specific to the nf-core/spatialxe pipeline
+//
+
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    IMPORT FUNCTIONS / MODULES / SUBWORKFLOWS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*/ + +include { UTILS_NFSCHEMA_PLUGIN } from '../../nf-core/utils_nfschema_plugin' +include { paramsSummaryMap } from 'plugin/nf-schema' +include { samplesheetToList } from 'plugin/nf-schema' +include { completionEmail } from '../../nf-core/utils_nfcore_pipeline' +include { completionSummary } from '../../nf-core/utils_nfcore_pipeline' +include { imNotification } from '../../nf-core/utils_nfcore_pipeline' +include { UTILS_NFCORE_PIPELINE } from '../../nf-core/utils_nfcore_pipeline' +include { UTILS_NEXTFLOW_PIPELINE } from '../../nf-core/utils_nextflow_pipeline' + +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SUBWORKFLOW TO INITIALISE PIPELINE +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*/ + +workflow PIPELINE_INITIALISATION { + take: + version // boolean: Display version and exit + validate_params // boolean: Boolean whether to validate parameters against the schema at runtime + monochrome_logs // boolean: Do not use coloured log outputs + nextflow_cli_args // array: List of positional nextflow CLI args + outdir // string: The output directory where the results will be saved + input // string: Path to input samplesheet + help // boolean: Display help message and exit + help_full // boolean: Show the full help message + show_hidden // boolean: Show hidden parameters in the help message + gene_panel // string: path to gene panel + gene_synonyms // string: path to gene synonyms + image_seg_methods // list: valid image-mode segmentation methods + method // string: chosen segmentation method + mode // string: pipeline mode + nucleus_segmentation_only // boolean + offtarget_probe_tracking // boolean + probes_fasta // string: path to probes fasta + reference_annotations // string: path to reference annotations + relabel_genes // boolean + segmentation_mask // string: path to segmentation mask + transcript_seg_methods // list: valid coordinate-mode segmentation methods + + main: + + // + // Print version and exit if required and dump pipeline parameters to JSON file + // + UTILS_NEXTFLOW_PIPELINE( + version, + true, + outdir, + workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1, + ) + + // + // Validate parameters and generate parameter summary to stdout + // + before_text = """ +-\033[2m----------------------------------------------------\033[0m- + \033[0;32m,--.\033[0;30m/\033[0;32m,-.\033[0m +\033[0;34m ___ __ __ __ ___ \033[0;32m/,-._.--~\'\033[0m +\033[0;34m |\\ | |__ __ / ` / \\ |__) |__ \033[0;33m} {\033[0m +\033[0;34m | \\| | \\__, \\__/ | \\ |___ \033[0;32m\\`-._,-`-,\033[0m + \033[0;32m`._,._,\'\033[0m +\033[0;35m nf-core/spatialxe ${workflow.manifest.version}\033[0m +-\033[2m----------------------------------------------------\033[0m- +""" + after_text = """${workflow.manifest.doi ? "\n* The pipeline\n" : ""}${workflow.manifest.doi.tokenize(",").collect { " https://doi.org/${it.trim().replace('https://doi.org/', '')}" }.join("\n")}${workflow.manifest.doi ? 
"\n" : ""} +* The nf-core framework + https://doi.org/10.1038/s41587-020-0439-x + +* Software dependencies + https://github.com/nf-core/spatialxe/blob/master/CITATIONS.md +""" + command = "nextflow run ${workflow.manifest.name} -profile --input samplesheet.csv --mode --outdir " + + UTILS_NFSCHEMA_PLUGIN( + workflow, + validate_params, + null, + help, + help_full, + show_hidden, + before_text, + after_text, + command, + ) + + // + // Check config provided to the pipeline + // + UTILS_NFCORE_PIPELINE( + nextflow_cli_args + ) + + // + // Custom validation for pipeline parameters + // + validateInputParameters( + input, + mode, + method, + image_seg_methods, + transcript_seg_methods, + relabel_genes, + gene_panel, + nucleus_segmentation_only, + segmentation_mask, + offtarget_probe_tracking, + probes_fasta, + reference_annotations, + gene_synonyms, + ) + log.info("✅ Pipeline parameters validated.") + + // + // Create channel from input file provided through --input + // + try { + + channel.fromList(samplesheetToList(input, "${projectDir}/assets/schema_input.json")) + .map { meta, bundle, image -> + return [[id: meta.id], bundle, image] + } + .set { ch_samplesheet } + + log.info("✅ Samplesheet validated.") + } + catch (Exception e) { + + log.error("❌ Samplesheet validation failed: ${e.message}") + exit(1) + } + + + // + // Check and validate xenium bundle + // + if (!workflow.profile.contains('test')) { + validateXeniumBundle(ch_samplesheet) + } + + emit: + samplesheet = ch_samplesheet +} + +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SUBWORKFLOW FOR PIPELINE COMPLETION +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*/ + +workflow PIPELINE_COMPLETION { + take: + email // string: email address + email_on_fail // string: email address sent on pipeline failure + plaintext_email // boolean: Send plain-text email instead of HTML + outdir // path: Path to output directory where results will be published + monochrome_logs // boolean: Disable ANSI colour codes in log output + hook_url // string: hook URL for notifications + multiqc_report // string: Path to MultiQC report + + main: + summary_params = paramsSummaryMap(workflow, parameters_schema: "nextflow_schema.json") + def multiqc_reports = multiqc_report.toList() + + // + // Completion email and summary + // + workflow.onComplete { + if (email || email_on_fail) { + completionEmail( + summary_params, + email, + email_on_fail, + plaintext_email, + outdir, + monochrome_logs, + multiqc_reports.getVal(), + ) + } + + completionSummary(monochrome_logs) + if (hook_url) { + imNotification(summary_params, hook_url) + } + } + + workflow.onError { + log.error("❌ Pipeline failed. 
+
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    FUNCTIONS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/
+//
+// Check and validate pipeline parameters
+//
+def validateInputParameters(
+    input,
+    mode,
+    method,
+    image_seg_methods,
+    transcript_seg_methods,
+    relabel_genes,
+    gene_panel,
+    nucleus_segmentation_only,
+    segmentation_mask,
+    offtarget_probe_tracking,
+    probes_fasta,
+    reference_annotations,
+    gene_synonyms
+) {
+
+    // check if conda profile is provided
+    if (workflow.profile.contains('conda')) {
+        log.error("❌ Error: `nf-core/spatialxe` does not support running the pipeline with profile: conda")
+        exit(1)
+    }
+
+    // check if the samplesheet provided with the test config is assets/samplesheet.csv
+    if (workflow.profile.contains('test') && !"${input}".endsWith("assets/samplesheet.csv")) {
+        log.error("❌ Error: Use the samplesheet at: ${projectDir}/assets/samplesheet.csv with `--input` when running the pipeline in test profile.")
+        exit(1)
+    }
+
+    // check if the segmentation method provided is valid for a mode
+    if (mode == 'image' && method) {
+        if (!image_seg_methods.contains(method)) {
+            log.error("❌ Error: Invalid segmentation method: ${method} provided for the `image` based mode. Options: ${image_seg_methods}")
+            exit(1)
+        }
+    }
+
+    if (mode == 'coordinate' && method) {
+        if (!transcript_seg_methods.contains(method)) {
+            log.error("❌ Error: Invalid segmentation method: `${method}` provided for the `coordinate` based mode. Options: ${transcript_seg_methods}")
+            exit(1)
+        }
+    }
+
+    // check if --relabel_genes is true but --gene_panel is not provided
+    if (relabel_genes && !gene_panel) {
+        log.warn("⚠️ Relabel genes is enabled, but gene panel is not provided with the `--gene_panel`. Using `gene_panel.json` in the xenium bundle.")
+    }
+
+    // check if --gene_panel is provided but --relabel_genes is disabled
+    if (gene_panel && !relabel_genes) {
+        log.warn("⚠️ Gene panel provided, but relabel genes is disabled. Using `gene_panel.json` only to generate metadata.")
+    }
+
+    // check if segmentation method is xeniumranger and nucleus_segmentation_only is disabled
+    if (method == 'xeniumranger' && !nucleus_segmentation_only) {
+        log.warn("⚠️ Nucleus segmentation is disabled. Running xeniumranger resegment module to redefine xenium bundle without nucleus segmentation.")
+        log.warn("⚠️ Use --nucleus_segmentation_only to enable nucleus segmentation to redefine xenium bundle with import-segmentation module.")
+    }
+
+    // check if segmentation mask is provided in image mode and baysor method
+    if (mode == 'image' && method == 'baysor') {
+        if (!segmentation_mask) {
+            log.warn("⚠️ Missing segmentation mask with `--segmentation_mask` when the pipeline is run in ${mode} mode with the ${method} method. Running in coordinate mode.")
+        }
+    }
+
+    // check if required arguments are provided for off-target probe tracking
+    if (!mode && offtarget_probe_tracking) {
+        if (!probes_fasta || !reference_annotations || !gene_synonyms) {
+            log.error("❌ Error: Missing required param(s) for off-target probe detection.")
+            exit(1)
+        }
+        log.error("❌ Error: Use --mode qc and --offtarget_probe_tracking to run off-target probe tracking.")
+        exit(1)
+    }
+}
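The mode/method checks above follow a guard-clause pattern. A compact, roughly equivalent formulation is sketched below; the method lists are illustrative only (inferred from the subworkflow names in this diff, not the pipeline's authoritative defaults):

```nextflow
// Sketch only - the pipeline uses the explicit if-blocks above.
def valid_methods = [
    image     : ['stardist', 'cellpose', 'xeniumranger'],  // assumed lists
    coordinate: ['baysor', 'proseg', 'segger'],
]
if (method && valid_methods.containsKey(mode) && !(method in valid_methods[mode])) {
    error("Invalid segmentation method: `${method}` for mode: `${mode}`. Options: ${valid_methods[mode]}")
}
```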
+
+//
+// Check and validate xenium bundle
+//
+def validateXeniumBundle(ch_samplesheet) {
+
+    // define xenium bundle directory structure - required files
+    def bundle_required_files = [
+        "cell_boundaries.csv.gz",
+        "cell_boundaries.parquet",
+        "cell_feature_matrix.h5",
+        "cell_feature_matrix.zarr.zip",
+        "cells.csv.gz",
+        "cells.parquet",
+        "cells.zarr.zip",
+        "experiment.xenium",
+        "gene_panel.json",
+        "metrics_summary.csv",
+        "morphology.ome.tif",
+        "morphology_focus/",
+        "nucleus_boundaries.csv.gz",
+        "nucleus_boundaries.parquet",
+        "transcripts.parquet",
+        "transcripts.zarr.zip",
+    ]
+
+    // bundle optional files
+    def bundle_optional_files = [
+        "analysis.tar.gz",
+        "analysis.zarr.zip",
+        "analysis_summary.html"
+    ]
+
+    // get bundle path (keep raw string for remote-path detection)
+    def ch_bundle_info = ch_samplesheet.map { _meta, bundle, _image ->
+        def rawPath = bundle.toString().replaceFirst(/\/$/, '')
+        def bundle_path = file(rawPath)
+        return [rawPath, bundle_path]
+    }
+
+    // Skip file-level validation for remote paths (S3, GS, AZ) because
+    // file().exists() is unreliable on cloud storage during initialization
+    // (Fusion mounts s3://bucket as /bucket, breaking startsWith checks).
+    // Files will be validated at task staging time instead.
+    ch_bundle_info.map { rawPath, path ->
+        if (rawPath.startsWith('s3://') || rawPath.startsWith('gs://') || rawPath.startsWith('az://')) {
+            log.info("Skipping bundle file validation for remote path: ${rawPath}")
+            return
+        }
+
+        def missing_required_files = []
+        def missing_optional_files = []
+
+        def requiredExist = bundle_required_files.every { filename ->
+            def fullPath = file("${path}/${filename}")
+            if (!fullPath.exists()) {
+                missing_required_files.add(filename)
+                return false
+            }
+            return true
+        }
+        if (!requiredExist) {
+            log.error("❌ Missing file(s) at bundle path provided in the samplesheet: ${missing_required_files}")
+            exit(1)
+        }
+
+        def optionalExist = bundle_optional_files.every { filename ->
+            def fullPath = file("${path}/${filename}")
+            if (!fullPath.exists()) {
+                missing_optional_files.add(filename)
+                return false
+            }
+            return true
+        }
+        if (!optionalExist) {
+            log.warn("⚠️ Missing optional file(s) at bundle path provided in the samplesheet: ${missing_optional_files}")
+        }
+
+        log.info("✅ Xenium bundle validated.\n")
+    }
+}
+
+//
+// Generate methods description for MultiQC
+//
+def toolCitationText() {
+    // Can use ternary operators to dynamically construct based conditions, e.g. params["run_xyz"] ? "Tool (Foo et al. 2023)" : "",
+    // Uncomment function in methodsDescriptionText to render in MultiQC report
+    def citation_text = [
+        "Tools used in the workflow included:",
+        "MultiQC (Ewels et al. 2016)",
+        ".",
+    ].join(' ').trim()
+
+    return citation_text
+}
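When the citation helper is eventually filled in, the intent is a comma-separated sentence listing each tool with its reference. A hypothetical populated version for this pipeline's tools is sketched below; the tool names and references are illustrative and would need to be confirmed before use:

```nextflow
// Hypothetical example of a populated toolCitationText - verify the
// actual tool list and references before enabling it in the pipeline.
def citation_text = [
    "Tools used in the workflow included:",
    "Baysor (Petukhov et al. 2022),",
    "StarDist (Schmidt et al. 2018),",
    "MultiQC (Ewels et al. 2016)",
    ".",
].join(' ').trim()
```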
+def toolBibliographyText() {
+    // Can use ternary operators to dynamically construct based conditions, e.g. params["run_xyz"] ? "<li>Author (2023) Pub name, Journal, DOI</li>" : "",
+    // Uncomment function in methodsDescriptionText to render in MultiQC report
+    def reference_text = [
+        "<li>Ewels, P., Magnusson, M., Lundin, S., & Käller, M. (2016). MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics, 32(19), 3047–3048. doi: 10.1093/bioinformatics/btw354</li>"
+    ].join(' ').trim()
+
+    return reference_text
+}
+
+def methodsDescriptionText(mqc_methods_yaml) {
+    // Convert to a named map so it can be used with the familiar NXF ${workflow} variable syntax in the MultiQC YML file
+    def meta = [:]
+    meta.workflow = workflow.toMap()
+    meta["manifest_map"] = workflow.manifest.toMap()
+
+    // Pipeline DOI
+    if (meta.manifest_map.doi) {
+        // Using a loop to handle multiple DOIs
+        // Removing `https://doi.org/` to handle pipelines using DOIs vs DOI resolvers
+        // Removing ` ` since the manifest.doi is a string and not a proper list
+        def temp_doi_ref = ""
+        def manifest_doi = meta.manifest_map.doi.tokenize(",")
+        manifest_doi.each { doi_ref ->
+            temp_doi_ref += "(doi: ${doi_ref.replace("https://doi.org/", "").replace(" ", "")}), "
+        }
+        meta["doi_text"] = temp_doi_ref.substring(0, temp_doi_ref.length() - 2)
+    }
+    else {
+        meta["doi_text"] = ""
+    }
+    meta["nodoi_text"] = meta.manifest_map.doi ? "" : "<li>If available, make sure to update the text to include the Zenodo DOI of version of the pipeline used.</li>"
+
+    // Tool references
+    meta["tool_citations"] = ""
+    meta["tool_bibliography"] = ""
+
+    // Only uncomment below if logic in toolCitationText/toolBibliographyText has been filled!
+    // meta["tool_citations"] = toolCitationText().replaceAll(", \\.", ".").replaceAll("\\. \\.", ".").replaceAll(", \\.", ".")
+    // meta["tool_bibliography"] = toolBibliographyText()
+
+    def methods_text = mqc_methods_yaml.text
+
+    def engine = new groovy.text.SimpleTemplateEngine()
+    def description_html = engine.createTemplate(methods_text).make(meta)
+
+    return description_html.toString()
+}
diff --git a/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/main.nf b/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/main.nf
new file mode 100644
index 00000000..4c7b41d5
--- /dev/null
+++ b/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/main.nf
@@ -0,0 +1,133 @@
+//
+// Run xeniumranger import-segmentation
+//
+
+include { XENIUMRANGER_IMPORT_SEGMENTATION as IMP_SEG_COUNT_MATRIX_EXP_DISTANCE } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION as IMP_SEG_POLYGON_GEOJSON_INPUT } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION as IMP_SEG_TRANS_MATRIX_INPUT } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+
+workflow XENIUMRANGER_IMPORT_SEGMENTATION_REDEFINE_BUNDLE {
+    take:
+    ch_bundle_path            // channel: [ val(meta), [ "path-to-xenium-bundle" ] ]
+    alignment_csv             // value: path to alignment csv (or null)
+    expansion_distance        // value: nuclear expansion distance
+    nucleus_segmentation_only // value: bool
+    qupath_polygons           // value: path to qupath polygons dir (or null)
+
+    main:
+
+    ch_versions = channel.empty()
+    ch_redefined_bundle = channel.empty()
+    ch_coordinate_space = channel.empty()
+
+    cells = ch_bundle_path.map { meta, bundle ->
+        return [meta, bundle + "/cells.zarr.zip"]
+    }
+
+    // scenario 1 - change the nuclear expansion distance / create a nucleus-only count matrix (--expansion_distance=0)
+    if (expansion_distance == 0 || expansion_distance != 5) {
+        ch_coordinate_space = channel.value("microns")
+        ch_imp_seg_inputs = ch_bundle_path
+            .combine(cells, by: 0)
+            .map { meta, bundle, cells_zarr ->
+                tuple(
+                    meta,
+                    bundle,
+                    [],
+                    [],
+                    cells_zarr,
+                    [],
+                    [],
+                    ch_coordinate_space.val,
+                )
+            }
+
+        IMP_SEG_COUNT_MATRIX_EXP_DISTANCE(
+            ch_imp_seg_inputs
+        )
+        ch_redefined_bundle = IMP_SEG_COUNT_MATRIX_EXP_DISTANCE.out.outs
+    }
+
+    // scenario 2 - polygon input - geojson format (from QuPath)
+    if (qupath_polygons && nucleus_segmentation_only) {
+
+        ch_coordinate_space = channel.value("microns")
+        ch_imp_seg_inputs = ch_bundle_path
+            .combine(qupath_polygons)
+            .map { meta, bundle, polygons_geojson ->
+                tuple(
+                    meta,
+                    bundle,
+                    [],
+                    [],
+                    polygons_geojson,
+                    [],
+                    [],
+                    ch_coordinate_space.val,
+                )
+            }
+
+        IMP_SEG_POLYGON_GEOJSON_INPUT(
+            ch_imp_seg_inputs
+        )
+        ch_redefined_bundle = IMP_SEG_POLYGON_GEOJSON_INPUT.out.outs
+    }
+    else if (qupath_polygons) {
+
+        ch_coordinate_space = channel.value("microns")
+        ch_imp_seg_inputs = ch_bundle_path
+            .combine(qupath_polygons)
+            .map { meta, bundle, polygons_geojson ->
+                tuple(
+                    meta,
+                    bundle,
+                    [],
+                    [],
+                    polygons_geojson,
+                    polygons_geojson,
+                    [],
+                    ch_coordinate_space.val,
+                )
+            }
+
+        IMP_SEG_POLYGON_GEOJSON_INPUT(
+            ch_imp_seg_inputs
+        )
+        ch_redefined_bundle = IMP_SEG_POLYGON_GEOJSON_INPUT.out.outs
+    }
+
+    // scenario 3 - mask input - included in the cellpose subworkflow
+
+    // scenario 4 - transcript assignment input - included in the baysor & proseg subworkflows
+
+    // scenario 5 - transformation matrix input
+    if (qupath_polygons && alignment_csv) {
+
+        ch_imp_seg_inputs = ch_bundle_path
+            .combine(qupath_polygons)
+            .combine(alignment_csv)
+            .map { meta, bundle, polygons_geojson, alignment_csv_file ->
+                tuple(
+                    meta,
+                    bundle,
+                    [],
+                    [],
+                    polygons_geojson,
+                    polygons_geojson,
+                    alignment_csv_file,
+                    ch_coordinate_space.val,
+                )
+            }
+
+        IMP_SEG_TRANS_MATRIX_INPUT(
+            ch_imp_seg_inputs
+        )
+        ch_redefined_bundle = IMP_SEG_TRANS_MATRIX_INPUT.out.outs
+    }
+
+    emit:
+    redefined_bundle = ch_redefined_bundle // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+    coordinate_space = ch_coordinate_space // channel: [ ["microns"] ]
+    versions = ch_versions                 // channel: [ versions.yml ]
+}
diff --git a/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/meta.yml b/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/meta.yml
new file mode 100644
index 00000000..a4be036f
--- /dev/null
+++ b/subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/meta.yml
@@ -0,0 +1,36 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "xeniumranger_import_segmentation_redefine_bundle"
+description: |
+  The import-segmentation pipeline allows you to specify 2D nuclei and/or cell segmentation results to use for
+  assigning transcripts to cells and recalculate all Xenium Onboard Analysis (XOA) outputs that depend on segmentation.
+keywords:
+  - xenium
+  - xeniumranger import-segmentation
+  - qupath
+  - expansion distance
+  - segmentation
+  - polygons
+components:
+  - xeniumranger/import/segmentation
+input:
+  - ch_bundle_path:
+      description: |
+        Directory containing the raw dataset
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+output:
+  - redefined_bundle:
+      description: |
+        The redefined xenium bundle generated by xeniumranger import-segmentation
+        Structure: [ val(meta), ["redefined-xenium-bundle"] ]
+  - coordinate_space:
+      description: |
+        The coordinate space in which xeniumranger import-segmentation was run
+        Structure: [ val("microns") ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/xeniumranger_relabel_resegment/main.nf b/subworkflows/local/xeniumranger_relabel_resegment/main.nf
new file mode 100644
index 00000000..424224a7
--- /dev/null
+++ b/subworkflows/local/xeniumranger_relabel_resegment/main.nf
@@ -0,0 +1,29 @@
+//
+// run xeniumranger relabel & resegment to redefine the xenium bundle
+//
+
+include { XENIUMRANGER_RELABEL } from '../../../modules/nf-core/xeniumranger/relabel/main'
+include { XENIUMRANGER_RESEGMENT } from '../../../modules/nf-core/xeniumranger/resegment/main'
+
+workflow XENIUMRANGER_RELABEL_RESEGMENT {
+    take:
+    ch_bundle_path // channel: [ val(meta), [ "path-to-xenium-bundle" ] ]
+    ch_gene_panel  // channel: [ val(meta), ["path-to-gene_panel.json"] ]
+
+    main:
+
+    ch_versions = channel.empty()
+
+    // Combine bundle path with gene panel into a single tuple for relabel
+    XENIUMRANGER_RELABEL(
+        ch_bundle_path.combine(ch_gene_panel, by: 0),
+    )
+
+    XENIUMRANGER_RESEGMENT(
+        XENIUMRANGER_RELABEL.out.outs
+    )
+
+    emit:
+    redefined_bundle = XENIUMRANGER_RESEGMENT.out.outs // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+    versions = ch_versions                             // channel: [ versions.yml ]
+}
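Every `XENIUMRANGER_IMPORT_SEGMENTATION` call in this diff consumes one fixed-arity eight-slot tuple, with unused slots padded by empty lists. A sketch of the shape, with slot names taken from the inline comments in the segger and stardist subworkflows above (a hedged reading of this diff, not the module's documented contract):

```nextflow
// Fixed eight-slot tuple fed to the import-segmentation module in this
// diff; pad unused slots with [] so the tuple arity never changes.
ch_imp_seg_inputs = ch_bundle_path.map { meta, bundle ->
    tuple(
        meta,
        bundle,
        [],        // transcript_assignment (e.g. Baysor-format CSV)
        [],        // viz_polygons (GeoJSON cell boundaries)
        [],        // nuclei segmentation input
        [],        // cells segmentation input
        [],        // coordinate_transform (alignment CSV)
        'microns', // coordinate space ("microns" or "pixels")
    )
}
```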
diff --git a/subworkflows/local/xeniumranger_relabel_resegment/meta.yml b/subworkflows/local/xeniumranger_relabel_resegment/meta.yml
new file mode 100644
index 00000000..d8628f4d
--- /dev/null
+++ b/subworkflows/local/xeniumranger_relabel_resegment/meta.yml
@@ -0,0 +1,37 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "xeniumranger_relabel_resegment"
+description: |
+  The relabel pipeline allows you to change the gene labels applied to decoded transcripts.
+  The resegment pipeline allows you to generate a new segmentation of the morphology image space by rerunning the
+  Xenium Onboard Analysis (XOA) segmentation algorithms with modified parameters.
+keywords:
+  - xenium
+  - xeniumranger resegment
+  - expansion distance
+  - gene panel
+  - relabel
+components:
+  - xeniumranger/relabel
+  - xeniumranger/resegment
+input:
+  - ch_bundle_path:
+      description: |
+        Directory containing the raw dataset
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+  - ch_gene_panel:
+      description: |
+        The gene panel JSON file to use for relabeling decoded transcripts
+        Structure: [ val(meta), path("path-to-gene_panel.json") ]
+output:
+  - redefined_bundle:
+      description: |
+        The redefined xenium bundle generated by xeniumranger relabel and resegment
+        Structure: [ val(meta), ["redefined-xenium-bundle"] ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/main.nf b/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/main.nf
new file mode 100644
index 00000000..5a186a11
--- /dev/null
+++ b/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/main.nf
@@ -0,0 +1,61 @@
+//
+// Run xeniumranger resegment
+//
+
+include { XENIUMRANGER_RESEGMENT } from '../../../modules/nf-core/xeniumranger/resegment/main'
+include { XENIUMRANGER_IMPORT_SEGMENTATION } from '../../../modules/nf-core/xeniumranger/import-segmentation/main'
+
+workflow XENIUMRANGER_RESEGMENT_MORPHOLOGY_OME_TIF {
+    take:
+    ch_bundle_path            // channel: [ val(meta), ["path-to-xenium-bundle"] ]
+    nucleus_segmentation_only // value: bool
+
+    main:
+
+    ch_redefined_bundle = channel.empty()
+    ch_coordinate_space = channel.value("pixels")
+
+    // run resegment with changed config values
+    XENIUMRANGER_RESEGMENT(ch_bundle_path)
+
+
+    // run import-segmentation to redefine the xenium bundle along with nuclear segmentation
+    // Keep meta in the cells channel for proper per-sample joining
+    def cells = XENIUMRANGER_RESEGMENT.out.outs.map { meta, bundle ->
+        return [meta, bundle + "/cells.zarr.zip"]
+    }
+
+    // adjust the nuclear expansion distance without altering nuclei detection
+    if (nucleus_segmentation_only) {
+
+        def ch_imp_seg_inputs = ch_bundle_path
+            .join(XENIUMRANGER_RESEGMENT.out.outs, by: 0)
+            .join(cells, by: 0)
+            .map { meta, bundle, reseg_bundle, cells_zarr ->
+                tuple(
+                    meta,
+                    bundle,
+                    [],
+                    [],
+                    [],
+                    cells_zarr,
+                    [],
+                    "pixels",
+                )
+            }
+
+        XENIUMRANGER_IMPORT_SEGMENTATION(
+            ch_imp_seg_inputs
+        )
+
+        ch_redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION.out.outs
+    }
+    else {
+
+        ch_redefined_bundle = XENIUMRANGER_RESEGMENT.out.outs
+    }
+
+    emit:
+    redefined_bundle = ch_redefined_bundle // channel: [ val(meta), ["redefined-xenium-bundle"] ]
+    coordinate_space = ch_coordinate_space // channel: [ ["pixels"] ]
+}
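Several of these subworkflows keep the coordinate space in a value channel and read it with `.val` inside `map` closures. A value channel can be read any number of times without being consumed, which is what makes that pattern safe; a minimal sketch:

```nextflow
workflow {
    // channel.value creates a reusable value channel; .val returns its
    // content directly, so it can be embedded inside closures repeatedly.
    ch_coordinate_space = channel.value('pixels')

    channel.of([[id: 's1'], 'bundle_s1'])
        .map { meta, bundle -> tuple(meta, bundle, ch_coordinate_space.val) }
        .view()  // [[id:s1], bundle_s1, pixels]
}
```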
diff --git a/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/meta.yml b/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/meta.yml
new file mode 100644
index 00000000..db6a07a8
--- /dev/null
+++ b/subworkflows/local/xeniumranger_resegment_morphology_ome_tif/meta.yml
@@ -0,0 +1,37 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "xeniumranger_resegment_morphology_ome_tif"
+description: |
+  The resegment pipeline allows you to generate a new segmentation of the morphology image space by rerunning the
+  Xenium Onboard Analysis (XOA) segmentation algorithms with modified parameters.
+keywords:
+  - xenium
+  - xeniumranger resegment
+  - xeniumranger import-segmentation
+  - expansion distance
+  - cell segmentation
+  - nucleus segmentation
+components:
+  - xeniumranger/import/segmentation
+  - xeniumranger/resegment
+input:
+  - ch_bundle_path:
+      description: |
+        Directory containing the raw dataset
+        Structure: [ val(meta), path("path-to-xenium-bundle") ]
+output:
+  - redefined_bundle:
+      description: |
+        The redefined xenium bundle generated by rerunning the XOA segmentation algorithms
+        Structure: [ val(meta), ["redefined-xenium-bundle"] ]
+  - coordinate_space:
+      description: |
+        The coordinate space in which xeniumranger import-segmentation was run
+        Structure: [ val("pixels") ]
+  - versions:
+      description: |
+        Files containing software versions
+        Structure: [ path(versions.yml) ]
+authors:
+  - "@khersameesh24"
+maintainers:
+  - "@khersameesh24"
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/main.nf b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf
new file mode 100644
index 00000000..d6e593e8
--- /dev/null
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf
@@ -0,0 +1,126 @@
+//
+// Subworkflow with functionality that may be useful for any Nextflow pipeline
+//
+
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    SUBWORKFLOW DEFINITION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/
+
+workflow UTILS_NEXTFLOW_PIPELINE {
+    take:
+    print_version        // boolean: print version
+    dump_parameters      // boolean: dump parameters
+    outdir               // path: base directory used to publish pipeline results
+    check_conda_channels // boolean: check conda channels
+
+    main:
+
+    //
+    // Print workflow version and exit on --version
+    //
+    if (print_version) {
+        log.info("${workflow.manifest.name} ${getWorkflowVersion()}")
+        System.exit(0)
+    }
+
+    //
+    // Dump pipeline parameters to a JSON file
+    //
+    if (dump_parameters && outdir) {
+        dumpParametersToJSON(outdir)
+    }
+
+    //
+    // When running with Conda, warn if channels have not been set-up appropriately
+    //
+    if (check_conda_channels) {
+        checkCondaChannels()
+    }
+
+    emit:
+    dummy_emit = true
+}
+
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    FUNCTIONS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/
+
+//
+// Generate version string
+//
+def getWorkflowVersion() {
+    def version_string = "" as String
+    if (workflow.manifest.version) {
+        def prefix_v = workflow.manifest.version[0] != 'v' ?
'v' : '' + version_string += "${prefix_v}${workflow.manifest.version}" + } + + if (workflow.commitId) { + def git_shortsha = workflow.commitId.substring(0, 7) + version_string += "-g${git_shortsha}" + } + + return version_string +} + +// +// Dump pipeline parameters to a JSON file +// +def dumpParametersToJSON(outdir) { + def timestamp = new java.util.Date().format('yyyy-MM-dd_HH-mm-ss') + def filename = "params_${timestamp}.json" + def temp_pf = new File(workflow.launchDir.toString(), ".${filename}") + def jsonStr = groovy.json.JsonOutput.toJson(params) + temp_pf.text = groovy.json.JsonOutput.prettyPrint(jsonStr) + + nextflow.extension.FilesEx.copyTo(temp_pf.toPath(), "${outdir}/pipeline_info/params_${timestamp}.json") + temp_pf.delete() +} + +// +// When running with -profile conda, warn if channels have not been set-up appropriately +// +def checkCondaChannels() { + def parser = new org.yaml.snakeyaml.Yaml() + def channels = [] + try { + def config = parser.load("conda config --show channels".execute().text) + channels = config.channels + } + catch (NullPointerException e) { + log.debug(e) + log.warn("Could not verify conda channel configuration.") + return null + } + catch (IOException e) { + log.debug(e) + log.warn("Could not verify conda channel configuration.") + return null + } + + // Check that all channels are present + // This channel list is ordered by required channel priority. + def required_channels_in_order = ['conda-forge', 'bioconda'] + def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean + + // Check that they are in the right order + def channel_priority_violation = required_channels_in_order != channels.findAll { ch -> ch in required_channels_in_order } + + if (channels_missing | channel_priority_violation) { + log.warn """\ + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + There is a problem with your Conda configuration! + You will need to set-up the conda-forge and bioconda channels correctly. + Please refer to https://bioconda.github.io/ + The observed channel order is + ${channels} + but the following channel order is required: + ${required_channels_in_order} + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + """.stripIndent(true) + } +} diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/meta.yml b/subworkflows/nf-core/utils_nextflow_pipeline/meta.yml new file mode 100644 index 00000000..e5c3a0a8 --- /dev/null +++ b/subworkflows/nf-core/utils_nextflow_pipeline/meta.yml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "UTILS_NEXTFLOW_PIPELINE" +description: Subworkflow with functionality that may be useful for any Nextflow pipeline +keywords: + - utility + - pipeline + - initialise + - version +components: [] +input: + - print_version: + type: boolean + description: | + Print the version of the pipeline and exit + - dump_parameters: + type: boolean + description: | + Dump the parameters of the pipeline to a JSON file + - output_directory: + type: directory + description: Path to output dir to write JSON file to. + pattern: "results/" + - check_conda_channel: + type: boolean + description: | + Check if the conda channel priority is correct. 
+output:
+  - dummy_emit:
+      type: boolean
+      description: |
+        Dummy emit to make nf-core subworkflows lint happy
+authors:
+  - "@adamrtalbot"
+  - "@drpatelh"
+maintainers:
+  - "@adamrtalbot"
+  - "@drpatelh"
+  - "@maxulysse"
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test
new file mode 100644
index 00000000..897d6681
--- /dev/null
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test
@@ -0,0 +1,54 @@
+
+nextflow_function {
+
+    name "Test Functions"
+    script "subworkflows/nf-core/utils_nextflow_pipeline/main.nf"
+    config "subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config"
+    tag 'subworkflows'
+    tag 'utils_nextflow_pipeline'
+    tag 'subworkflows/utils_nextflow_pipeline'
+
+    test("Test Function getWorkflowVersion") {
+
+        function "getWorkflowVersion"
+
+        then {
+            assertAll(
+                { assert function.success },
+                { assert snapshot(function.result).match() }
+            )
+        }
+    }
+
+    test("Test Function dumpParametersToJSON") {
+
+        function "dumpParametersToJSON"
+
+        when {
+            function {
+                """
+                // define inputs of the function here. Example:
+                input[0] = "$outputDir"
+                """.stripIndent()
+            }
+        }
+
+        then {
+            assertAll(
+                { assert function.success }
+            )
+        }
+    }
+
+    test("Test Function checkCondachannels") {
+
+        function "checkCondaChannels"
+
+        then {
+            assertAll(
+                { assert function.success },
+                { assert snapshot(function.result).match() }
+            )
+        }
+    }
+}
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap
new file mode 100644
index 00000000..f03a352a
--- /dev/null
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap
@@ -0,0 +1,20 @@
+{
+    "Test Function getWorkflowVersion": {
+        "content": [
+            "v9.9.9"
+        ],
+        "meta": {
+            "nf-test": "0.8.4",
+            "nextflow": "23.10.1"
+        },
+        "timestamp": "2024-02-28T12:02:05.308243"
+    },
+    "Test Function checkCondachannels": {
+        "content": null,
+        "meta": {
+            "nf-test": "0.8.4",
+            "nextflow": "23.10.1"
+        },
+        "timestamp": "2024-02-28T12:02:12.425833"
+    }
+}
\ No newline at end of file
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.workflow.nf.test b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.workflow.nf.test
new file mode 100644
index 00000000..02dbf094
--- /dev/null
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.workflow.nf.test
@@ -0,0 +1,113 @@
+nextflow_workflow {
+
+    name "Test Workflow UTILS_NEXTFLOW_PIPELINE"
+    script "../main.nf"
+    config "subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config"
+    workflow "UTILS_NEXTFLOW_PIPELINE"
+    tag 'subworkflows'
+    tag 'utils_nextflow_pipeline'
+    tag 'subworkflows/utils_nextflow_pipeline'
+
+    test("Should run no inputs") {
+
+        when {
+            workflow {
+                """
+                print_version        = false
+                dump_parameters      = false
+                outdir               = null
+                check_conda_channels = false
+
+                input[0] = print_version
+                input[1] = dump_parameters
+                input[2] = outdir
+                input[3] = check_conda_channels
+                """
+            }
+        }
+
+        then {
+            assertAll(
+                { assert workflow.success }
+            )
+        }
+    }
+
+    test("Should print version") {
+
+        when {
+            workflow {
+                """
+                print_version        = true
+                dump_parameters      = false
+                outdir               = null
+                check_conda_channels = false
+
+                input[0] = print_version
+                input[1] = dump_parameters
+                input[2] = outdir
+                input[3] = check_conda_channels
+                """
+            }
+        }
+
+        then {
+            expect {
+                with(workflow) {
+                    assert success
assert "nextflow_workflow v9.9.9" in stdout + } + } + } + } + + test("Should dump params") { + + when { + workflow { + """ + print_version = false + dump_parameters = true + outdir = 'results' + check_conda_channels = false + + input[0] = false + input[1] = true + input[2] = outdir + input[3] = false + """ + } + } + + then { + assertAll( + { assert workflow.success } + ) + } + } + + test("Should not create params JSON if no output directory") { + + when { + workflow { + """ + print_version = false + dump_parameters = true + outdir = null + check_conda_channels = false + + input[0] = false + input[1] = true + input[2] = outdir + input[3] = false + """ + } + } + + then { + assertAll( + { assert workflow.success } + ) + } + } +} diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config b/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config new file mode 100644 index 00000000..a09572e5 --- /dev/null +++ b/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config @@ -0,0 +1,9 @@ +manifest { + name = 'nextflow_workflow' + author = """nf-core""" + homePage = 'https://127.0.0.1' + description = """Dummy pipeline""" + nextflowVersion = '!>=23.04.0' + version = '9.9.9' + doi = 'https://doi.org/10.5281/zenodo.5070524' +} diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/main.nf b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf new file mode 100644 index 00000000..bfd25876 --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf @@ -0,0 +1,419 @@ +// +// Subworkflow with utility functions specific to the nf-core pipeline template +// + +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SUBWORKFLOW DEFINITION +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*/ + +workflow UTILS_NFCORE_PIPELINE { + take: + nextflow_cli_args + + main: + valid_config = checkConfigProvided() + checkProfileProvided(nextflow_cli_args) + + emit: + valid_config +} + +/* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + FUNCTIONS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*/ + +// +// Warn if a -profile or Nextflow config has not been provided to run the pipeline +// +def checkConfigProvided() { + def valid_config = true as Boolean + if (workflow.profile == 'standard' && workflow.configFiles.size() <= 1) { + log.warn( + "[${workflow.manifest.name}] You are attempting to run the pipeline without any custom configuration!\n\n" + "This will be dependent on your local compute environment but can be achieved via one or more of the following:\n" + " (1) Using an existing pipeline profile e.g. `-profile docker` or `-profile singularity`\n" + " (2) Using an existing nf-core/configs for your Institution e.g. `-profile crick` or `-profile uppmax`\n" + " (3) Using your own local custom config e.g. `-c /path/to/your/custom.config`\n\n" + "Please refer to the quick start section and usage docs for the pipeline.\n " + ) + valid_config = false + } + return valid_config +} + +// +// Exit pipeline if --profile contains spaces +// +def checkProfileProvided(nextflow_cli_args) { + if (workflow.profile.endsWith(',')) { + error( + "The `-profile` option cannot end with a trailing comma, please remove it and re-run the pipeline!\n" + "HINT: A common mistake is to provide multiple values separated by spaces e.g. 
`-profile test, docker`.\n" + ) + } + if (nextflow_cli_args[0]) { + log.warn( + "nf-core pipelines do not accept positional arguments. The positional argument `${nextflow_cli_args[0]}` has been detected.\n" + "HINT: A common mistake is to provide multiple values separated by spaces e.g. `-profile test, docker`.\n" + ) + } +} + +// +// Generate workflow version string +// +def getWorkflowVersion() { + def version_string = "" as String + if (workflow.manifest.version) { + def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : '' + version_string += "${prefix_v}${workflow.manifest.version}" + } + + if (workflow.commitId) { + def git_shortsha = workflow.commitId.substring(0, 7) + version_string += "-g${git_shortsha}" + } + + return version_string +} + +// +// Get software versions for pipeline +// +def processVersionsFromYAML(yaml_file) { + def yaml = new org.yaml.snakeyaml.Yaml() + def versions = yaml.load(yaml_file).collectEntries { k, v -> [k.tokenize(':')[-1], v] } + return yaml.dumpAsMap(versions).trim() +} + +// +// Get workflow version for pipeline +// +def workflowVersionToYAML() { + return """ + Workflow: + ${workflow.manifest.name}: ${getWorkflowVersion()} + Nextflow: ${workflow.nextflow.version} + """.stripIndent().trim() +} + +// +// Get channel of software versions used in pipeline in YAML format +// +def softwareVersionsToYAML(ch_versions) { + return ch_versions.unique().map { version -> processVersionsFromYAML(version) }.unique().mix(Channel.of(workflowVersionToYAML())) +} + +// +// Get workflow summary for MultiQC +// +def paramsSummaryMultiqc(summary_params) { + def summary_section = '' + summary_params + .keySet() + .each { group -> + def group_params = summary_params.get(group) + // This gets the parameters of that particular group + if (group_params) { + summary_section += "
    <p style=\"font-size:110%\"><b>${group}</b></p>\n"
+                summary_section += "    <dl class=\"dl-horizontal\">\n"
+                group_params
+                    .keySet()
+                    .sort()
+                    .each { param ->
+                        summary_section += "        <dt>${param}</dt><dd><samp>${group_params.get(param) ?: '<span style=\"color:#999999;\">N/A</span>'}</samp></dd>\n"
+                    }
+                summary_section += "    </dl>\n"
+            }
+        }
+
+    def yaml_file_text = "id: '${workflow.manifest.name.replace('/', '-')}-summary'\n" as String
+    yaml_file_text += "description: ' - this information is collected when the pipeline is started.'\n"
+    yaml_file_text += "section_name: '${workflow.manifest.name} Workflow Summary'\n"
+    yaml_file_text += "section_href: 'https://github.com/${workflow.manifest.name}'\n"
+    yaml_file_text += "plot_type: 'html'\n"
+    yaml_file_text += "data: |\n"
+    yaml_file_text += "${summary_section}"
+
+    return yaml_file_text
+}
+
+//
+// ANSI colours used for terminal logging
+//
+def logColours(monochrome_logs=true) {
+    def colorcodes = [:] as Map
+
+    // Reset / Meta
+    colorcodes['reset']      = monochrome_logs ? '' : "\033[0m"
+    colorcodes['bold']       = monochrome_logs ? '' : "\033[1m"
+    colorcodes['dim']        = monochrome_logs ? '' : "\033[2m"
+    colorcodes['underlined'] = monochrome_logs ? '' : "\033[4m"
+    colorcodes['blink']      = monochrome_logs ? '' : "\033[5m"
+    colorcodes['reverse']    = monochrome_logs ? '' : "\033[7m"
+    colorcodes['hidden']     = monochrome_logs ? '' : "\033[8m"
+
+    // Regular Colors
+    colorcodes['black']  = monochrome_logs ? '' : "\033[0;30m"
+    colorcodes['red']    = monochrome_logs ? '' : "\033[0;31m"
+    colorcodes['green']  = monochrome_logs ? '' : "\033[0;32m"
+    colorcodes['yellow'] = monochrome_logs ? '' : "\033[0;33m"
+    colorcodes['blue']   = monochrome_logs ? '' : "\033[0;34m"
+    colorcodes['purple'] = monochrome_logs ? '' : "\033[0;35m"
+    colorcodes['cyan']   = monochrome_logs ? '' : "\033[0;36m"
+    colorcodes['white']  = monochrome_logs ? '' : "\033[0;37m"
+
+    // Bold
+    colorcodes['bblack']  = monochrome_logs ? '' : "\033[1;30m"
+    colorcodes['bred']    = monochrome_logs ? '' : "\033[1;31m"
+    colorcodes['bgreen']  = monochrome_logs ? '' : "\033[1;32m"
+    colorcodes['byellow'] = monochrome_logs ? '' : "\033[1;33m"
+    colorcodes['bblue']   = monochrome_logs ? '' : "\033[1;34m"
+    colorcodes['bpurple'] = monochrome_logs ? '' : "\033[1;35m"
+    colorcodes['bcyan']   = monochrome_logs ? '' : "\033[1;36m"
+    colorcodes['bwhite']  = monochrome_logs ? '' : "\033[1;37m"
+
+    // Underline
+    colorcodes['ublack']  = monochrome_logs ? '' : "\033[4;30m"
+    colorcodes['ured']    = monochrome_logs ? '' : "\033[4;31m"
+    colorcodes['ugreen']  = monochrome_logs ? '' : "\033[4;32m"
+    colorcodes['uyellow'] = monochrome_logs ? '' : "\033[4;33m"
+    colorcodes['ublue']   = monochrome_logs ? '' : "\033[4;34m"
+    colorcodes['upurple'] = monochrome_logs ? '' : "\033[4;35m"
+    colorcodes['ucyan']   = monochrome_logs ? '' : "\033[4;36m"
+    colorcodes['uwhite']  = monochrome_logs ? '' : "\033[4;37m"
+
+    // High Intensity
+    colorcodes['iblack']  = monochrome_logs ? '' : "\033[0;90m"
+    colorcodes['ired']    = monochrome_logs ? '' : "\033[0;91m"
+    colorcodes['igreen']  = monochrome_logs ? '' : "\033[0;92m"
+    colorcodes['iyellow'] = monochrome_logs ? '' : "\033[0;93m"
+    colorcodes['iblue']   = monochrome_logs ? '' : "\033[0;94m"
+    colorcodes['ipurple'] = monochrome_logs ? '' : "\033[0;95m"
+    colorcodes['icyan']   = monochrome_logs ? '' : "\033[0;96m"
+    colorcodes['iwhite']  = monochrome_logs ? '' : "\033[0;97m"
+
+    // Bold High Intensity
+    colorcodes['biblack']  = monochrome_logs ? '' : "\033[1;90m"
+    colorcodes['bired']    = monochrome_logs ? '' : "\033[1;91m"
+    colorcodes['bigreen']  = monochrome_logs ? '' : "\033[1;92m"
+    colorcodes['biyellow'] = monochrome_logs ? '' : "\033[1;93m"
+    colorcodes['biblue']   = monochrome_logs ? '' : "\033[1;94m"
+    colorcodes['bipurple'] = monochrome_logs ? '' : "\033[1;95m"
+    colorcodes['bicyan']   = monochrome_logs ?
'' : "\033[1;96m" + colorcodes['biwhite'] = monochrome_logs ? '' : "\033[1;97m" + + return colorcodes +} + +// Return a single report from an object that may be a Path or List +// +def getSingleReport(multiqc_reports) { + if (multiqc_reports instanceof Path) { + return multiqc_reports + } else if (multiqc_reports instanceof List) { + if (multiqc_reports.size() == 0) { + log.warn("[${workflow.manifest.name}] No reports found from process 'MULTIQC'") + return null + } else if (multiqc_reports.size() == 1) { + return multiqc_reports.first() + } else { + log.warn("[${workflow.manifest.name}] Found multiple reports from process 'MULTIQC', will use only one") + return multiqc_reports.first() + } + } else { + return null + } +} + +// +// Construct and send completion email +// +def completionEmail(summary_params, email, email_on_fail, plaintext_email, outdir, monochrome_logs=true, multiqc_report=null) { + + // Set up the e-mail variables + def subject = "[${workflow.manifest.name}] Successful: ${workflow.runName}" + if (!workflow.success) { + subject = "[${workflow.manifest.name}] FAILED: ${workflow.runName}" + } + + def summary = [:] + summary_params + .keySet() + .sort() + .each { group -> + summary << summary_params[group] + } + + def misc_fields = [:] + misc_fields['Date Started'] = workflow.start + misc_fields['Date Completed'] = workflow.complete + misc_fields['Pipeline script file path'] = workflow.scriptFile + misc_fields['Pipeline script hash ID'] = workflow.scriptId + if (workflow.repository) { + misc_fields['Pipeline repository Git URL'] = workflow.repository + } + if (workflow.commitId) { + misc_fields['Pipeline repository Git Commit'] = workflow.commitId + } + if (workflow.revision) { + misc_fields['Pipeline Git branch/tag'] = workflow.revision + } + misc_fields['Nextflow Version'] = workflow.nextflow.version + misc_fields['Nextflow Build'] = workflow.nextflow.build + misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp + + def email_fields = [:] + email_fields['version'] = getWorkflowVersion() + email_fields['runName'] = workflow.runName + email_fields['success'] = workflow.success + email_fields['dateComplete'] = workflow.complete + email_fields['duration'] = workflow.duration + email_fields['exitStatus'] = workflow.exitStatus + email_fields['errorMessage'] = (workflow.errorMessage ?: 'None') + email_fields['errorReport'] = (workflow.errorReport ?: 'None') + email_fields['commandLine'] = workflow.commandLine + email_fields['projectDir'] = workflow.projectDir + email_fields['summary'] = summary << misc_fields + + // On success try attach the multiqc report + def mqc_report = getSingleReport(multiqc_report) + + // Check if we are only sending emails on failure + def email_address = email + if (!email && email_on_fail && !workflow.success) { + email_address = email_on_fail + } + + // Render the TXT template + def engine = new groovy.text.GStringTemplateEngine() + def tf = new File("${workflow.projectDir}/assets/email_template.txt") + def txt_template = engine.createTemplate(tf).make(email_fields) + def email_txt = txt_template.toString() + + // Render the HTML template + def hf = new File("${workflow.projectDir}/assets/email_template.html") + def html_template = engine.createTemplate(hf).make(email_fields) + def email_html = html_template.toString() + + // Render the sendmail template + def max_multiqc_email_size = (params.containsKey('max_multiqc_email_size') ? 
params.max_multiqc_email_size : 0) as MemoryUnit + def smail_fields = [email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, projectDir: "${workflow.projectDir}", mqcFile: mqc_report, mqcMaxSize: max_multiqc_email_size.toBytes()] + def sf = new File("${workflow.projectDir}/assets/sendmail_template.txt") + def sendmail_template = engine.createTemplate(sf).make(smail_fields) + def sendmail_html = sendmail_template.toString() + + // Send the HTML e-mail + def colors = logColours(monochrome_logs) as Map + if (email_address) { + try { + if (plaintext_email) { + new org.codehaus.groovy.GroovyException('Send plaintext e-mail, not HTML') + } + // Try to send HTML e-mail using sendmail + def sendmail_tf = new File(workflow.launchDir.toString(), ".sendmail_tmp.html") + sendmail_tf.withWriter { w -> w << sendmail_html } + ['sendmail', '-t'].execute() << sendmail_html + log.info("-${colors.purple}[${workflow.manifest.name}]${colors.green} Sent summary e-mail to ${email_address} (sendmail)-") + } + catch (Exception msg) { + log.debug(msg.toString()) + log.debug("Trying with mail instead of sendmail") + // Catch failures and try with plaintext + def mail_cmd = ['mail', '-s', subject, '--content-type=text/html', email_address] + mail_cmd.execute() << email_html + log.info("-${colors.purple}[${workflow.manifest.name}]${colors.green} Sent summary e-mail to ${email_address} (mail)-") + } + } + + // Write summary e-mail HTML to a file + def output_hf = new File(workflow.launchDir.toString(), ".pipeline_report.html") + output_hf.withWriter { w -> w << email_html } + nextflow.extension.FilesEx.copyTo(output_hf.toPath(), "${outdir}/pipeline_info/pipeline_report.html") + output_hf.delete() + + // Write summary e-mail TXT to a file + def output_tf = new File(workflow.launchDir.toString(), ".pipeline_report.txt") + output_tf.withWriter { w -> w << email_txt } + nextflow.extension.FilesEx.copyTo(output_tf.toPath(), "${outdir}/pipeline_info/pipeline_report.txt") + output_tf.delete() +} + +// +// Print pipeline summary on completion +// +def completionSummary(monochrome_logs=true) { + def colors = logColours(monochrome_logs) as Map + if (workflow.success) { + if (workflow.stats.ignoredCount == 0) { + log.info("-${colors.purple}[${workflow.manifest.name}]${colors.green} Pipeline completed successfully${colors.reset}-") + } + else { + log.info("-${colors.purple}[${workflow.manifest.name}]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-") + } + } + else { + log.info("-${colors.purple}[${workflow.manifest.name}]${colors.red} Pipeline completed with errors${colors.reset}-") + } +} + +// +// Construct and send a notification to a web server as JSON e.g. 
Microsoft Teams and Slack +// +def imNotification(summary_params, hook_url) { + def summary = [:] + summary_params + .keySet() + .sort() + .each { group -> + summary << summary_params[group] + } + + def misc_fields = [:] + misc_fields['start'] = workflow.start + misc_fields['complete'] = workflow.complete + misc_fields['scriptfile'] = workflow.scriptFile + misc_fields['scriptid'] = workflow.scriptId + if (workflow.repository) { + misc_fields['repository'] = workflow.repository + } + if (workflow.commitId) { + misc_fields['commitid'] = workflow.commitId + } + if (workflow.revision) { + misc_fields['revision'] = workflow.revision + } + misc_fields['nxf_version'] = workflow.nextflow.version + misc_fields['nxf_build'] = workflow.nextflow.build + misc_fields['nxf_timestamp'] = workflow.nextflow.timestamp + + def msg_fields = [:] + msg_fields['version'] = getWorkflowVersion() + msg_fields['runName'] = workflow.runName + msg_fields['success'] = workflow.success + msg_fields['dateComplete'] = workflow.complete + msg_fields['duration'] = workflow.duration + msg_fields['exitStatus'] = workflow.exitStatus + msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None') + msg_fields['errorReport'] = (workflow.errorReport ?: 'None') + msg_fields['commandLine'] = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "") + msg_fields['projectDir'] = workflow.projectDir + msg_fields['summary'] = summary << misc_fields + + // Render the JSON template + def engine = new groovy.text.GStringTemplateEngine() + // Different JSON depending on the service provider + // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format + def json_path = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json" + def hf = new File("${workflow.projectDir}/assets/${json_path}") + def json_template = engine.createTemplate(hf).make(msg_fields) + def json_message = json_template.toString() + + // POST + def post = new URL(hook_url).openConnection() + post.setRequestMethod("POST") + post.setDoOutput(true) + post.setRequestProperty("Content-Type", "application/json") + post.getOutputStream().write(json_message.getBytes("UTF-8")) + def postRC = post.getResponseCode() + if (!postRC.equals(200)) { + log.warn(post.getErrorStream().getText()) + } +} diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/meta.yml b/subworkflows/nf-core/utils_nfcore_pipeline/meta.yml new file mode 100644 index 00000000..d08d2434 --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/meta.yml @@ -0,0 +1,24 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "UTILS_NFCORE_PIPELINE" +description: Subworkflow with utility functions specific to the nf-core pipeline template +keywords: + - utility + - pipeline + - initialise + - version +components: [] +input: + - nextflow_cli_args: + type: list + description: | + Nextflow CLI positional arguments +output: + - success: + type: boolean + description: | + Dummy output to indicate success +authors: + - "@adamrtalbot" +maintainers: + - "@adamrtalbot" + - "@maxulysse" diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test new file mode 100644 index 00000000..f117040c --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test @@ -0,0 +1,126 @@ + +nextflow_function { + + name "Test Functions" + script "../main.nf" + config 
"subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config" + tag "subworkflows" + tag "subworkflows_nfcore" + tag "utils_nfcore_pipeline" + tag "subworkflows/utils_nfcore_pipeline" + + test("Test Function checkConfigProvided") { + + function "checkConfigProvided" + + then { + assertAll( + { assert function.success }, + { assert snapshot(function.result).match() } + ) + } + } + + test("Test Function checkProfileProvided") { + + function "checkProfileProvided" + + when { + function { + """ + input[0] = [] + """ + } + } + + then { + assertAll( + { assert function.success }, + { assert snapshot(function.result).match() } + ) + } + } + + test("Test Function without logColours") { + + function "logColours" + + when { + function { + """ + input[0] = true + """ + } + } + + then { + assertAll( + { assert function.success }, + { assert snapshot(function.result).match() } + ) + } + } + + test("Test Function with logColours") { + function "logColours" + + when { + function { + """ + input[0] = false + """ + } + } + + then { + assertAll( + { assert function.success }, + { assert snapshot(function.result).match() } + ) + } + } + + test("Test Function getSingleReport with a single file") { + function "getSingleReport" + + when { + function { + """ + input[0] = file(params.modules_testdata_base_path + '/generic/tsv/test.tsv', checkIfExists: true) + """ + } + } + + then { + assertAll( + { assert function.success }, + { assert function.result.contains("test.tsv") } + ) + } + } + + test("Test Function getSingleReport with multiple files") { + function "getSingleReport" + + when { + function { + """ + input[0] = [ + file(params.modules_testdata_base_path + '/generic/tsv/test.tsv', checkIfExists: true), + file(params.modules_testdata_base_path + '/generic/tsv/network.tsv', checkIfExists: true), + file(params.modules_testdata_base_path + '/generic/tsv/expression.tsv', checkIfExists: true) + ] + """ + } + } + + then { + assertAll( + { assert function.success }, + { assert function.result.contains("test.tsv") }, + { assert !function.result.contains("network.tsv") }, + { assert !function.result.contains("expression.tsv") } + ) + } + } +} diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap new file mode 100644 index 00000000..02c67014 --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap @@ -0,0 +1,136 @@ +{ + "Test Function checkProfileProvided": { + "content": null, + "meta": { + "nf-test": "0.8.4", + "nextflow": "23.10.1" + }, + "timestamp": "2024-02-28T12:03:03.360873" + }, + "Test Function checkConfigProvided": { + "content": [ + true + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "23.10.1" + }, + "timestamp": "2024-02-28T12:02:59.729647" + }, + "Test Function without logColours": { + "content": [ + { + "reset": "", + "bold": "", + "dim": "", + "underlined": "", + "blink": "", + "reverse": "", + "hidden": "", + "black": "", + "red": "", + "green": "", + "yellow": "", + "blue": "", + "purple": "", + "cyan": "", + "white": "", + "bblack": "", + "bred": "", + "bgreen": "", + "byellow": "", + "bblue": "", + "bpurple": "", + "bcyan": "", + "bwhite": "", + "ublack": "", + "ured": "", + "ugreen": "", + "uyellow": "", + "ublue": "", + "upurple": "", + "ucyan": "", + "uwhite": "", + "iblack": "", + "ired": "", + "igreen": "", + "iyellow": "", + "iblue": "", + "ipurple": "", + "icyan": "", + "iwhite": "", + "biblack": "", + "bired": "", + "bigreen": 
"", + "biyellow": "", + "biblue": "", + "bipurple": "", + "bicyan": "", + "biwhite": "" + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "23.10.1" + }, + "timestamp": "2024-02-28T12:03:17.969323" + }, + "Test Function with logColours": { + "content": [ + { + "reset": "\u001b[0m", + "bold": "\u001b[1m", + "dim": "\u001b[2m", + "underlined": "\u001b[4m", + "blink": "\u001b[5m", + "reverse": "\u001b[7m", + "hidden": "\u001b[8m", + "black": "\u001b[0;30m", + "red": "\u001b[0;31m", + "green": "\u001b[0;32m", + "yellow": "\u001b[0;33m", + "blue": "\u001b[0;34m", + "purple": "\u001b[0;35m", + "cyan": "\u001b[0;36m", + "white": "\u001b[0;37m", + "bblack": "\u001b[1;30m", + "bred": "\u001b[1;31m", + "bgreen": "\u001b[1;32m", + "byellow": "\u001b[1;33m", + "bblue": "\u001b[1;34m", + "bpurple": "\u001b[1;35m", + "bcyan": "\u001b[1;36m", + "bwhite": "\u001b[1;37m", + "ublack": "\u001b[4;30m", + "ured": "\u001b[4;31m", + "ugreen": "\u001b[4;32m", + "uyellow": "\u001b[4;33m", + "ublue": "\u001b[4;34m", + "upurple": "\u001b[4;35m", + "ucyan": "\u001b[4;36m", + "uwhite": "\u001b[4;37m", + "iblack": "\u001b[0;90m", + "ired": "\u001b[0;91m", + "igreen": "\u001b[0;92m", + "iyellow": "\u001b[0;93m", + "iblue": "\u001b[0;94m", + "ipurple": "\u001b[0;95m", + "icyan": "\u001b[0;96m", + "iwhite": "\u001b[0;97m", + "biblack": "\u001b[1;90m", + "bired": "\u001b[1;91m", + "bigreen": "\u001b[1;92m", + "biyellow": "\u001b[1;93m", + "biblue": "\u001b[1;94m", + "bipurple": "\u001b[1;95m", + "bicyan": "\u001b[1;96m", + "biwhite": "\u001b[1;97m" + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "23.10.1" + }, + "timestamp": "2024-02-28T12:03:21.714424" + } +} \ No newline at end of file diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test new file mode 100644 index 00000000..8940d32d --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test @@ -0,0 +1,29 @@ +nextflow_workflow { + + name "Test Workflow UTILS_NFCORE_PIPELINE" + script "../main.nf" + config "subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config" + workflow "UTILS_NFCORE_PIPELINE" + tag "subworkflows" + tag "subworkflows_nfcore" + tag "utils_nfcore_pipeline" + tag "subworkflows/utils_nfcore_pipeline" + + test("Should run without failures") { + + when { + workflow { + """ + input[0] = [] + """ + } + } + + then { + assertAll( + { assert workflow.success }, + { assert snapshot(workflow.out).match() } + ) + } + } +} diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap new file mode 100644 index 00000000..859d1030 --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap @@ -0,0 +1,19 @@ +{ + "Should run without failures": { + "content": [ + { + "0": [ + true + ], + "valid_config": [ + true + ] + } + ], + "meta": { + "nf-test": "0.8.4", + "nextflow": "23.10.1" + }, + "timestamp": "2024-02-28T12:03:25.726491" + } +} \ No newline at end of file diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config b/subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config new file mode 100644 index 00000000..d0a926bf --- /dev/null +++ b/subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config @@ -0,0 +1,9 @@ +manifest { + name = 'nextflow_workflow' + author = """nf-core""" + homePage = 'https://127.0.0.1' + description = """Dummy 
pipeline""" + nextflowVersion = '!>=23.04.0' + version = '9.9.9' + doi = 'https://doi.org/10.5281/zenodo.5070524' +} diff --git a/subworkflows/nf-core/utils_nfschema_plugin/main.nf b/subworkflows/nf-core/utils_nfschema_plugin/main.nf new file mode 100644 index 00000000..ee4738c8 --- /dev/null +++ b/subworkflows/nf-core/utils_nfschema_plugin/main.nf @@ -0,0 +1,74 @@ +// +// Subworkflow that uses the nf-schema plugin to validate parameters and render the parameter summary +// + +include { paramsSummaryLog } from 'plugin/nf-schema' +include { validateParameters } from 'plugin/nf-schema' +include { paramsHelp } from 'plugin/nf-schema' + +workflow UTILS_NFSCHEMA_PLUGIN { + + take: + input_workflow // workflow: the workflow object used by nf-schema to get metadata from the workflow + validate_params // boolean: validate the parameters + parameters_schema // string: path to the parameters JSON schema. + // this has to be the same as the schema given to `validation.parametersSchema` + // when this input is empty it will automatically use the configured schema or + // "${projectDir}/nextflow_schema.json" as default. This input should not be empty + // for meta pipelines + help // boolean: show help message + help_full // boolean: show full help message + show_hidden // boolean: show hidden parameters in help message + before_text // string: text to show before the help message and parameters summary + after_text // string: text to show after the help message and parameters summary + command // string: an example command of the pipeline + + main: + + if(help || help_full) { + help_options = [ + beforeText: before_text, + afterText: after_text, + command: command, + showHidden: show_hidden, + fullHelp: help_full, + ] + if(parameters_schema) { + help_options << [parametersSchema: parameters_schema] + } + log.info paramsHelp( + help_options, + params.help instanceof String ? params.help : "", + ) + exit 0 + } + + // + // Print parameter summary to stdout. This will display the parameters + // that differ from the default given in the JSON schema + // + + summary_options = [:] + if(parameters_schema) { + summary_options << [parametersSchema: parameters_schema] + } + log.info before_text + log.info paramsSummaryLog(summary_options, input_workflow) + log.info after_text + + // + // Validate the parameters using nextflow_schema.json or the schema + // given via the validation.parametersSchema configuration option + // + if(validate_params) { + validateOptions = [:] + if(parameters_schema) { + validateOptions << [parametersSchema: parameters_schema] + } + validateParameters(validateOptions) + } + + emit: + dummy_emit = true +} + diff --git a/subworkflows/nf-core/utils_nfschema_plugin/meta.yml b/subworkflows/nf-core/utils_nfschema_plugin/meta.yml new file mode 100644 index 00000000..f7d9f028 --- /dev/null +++ b/subworkflows/nf-core/utils_nfschema_plugin/meta.yml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json +name: "utils_nfschema_plugin" +description: Run nf-schema to validate parameters and create a summary of changed parameters +keywords: + - validation + - JSON schema + - plugin + - parameters + - summary +components: [] +input: + - input_workflow: + type: object + description: | + The workflow object of the used pipeline. + This object contains meta data used to create the params summary log + - validate_params: + type: boolean + description: Validate the parameters and error if invalid. 
+ - parameters_schema: + type: string + description: | + Path to the parameters JSON schema. + This has to be the same as the schema given to the `validation.parametersSchema` config + option. When this input is empty it will automatically use the configured schema or + "${projectDir}/nextflow_schema.json" as default. The schema should not be given in this way + for meta pipelines. +output: + - dummy_emit: + type: boolean + description: Dummy emit to make nf-core subworkflows lint happy +authors: + - "@nvnieuwk" +maintainers: + - "@nvnieuwk" diff --git a/subworkflows/nf-core/utils_nfschema_plugin/tests/main.nf.test b/subworkflows/nf-core/utils_nfschema_plugin/tests/main.nf.test new file mode 100644 index 00000000..c977917a --- /dev/null +++ b/subworkflows/nf-core/utils_nfschema_plugin/tests/main.nf.test @@ -0,0 +1,173 @@ +nextflow_workflow { + + name "Test Subworkflow UTILS_NFSCHEMA_PLUGIN" + script "../main.nf" + workflow "UTILS_NFSCHEMA_PLUGIN" + + tag "subworkflows" + tag "subworkflows_nfcore" + tag "subworkflows/utils_nfschema_plugin" + tag "plugin/nf-schema" + + config "./nextflow.config" + + test("Should run nothing") { + + when { + + params { + test_data = '' + } + + workflow { + """ + validate_params = false + input[0] = workflow + input[1] = validate_params + input[2] = "" + input[3] = false + input[4] = false + input[5] = false + input[6] = "" + input[7] = "" + input[8] = "" + """ + } + } + + then { + assertAll( + { assert workflow.success } + ) + } + } + + test("Should validate params") { + + when { + + params { + test_data = '' + outdir = null + } + + workflow { + """ + validate_params = true + input[0] = workflow + input[1] = validate_params + input[2] = "" + input[3] = false + input[4] = false + input[5] = false + input[6] = "" + input[7] = "" + input[8] = "" + """ + } + } + + then { + assertAll( + { assert workflow.failed }, + { assert workflow.stdout.any { it.contains('ERROR ~ Validation of pipeline parameters failed!') } } + ) + } + } + + test("Should run nothing - custom schema") { + + when { + + params { + test_data = '' + } + + workflow { + """ + validate_params = false + input[0] = workflow + input[1] = validate_params + input[2] = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" + input[3] = false + input[4] = false + input[5] = false + input[6] = "" + input[7] = "" + input[8] = "" + """ + } + } + + then { + assertAll( + { assert workflow.success } + ) + } + } + + test("Should validate params - custom schema") { + + when { + + params { + test_data = '' + outdir = null + } + + workflow { + """ + validate_params = true + input[0] = workflow + input[1] = validate_params + input[2] = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" + input[3] = false + input[4] = false + input[5] = false + input[6] = "" + input[7] = "" + input[8] = "" + """ + } + } + + then { + assertAll( + { assert workflow.failed }, + { assert workflow.stdout.any { it.contains('ERROR ~ Validation of pipeline parameters failed!') } } + ) + } + } + + test("Should create a help message") { + + when { + + params { + test_data = '' + outdir = null + } + + workflow { + """ + validate_params = true + input[0] = workflow + input[1] = validate_params + input[2] = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" + input[3] = true + input[4] = false + input[5] = false + input[6] = "Before" + input[7] = "After" + input[8] = "nextflow run test/test" + """ + } + } + + then { + assertAll( + { assert 
workflow.success } + ) + } + } +} diff --git a/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config b/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config new file mode 100644 index 00000000..8d8c7371 --- /dev/null +++ b/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config @@ -0,0 +1,8 @@ +plugins { + id "nf-schema@2.5.1" +} + +validation { + parametersSchema = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" + monochromeLogs = true +} diff --git a/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json b/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json new file mode 100644 index 00000000..331e0d2f --- /dev/null +++ b/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json @@ -0,0 +1,96 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://raw.githubusercontent.com/./master/nextflow_schema.json", + "title": ". pipeline parameters", + "description": "", + "type": "object", + "$defs": { + "input_output_options": { + "title": "Input/output options", + "type": "object", + "fa_icon": "fas fa-terminal", + "description": "Define where the pipeline should find input data and save output data.", + "required": ["outdir"], + "properties": { + "validate_params": { + "type": "boolean", + "description": "Validate parameters?", + "default": true, + "hidden": true + }, + "outdir": { + "type": "string", + "format": "directory-path", + "description": "The output directory where the results will be saved. You have to use absolute paths to storage on Cloud infrastructure.", + "fa_icon": "fas fa-folder-open" + }, + "test_data_base": { + "type": "string", + "default": "https://raw.githubusercontent.com/nf-core/test-datasets/modules", + "description": "Base for test data directory", + "hidden": true + }, + "test_data": { + "type": "string", + "description": "Fake test data param", + "hidden": true + } + } + }, + "generic_options": { + "title": "Generic options", + "type": "object", + "fa_icon": "fas fa-file-import", + "description": "Less common options for the pipeline, typically set in a config file.", + "help_text": "These options are common to all nf-core pipelines and allow you to customise some of the core preferences for how the pipeline runs.\n\nTypically these options would be set in a Nextflow config file loaded for all pipeline runs, such as `~/.nextflow/config`.", + "properties": { + "help": { + "type": "boolean", + "description": "Display help text.", + "fa_icon": "fas fa-question-circle", + "hidden": true + }, + "version": { + "type": "boolean", + "description": "Display version and exit.", + "fa_icon": "fas fa-question-circle", + "hidden": true + }, + "logo": { + "type": "boolean", + "default": true, + "description": "Display nf-core logo in console output.", + "fa_icon": "fas fa-image", + "hidden": true + }, + "singularity_pull_docker_container": { + "type": "boolean", + "description": "Pull Singularity container from Docker?", + "hidden": true + }, + "publish_dir_mode": { + "type": "string", + "default": "copy", + "description": "Method used to save pipeline results to output directory.", + "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. 
See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.", + "fa_icon": "fas fa-copy", + "enum": ["symlink", "rellink", "link", "copy", "copyNoFollow", "move"], + "hidden": true + }, + "monochrome_logs": { + "type": "boolean", + "description": "Use monochrome_logs", + "hidden": true + } + } + } + }, + "allOf": [ + { + "$ref": "#/$defs/input_output_options" + }, + { + "$ref": "#/$defs/generic_options" + } + ] +} diff --git a/tests/.nftignore b/tests/.nftignore new file mode 100644 index 00000000..2e57a91e --- /dev/null +++ b/tests/.nftignore @@ -0,0 +1,12 @@ +.DS_Store +multiqc/multiqc_data/fastqc_top_overrepresented_sequences_table.txt +multiqc/multiqc_data/multiqc.parquet +multiqc/multiqc_data/multiqc.log +multiqc/multiqc_data/multiqc_data.json +multiqc/multiqc_data/multiqc_sources.txt +multiqc/multiqc_data/multiqc_software_versions.txt +multiqc/multiqc_data/llms-full.txt +multiqc/multiqc_plots/{svg,pdf,png}/*.{svg,pdf,png} +multiqc/multiqc_report.html +pipeline_info/*.{html,json,txt,yml} +**/proseg/preset/** diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..a8eb3c10 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,9 @@ +"""Pytest configuration for nf-core/spatialxe tests.""" + +from __future__ import annotations + +import sys +from pathlib import Path + +# Add bin/ to path so the pipeline's Python scripts (e.g. divide_transcripts.py) can be imported +sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "bin")) diff --git a/tests/coordinate_mode.nf.test b/tests/coordinate_mode.nf.test new file mode 100644 index 00000000..51623327 --- /dev/null +++ b/tests/coordinate_mode.nf.test @@ -0,0 +1,36 @@ +nextflow_pipeline { + + name "Test pipeline for the `coordinate` mode, test run the proseg subworkflow" + script "../main.nf" + tag "pipeline" + config "../conf/test_coordinate_mode.config" + + test("-profile test stub") { + + options "-stub" + + when { + params { + outdir = "$outputDir" + } + } + + then { + // stable_name: All files + folders in ${params.outdir}/ with a stable name + def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) + // stable_path: All files in ${params.outdir}/ with stable content + def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') + assertAll( + { assert workflow.success}, + { assert snapshot( + // pipeline versions.yml file for multiqc from which Nextflow version is removed because we test pipelines on multiple Nextflow versions + removeNextflowVersion("$outputDir/pipeline_info/nf_core_spatialxe_software_mqc_versions.yml"), + // All stable path name, with a relative path + stable_name, + // All files with stable contents + stable_path + ).match() } + ) + } + } +} diff --git a/tests/coordinate_mode.nf.test.snap b/tests/coordinate_mode.nf.test.snap new file mode 100644 index 00000000..828fe335 --- /dev/null +++ b/tests/coordinate_mode.nf.test.snap @@ -0,0 +1,115 @@ +{ + "-profile test stub": { + "content": [ + { + "PROSEG2BAYSOR": { + "proseg": "3.1.0" + }, + "PROSEG": { + "proseg": "3.1.0" + }, + "SPATIALDATA_MERGE_RAW_REDEFINED": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_META": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_RAW_BUNDLE": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_REDEFINED_BUNDLE": { + "spatialdata": "0.7.2" + }, + "UNTAR": { + "untar": 1.34 + }, + "Workflow": { + "nf-core/spatialxe": "v1.0.0" + }, + "XENIUMRANGER_IMPORT_SEGMENTATION": { + "xeniumranger": "4.0.1.1" + } + },
[ + "coordinate", + "coordinate/multiqc", + "coordinate/multiqc/raw_bundle", + "coordinate/multiqc/raw_bundle/multiqc_data", + "coordinate/multiqc/raw_bundle/multiqc_data/.stub", + "coordinate/multiqc/raw_bundle/multiqc_plots", + "coordinate/multiqc/raw_bundle/multiqc_plots/.stub", + "coordinate/multiqc/raw_bundle/multiqc_report.html", + "coordinate/multiqc/redefined_bundle", + "coordinate/multiqc/redefined_bundle/multiqc_data", + "coordinate/multiqc/redefined_bundle/multiqc_data/.stub", + "coordinate/multiqc/redefined_bundle/multiqc_plots", + "coordinate/multiqc/redefined_bundle/multiqc_plots/.stub", + "coordinate/multiqc/redefined_bundle/multiqc_report.html", + "coordinate/proseg", + "coordinate/proseg/preset", + "coordinate/proseg/preset/test_run", + "coordinate/proseg/preset/test_run/cell-polygons.geojson.gz", + "coordinate/proseg/preset/test_run/proseg-output.zarr", + "coordinate/proseg/preset/test_run/transcript-metadata.csv.gz", + "coordinate/proseg/proseg2baysor", + "coordinate/proseg/proseg2baysor/test_run", + "coordinate/proseg/proseg2baysor/test_run/cell-polygons.geojson", + "coordinate/proseg/proseg2baysor/test_run/transcript-metadata.csv", + "coordinate/spatialdata", + "coordinate/spatialdata/merge", + "coordinate/spatialdata/merge/spatialdata", + "coordinate/spatialdata/merge/spatialdata/test_run", + "coordinate/spatialdata/merge/spatialdata/test_run/merged_bundle", + "coordinate/spatialdata/merge/spatialdata/test_run/merged_bundle/fake_file.txt", + "coordinate/spatialdata/meta", + "coordinate/spatialdata/meta/spatialdata", + "coordinate/spatialdata/meta/spatialdata/test_run", + "coordinate/spatialdata/meta/spatialdata/test_run/metadata", + "coordinate/spatialdata/meta/spatialdata/test_run/metadata/fake_file.txt", + "coordinate/spatialdata/write", + "coordinate/spatialdata/write/spatialdata", + "coordinate/spatialdata/write/spatialdata/test_run", + "coordinate/spatialdata/write/spatialdata/test_run/raw_bundle", + "coordinate/spatialdata/write/spatialdata/test_run/raw_bundle/fake_file.txt", + "coordinate/spatialdata/write/spatialdata/test_run/redefined_bundle", + "coordinate/spatialdata/write/spatialdata/test_run/redefined_bundle/fake_file.txt", + "coordinate/untar", + "coordinate/untar/test_run", + "coordinate/untar/test_run/experiment.xenium", + "coordinate/untar/test_run/gene_panel.json", + "coordinate/untar/test_run/morphology.ome.tif", + "coordinate/untar/test_run/transcripts.parquet", + "coordinate/xeniumranger", + "coordinate/xeniumranger/import_segementation", + "coordinate/xeniumranger/import_segementation/test_run", + "coordinate/xeniumranger/import_segementation/test_run/experiment.xenium", + "pipeline_info", + "pipeline_info/nf_core_spatialxe_software_mqc_versions.yml" + ], + [ + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + "cell-polygons.geojson:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcript-metadata.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_panel.json:md5,d41d8cd98f00b204e9800998ecf8427e", + 
"morphology.ome.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-22T19:54:10.312439732" + } +} \ No newline at end of file diff --git a/tests/default.nf.test b/tests/default.nf.test new file mode 100644 index 00000000..1e6301e7 --- /dev/null +++ b/tests/default.nf.test @@ -0,0 +1,36 @@ +nextflow_pipeline { + + name "Test pipeline - runs the default pipeline tests (coordinate mode)" + script "../main.nf" + tag "pipeline" + config "../conf/test.config" + + test("-profile test stub") { + + options "-stub" + + when { + params { + outdir = "$outputDir" + } + } + + then { + // stable_name: All files + folders in ${params.outdir}/ with a stable name + def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) + // stable_path: All files in ${params.outdir}/ with stable content + def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') + assertAll( + { assert workflow.success}, + { assert snapshot( + // pipeline versions.yml file for multiqc from which Nextflow version is removed because we test pipelines on multiple Nextflow versions + removeNextflowVersion("$outputDir/pipeline_info/nf_core_spatialxe_software_mqc_versions.yml"), + // All stable path name, with a relative path + stable_name, + // All files with stable contents + stable_path + ).match() } + ) + } + } +} diff --git a/tests/default.nf.test.snap b/tests/default.nf.test.snap new file mode 100644 index 00000000..b913b700 --- /dev/null +++ b/tests/default.nf.test.snap @@ -0,0 +1,115 @@ +{ + "-profile test stub": { + "content": [ + { + "PROSEG2BAYSOR": { + "proseg": "3.1.0" + }, + "PROSEG": { + "proseg": "3.1.0" + }, + "SPATIALDATA_MERGE_RAW_REDEFINED": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_META": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_RAW_BUNDLE": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_REDEFINED_BUNDLE": { + "spatialdata": "0.7.2" + }, + "UNTAR": { + "untar": 1.34 + }, + "Workflow": { + "nf-core/spatialxe": "v1.0.0" + }, + "XENIUMRANGER_IMPORT_SEGMENTATION": { + "xeniumranger": "4.0.1.1" + } + }, + [ + "coordinate", + "coordinate/multiqc", + "coordinate/multiqc/raw_bundle", + "coordinate/multiqc/raw_bundle/multiqc_data", + "coordinate/multiqc/raw_bundle/multiqc_data/.stub", + "coordinate/multiqc/raw_bundle/multiqc_plots", + "coordinate/multiqc/raw_bundle/multiqc_plots/.stub", + "coordinate/multiqc/raw_bundle/multiqc_report.html", + "coordinate/multiqc/redefined_bundle", + "coordinate/multiqc/redefined_bundle/multiqc_data", + "coordinate/multiqc/redefined_bundle/multiqc_data/.stub", + "coordinate/multiqc/redefined_bundle/multiqc_plots", + "coordinate/multiqc/redefined_bundle/multiqc_plots/.stub", + "coordinate/multiqc/redefined_bundle/multiqc_report.html", + "coordinate/proseg", + "coordinate/proseg/preset", + "coordinate/proseg/preset/test_run", + "coordinate/proseg/preset/test_run/cell-polygons.geojson.gz", + "coordinate/proseg/preset/test_run/proseg-output.zarr", + "coordinate/proseg/preset/test_run/transcript-metadata.csv.gz", + "coordinate/proseg/proseg2baysor", + "coordinate/proseg/proseg2baysor/test_run", + "coordinate/proseg/proseg2baysor/test_run/cell-polygons.geojson", + "coordinate/proseg/proseg2baysor/test_run/transcript-metadata.csv", + "coordinate/spatialdata", + 
"coordinate/spatialdata/merge", + "coordinate/spatialdata/merge/spatialdata", + "coordinate/spatialdata/merge/spatialdata/test_run", + "coordinate/spatialdata/merge/spatialdata/test_run/merged_bundle", + "coordinate/spatialdata/merge/spatialdata/test_run/merged_bundle/fake_file.txt", + "coordinate/spatialdata/meta", + "coordinate/spatialdata/meta/spatialdata", + "coordinate/spatialdata/meta/spatialdata/test_run", + "coordinate/spatialdata/meta/spatialdata/test_run/metadata", + "coordinate/spatialdata/meta/spatialdata/test_run/metadata/fake_file.txt", + "coordinate/spatialdata/write", + "coordinate/spatialdata/write/spatialdata", + "coordinate/spatialdata/write/spatialdata/test_run", + "coordinate/spatialdata/write/spatialdata/test_run/raw_bundle", + "coordinate/spatialdata/write/spatialdata/test_run/raw_bundle/fake_file.txt", + "coordinate/spatialdata/write/spatialdata/test_run/redefined_bundle", + "coordinate/spatialdata/write/spatialdata/test_run/redefined_bundle/fake_file.txt", + "coordinate/untar", + "coordinate/untar/test_run", + "coordinate/untar/test_run/experiment.xenium", + "coordinate/untar/test_run/gene_panel.json", + "coordinate/untar/test_run/morphology.ome.tif", + "coordinate/untar/test_run/transcripts.parquet", + "coordinate/xeniumranger", + "coordinate/xeniumranger/import_segementation", + "coordinate/xeniumranger/import_segementation/test_run", + "coordinate/xeniumranger/import_segementation/test_run/experiment.xenium", + "pipeline_info", + "pipeline_info/nf_core_spatialxe_software_mqc_versions.yml" + ], + [ + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + "cell-polygons.geojson:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcript-metadata.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_panel.json:md5,d41d8cd98f00b204e9800998ecf8427e", + "morphology.ome.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "meta": { + "nf-test": "0.9.3", + "nextflow": "25.10.2" + }, + "timestamp": "2026-03-22T19:55:52.515294044" + } +} \ No newline at end of file diff --git a/tests/image_mode.nf.test b/tests/image_mode.nf.test new file mode 100644 index 00000000..000faeb9 --- /dev/null +++ b/tests/image_mode.nf.test @@ -0,0 +1,36 @@ +nextflow_pipeline { + + name "Test pipeline for the `image` mode, test run the cellpose->baysor subworkflow" + script "../main.nf" + tag "pipeline" + config "../conf/test_image_mode.config" + + test("-profile test stub") { + + options "-stub" + + when { + params { + outdir = "$outputDir" + } + } + + then { + // stable_name: All files + folders in ${params.outdir}/ with a stable name + def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) + // stable_path: All files in ${params.outdir}/ with stable content + def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') + assertAll( + { assert 
workflow.success}, + { assert snapshot( + // pipeline versions.yml file for multiqc from which Nextflow version is removed because we test pipelines on multiple Nextflow versions + removeNextflowVersion("$outputDir/pipeline_info/nf_core_spatialxe_software_mqc_versions.yml"), + // All stable path name, with a relative path + stable_name, + // All files with stable contents + stable_path + ).match() } + ) + } + } +} diff --git a/tests/image_mode.nf.test.snap b/tests/image_mode.nf.test.snap new file mode 100644 index 00000000..50366f0a --- /dev/null +++ b/tests/image_mode.nf.test.snap @@ -0,0 +1,130 @@ +{ + "-profile test stub": { + "content": [ + { + "BAYSOR_PREPROCESS_TRANSCRIPTS": { + "python": "3.14.4" + }, + "BAYSOR_RUN": { + "baysor": "0.7.1" + }, + "CELLPOSE_CELLS": { + "torch": "2.10.0+cu128" + }, + "RESIZE_TIF": { + "tifffile": "2026.2.24" + }, + "SPATIALDATA_MERGE_RAW_REDEFINED": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_META": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_RAW_BUNDLE": { + "spatialdata": "0.7.2" + }, + "SPATIALDATA_WRITE_REDEFINED_BUNDLE": { + "spatialdata": "0.7.2" + }, + "UNTAR": { + "untar": 1.34 + }, + "Workflow": { + "nf-core/spatialxe": "v1.0.0" + }, + "XENIUMRANGER_IMPORT_SEGMENTATION": { + "xeniumranger": "4.0.1.1" + } + }, + [ + "coordinate", + "coordinate/xeniumranger", + "coordinate/xeniumranger/import_segementation", + "coordinate/xeniumranger/import_segementation/test_run", + "coordinate/xeniumranger/import_segementation/test_run/experiment.xenium", + "image", + "image/baysor", + "image/baysor/preprocess", + "image/baysor/preprocess/test_run", + "image/baysor/preprocess/test_run/filtered_transcripts.csv", + "image/baysor/run", + "image/baysor/run/test_run", + "image/baysor/run/test_run/segmentation.csv", + "image/baysor/run/test_run/segmentation_polygons_2d.json", + "image/cellpose_cells", + "image/cellpose_cells/test_run", + "image/cellpose_cells/test_run/morphology.ome_cp_masks.tif", + "image/multiqc", + "image/multiqc/raw_bundle", + "image/multiqc/raw_bundle/multiqc_data", + "image/multiqc/raw_bundle/multiqc_data/.stub", + "image/multiqc/raw_bundle/multiqc_plots", + "image/multiqc/raw_bundle/multiqc_plots/.stub", + "image/multiqc/raw_bundle/multiqc_report.html", + "image/multiqc/redefined_bundle", + "image/multiqc/redefined_bundle/multiqc_data", + "image/multiqc/redefined_bundle/multiqc_data/.stub", + "image/multiqc/redefined_bundle/multiqc_plots", + "image/multiqc/redefined_bundle/multiqc_plots/.stub", + "image/multiqc/redefined_bundle/multiqc_report.html", + "image/spatialdata", + "image/spatialdata/merge", + "image/spatialdata/merge/spatialdata", + "image/spatialdata/merge/spatialdata/test_run", + "image/spatialdata/merge/spatialdata/test_run/merged_bundle", + "image/spatialdata/merge/spatialdata/test_run/merged_bundle/fake_file.txt", + "image/spatialdata/meta", + "image/spatialdata/meta/spatialdata", + "image/spatialdata/meta/spatialdata/test_run", + "image/spatialdata/meta/spatialdata/test_run/metadata", + "image/spatialdata/meta/spatialdata/test_run/metadata/fake_file.txt", + "image/spatialdata/write", + "image/spatialdata/write/spatialdata", + "image/spatialdata/write/spatialdata/test_run", + "image/spatialdata/write/spatialdata/test_run/raw_bundle", + "image/spatialdata/write/spatialdata/test_run/raw_bundle/fake_file.txt", + "image/spatialdata/write/spatialdata/test_run/redefined_bundle", + "image/spatialdata/write/spatialdata/test_run/redefined_bundle/fake_file.txt", + "image/untar", + "image/untar/test_run", + 
"image/untar/test_run/experiment.xenium", + "image/untar/test_run/gene_panel.json", + "image/untar/test_run/morphology.ome.tif", + "image/untar/test_run/transcripts.parquet", + "image/utility", + "image/utility/resize_tif", + "image/utility/resize_tif/test_run", + "image/utility/resize_tif/test_run/resized_morphology.ome_cp_masks.tif.tif", + "pipeline_info", + "pipeline_info/nf_core_spatialxe_software_mqc_versions.yml" + ], + [ + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "filtered_transcripts.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "segmentation.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "segmentation_polygons_2d.json:md5,d41d8cd98f00b204e9800998ecf8427e", + "morphology.ome_cp_masks.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_panel.json:md5,d41d8cd98f00b204e9800998ecf8427e", + "morphology.ome.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet:md5,d41d8cd98f00b204e9800998ecf8427e", + "resized_morphology.ome_cp_masks.tif.tif:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "timestamp": "2026-04-29T19:30:52.537301132", + "meta": { + "nf-test": "0.9.5", + "nextflow": "25.10.4" + } + } +} \ No newline at end of file diff --git a/tests/nextflow.config b/tests/nextflow.config new file mode 100644 index 00000000..eff6fcbb --- /dev/null +++ b/tests/nextflow.config @@ -0,0 +1,13 @@ +/* +======================================================================================== + Nextflow config file for running nf-test tests +======================================================================================== +*/ + +// Or any resources requirements +params { + modules_testdata_base_path = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/' + pipelines_testdata_base_path = 'https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/spatialxe' +} + +aws.client.anonymous = true // fixes S3 access issues on self-hosted runners diff --git a/tests/preview_mode.nf.test b/tests/preview_mode.nf.test new file mode 100644 index 00000000..fe206319 --- /dev/null +++ b/tests/preview_mode.nf.test @@ -0,0 +1,36 @@ +nextflow_pipeline { + + name "Test pipeline for the `preview` mode, test run the basyor-preview subworkflow" + script "../main.nf" + tag "pipeline" + config "../conf/test_preview_mode.config" + + test("-profile test stub") { + + options "-stub" + + when { + params { + outdir = "$outputDir" + } + } + + then { + // stable_name: All files + folders in ${params.outdir}/ with a stable name + def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) + // stable_path: All files in ${params.outdir}/ with stable content + def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') + assertAll( + { assert workflow.success}, + { assert snapshot( + // pipeline versions.yml file for multiqc from which Nextflow version is removed because we test pipelines on 
multiple Nextflow versions + removeNextflowVersion("$outputDir/pipeline_info/nf_core_spatialxe_software_mqc_versions.yml"), + // All stable path name, with a relative path + stable_name, + // All files with stable contents + stable_path + ).match() } + ) + } + } +} diff --git a/tests/preview_mode.nf.test.snap b/tests/preview_mode.nf.test.snap new file mode 100644 index 00000000..e1c92907 --- /dev/null +++ b/tests/preview_mode.nf.test.snap @@ -0,0 +1,83 @@ +{ + "-profile test stub": { + "content": [ + { + "BAYSOR_CREATE_DATASET": { + "python": "3.11.2" + }, + "BAYSOR_PREVIEW": { + "baysor": "0.7.1" + }, + "EXTRACT_PREVIEW_DATA": { + "python": "3.14.4" + }, + "PARQUET_TO_CSV": { + "pyarrow": "24.0.0" + }, + "UNTAR": { + "untar": 1.34 + }, + "Workflow": { + "nf-core/spatialxe": "v1.0.0" + } + }, + [ + "pipeline_info", + "pipeline_info/nf_core_spatialxe_software_mqc_versions.yml", + "preview", + "preview/baysor", + "preview/baysor/create_dataset", + "preview/baysor/create_dataset/test_run", + "preview/baysor/create_dataset/test_run/sampled_transcripts.csv", + "preview/baysor/preview", + "preview/baysor/preview/test_run", + "preview/baysor/preview/test_run/preview.html", + "preview/multiqc", + "preview/multiqc/multiqc_data", + "preview/multiqc/multiqc_data/.stub", + "preview/multiqc/multiqc_plots", + "preview/multiqc/multiqc_plots/.stub", + "preview/multiqc/multiqc_report.html", + "preview/untar", + "preview/untar/test_run", + "preview/untar/test_run/experiment.xenium", + "preview/untar/test_run/gene_panel.json", + "preview/untar/test_run/morphology.ome.tif", + "preview/untar/test_run/transcripts.parquet", + "preview/utility", + "preview/utility/parquet_to_csv", + "preview/utility/parquet_to_csv/test_run", + "preview/utility/parquet_to_csv/test_run/transcripts.parquet.csv", + "preview/utility/preview_data", + "preview/utility/preview_data/test_run", + "preview/utility/preview_data/test_run/gene_structure_mqc.tsv", + "preview/utility/preview_data/test_run/noise_distribution_mqc.tsv", + "preview/utility/preview_data/test_run/noise_level_mqc.png", + "preview/utility/preview_data/test_run/transcript_plots_mqc.png", + "preview/utility/preview_data/test_run/umap_mqc.tsv" + ], + [ + "sampled_transcripts.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "preview.html:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_panel.json:md5,d41d8cd98f00b204e9800998ecf8427e", + "morphology.ome.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_structure_mqc.tsv:md5,d41d8cd98f00b204e9800998ecf8427e", + "noise_distribution_mqc.tsv:md5,d41d8cd98f00b204e9800998ecf8427e", + "noise_level_mqc.png:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcript_plots_mqc.png:md5,d41d8cd98f00b204e9800998ecf8427e", + "umap_mqc.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "timestamp": "2026-04-30T19:00:29.84624323", + "meta": { + "nf-test": "0.9.5", + "nextflow": "26.04.0" + } + } +} \ No newline at end of file diff --git a/tests/segfree_mode.nf.test b/tests/segfree_mode.nf.test new file mode 100644 index 00000000..2319b6e2 --- /dev/null +++ b/tests/segfree_mode.nf.test @@ -0,0 +1,36 @@ +nextflow_pipeline { + + name "Test pipeline" + script "../main.nf" + tag "pipeline" + config 
"../conf/test_segfree_mode.config" + + test("-profile test stub") { + + options "-stub" + + when { + params { + outdir = "$outputDir" + } + } + + then { + // stable_name: All files + folders in ${params.outdir}/ with a stable name + def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) + // stable_path: All files in ${params.outdir}/ with stable content + def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') + assertAll( + { assert workflow.success}, + { assert snapshot( + // pipeline versions.yml file for multiqc from which Nextflow version is removed because we test pipelines on multiple Nextflow versions + removeNextflowVersion("$outputDir/pipeline_info/nf_core_spatialxe_software_mqc_versions.yml"), + // All stable path name, with a relative path + stable_name, + // All files with stable contents + stable_path + ).match() } + ) + } + } +} diff --git a/tests/segfree_mode.nf.test.snap b/tests/segfree_mode.nf.test.snap new file mode 100644 index 00000000..98326c02 --- /dev/null +++ b/tests/segfree_mode.nf.test.snap @@ -0,0 +1,60 @@ +{ + "-profile test stub": { + "content": [ + { + "BAYSOR_PREPROCESS_TRANSCRIPTS": { + "python": "3.14.4" + }, + "BAYSOR_SEGFREE": { + "baysor": "0.7.1" + }, + "UNTAR": { + "untar": 1.34 + }, + "Workflow": { + "nf-core/spatialxe": "v1.0.0" + } + }, + [ + "pipeline_info", + "pipeline_info/nf_core_spatialxe_software_mqc_versions.yml", + "segfree", + "segfree/baysor", + "segfree/baysor/preprocess", + "segfree/baysor/preprocess/test_run", + "segfree/baysor/preprocess/test_run/filtered_transcripts.csv", + "segfree/baysor/segfree", + "segfree/baysor/segfree/test_run", + "segfree/baysor/segfree/test_run/ncvs.loom", + "segfree/multiqc", + "segfree/multiqc/multiqc_data", + "segfree/multiqc/multiqc_data/.stub", + "segfree/multiqc/multiqc_plots", + "segfree/multiqc/multiqc_plots/.stub", + "segfree/multiqc/multiqc_report.html", + "segfree/untar", + "segfree/untar/test_run", + "segfree/untar/test_run/experiment.xenium", + "segfree/untar/test_run/gene_panel.json", + "segfree/untar/test_run/morphology.ome.tif", + "segfree/untar/test_run/transcripts.parquet" + ], + [ + "filtered_transcripts.csv:md5,d41d8cd98f00b204e9800998ecf8427e", + "ncvs.loom:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + ".stub:md5,d41d8cd98f00b204e9800998ecf8427e", + "multiqc_report.html:md5,d41d8cd98f00b204e9800998ecf8427e", + "experiment.xenium:md5,d41d8cd98f00b204e9800998ecf8427e", + "gene_panel.json:md5,d41d8cd98f00b204e9800998ecf8427e", + "morphology.ome.tif:md5,d41d8cd98f00b204e9800998ecf8427e", + "transcripts.parquet:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "timestamp": "2026-04-29T19:35:23.216079554", + "meta": { + "nf-test": "0.9.5", + "nextflow": "25.10.4" + } + } +} \ No newline at end of file diff --git a/tests/test_xenium_patch/__init__.py b/tests/test_xenium_patch/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_xenium_patch/test_divide_transcripts.py b/tests/test_xenium_patch/test_divide_transcripts.py new file mode 100644 index 00000000..859fd5ae --- /dev/null +++ b/tests/test_xenium_patch/test_divide_transcripts.py @@ -0,0 +1,720 @@ +"""Tests for divide_transcripts.py — grid computation + transcript division.""" + +import importlib.util +import json +import math +import sys +from pathlib import Path + +import numpy as np +import pyarrow as pa +import pyarrow.parquet as pq +import pytest + +# 
--------------------------------------------------------------------------- +# Import the standalone script from bin/ +# --------------------------------------------------------------------------- + +_BIN_DIR = Path(__file__).resolve().parents[2] / "bin" +_SCRIPT = _BIN_DIR / "divide_transcripts.py" +_spec = importlib.util.spec_from_file_location("divide_transcripts", _SCRIPT) +_mod = importlib.util.module_from_spec(_spec) +sys.modules["divide_transcripts"] = _mod +_spec.loader.exec_module(_mod) + +from divide_transcripts import ( # noqa: E402 + Bounds, + PatchInfo, + _compute_uniform_grid, + _count_transcripts_per_tile, + _find_adjacent_patches, + compute_density_quadtree_grid, + compute_tilewidth_uniform_grid, + divide_transcripts, + merge_sparse_tiles, + save_grid_metadata, +) + +PIXEL_SIZE = 0.2125 + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def synthetic_transcripts(tmp_path: Path) -> Path: + """Write a synthetic transcripts.parquet with 1000 rows, uniform spatial distribution.""" + rng = np.random.default_rng(42) + n = 1000 + table = pa.table( + { + "transcript_id": pa.array([f"tx_{i}" for i in range(n)], type=pa.string()), + "cell_id": pa.array(["UNASSIGNED"] * n, type=pa.string()), + "overlaps_nucleus": pa.array([0] * n, type=pa.int32()), + "feature_name": pa.array( + [f"gene_{i % 50}" for i in range(n)], type=pa.string() + ), + "x_location": pa.array(rng.uniform(0.0, 1275.0, n), type=pa.float32()), + "y_location": pa.array(rng.uniform(0.0, 1275.0, n), type=pa.float32()), + "z_location": pa.array(rng.uniform(0.0, 10.0, n), type=pa.float32()), + "qv": pa.array(rng.uniform(20.0, 40.0, n), type=pa.float32()), + } + ) + path = tmp_path / "transcripts.parquet" + pq.write_table(table, str(path)) + return path + + +@pytest.fixture +def dense_corner_transcripts(tmp_path: Path) -> Path: + """90% of transcripts in the top-left corner, 10% uniform.""" + rng = np.random.default_rng(99) + n_dense = 900 + n_sparse = 100 + n = n_dense + n_sparse + + x_dense = rng.uniform(0.0, 50.0, n_dense) + y_dense = rng.uniform(0.0, 50.0, n_dense) + x_sparse = rng.uniform(0.0, 1275.0, n_sparse) + y_sparse = rng.uniform(0.0, 1275.0, n_sparse) + + table = pa.table( + { + "transcript_id": pa.array([f"tx_{i}" for i in range(n)], type=pa.string()), + "cell_id": pa.array(["UNASSIGNED"] * n, type=pa.string()), + "overlaps_nucleus": pa.array([0] * n, type=pa.int32()), + "feature_name": pa.array( + [f"gene_{i % 50}" for i in range(n)], type=pa.string() + ), + "x_location": pa.array( + np.concatenate([x_dense, x_sparse]).astype(np.float32), + type=pa.float32(), + ), + "y_location": pa.array( + np.concatenate([y_dense, y_sparse]).astype(np.float32), + type=pa.float32(), + ), + "z_location": pa.array( + rng.uniform(0.0, 10.0, n).astype(np.float32), type=pa.float32() + ), + "qv": pa.array( + rng.uniform(20.0, 40.0, n).astype(np.float32), type=pa.float32() + ), + } + ) + path = tmp_path / "transcripts_dense.parquet" + pq.write_table(table, str(path)) + return path + + +# --------------------------------------------------------------------------- +# Uniform grid tests +# --------------------------------------------------------------------------- + + +class TestUniformGridBasic: + def test_uniform_grid_basic(self): + """3x3 grid from 2000um tile width on a 6000x6000um image.""" + # Use exact pixel count: 6000um / 0.2125um/px = 28235.29... 
-> round to 28235 + # image_um = 28235 * 0.2125 = 5999.9375; ceil(5999.9375 / 2000) = 3 + image_px = int(6000.0 / PIXEL_SIZE) + extent = Bounds(0.0, 6000.0, 0.0, 6000.0) + + patches, rows, cols, overlap_px = compute_tilewidth_uniform_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=2000.0, + overlap_um=50.0, + pixel_size_um=PIXEL_SIZE, + transcript_extent_um=extent, + ) + + assert rows == 3 + assert cols == 3 + assert len(patches) == 9 + + def test_uniform_grid_single_tile(self): + """Tile width larger than image produces a 1x1 grid.""" + image_px = 1000 + extent = Bounds(0.0, image_px * PIXEL_SIZE, 0.0, image_px * PIXEL_SIZE) + + patches, rows, cols, _ = compute_tilewidth_uniform_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=50000.0, + overlap_um=50.0, + pixel_size_um=PIXEL_SIZE, + transcript_extent_um=extent, + ) + + assert rows == 1 + assert cols == 1 + assert len(patches) == 1 + + def test_uniform_grid_overlap(self): + """Global bounds extend beyond core by the overlap amount.""" + image_px = 1000 + overlap_um = 50.0 + + patches = _compute_uniform_grid( + image_height_px=image_px, + image_width_px=image_px, + grid_rows=2, + grid_cols=2, + overlap_px=int(math.ceil(overlap_um / PIXEL_SIZE)), + pixel_size_um=PIXEL_SIZE, + ) + + # Interior patch boundary: global should extend beyond core + for p in patches: + assert p.global_bounds_px.x_min <= p.core_bounds_px.x_min + assert p.global_bounds_px.x_max >= p.core_bounds_px.x_max + assert p.global_bounds_px.y_min <= p.core_bounds_px.y_min + assert p.global_bounds_px.y_max >= p.core_bounds_px.y_max + + +# --------------------------------------------------------------------------- +# Quadtree grid tests +# --------------------------------------------------------------------------- + + +class TestQuadtreeGrid: + def test_quadtree_uniform_density(self): + """When density is uniform with high threshold, quadtree should not subdivide.""" + rng = np.random.default_rng(42) + image_px = 1000 + image_um = image_px * PIXEL_SIZE + + x = rng.uniform(0, image_um, 1000) + y = rng.uniform(0, image_um, 1000) + + patches, _, _, _ = compute_density_quadtree_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + pixel_size_um=PIXEL_SIZE, + x_coords_um=x, + y_coords_um=y, + max_transcripts_per_patch=10000, + ) + + # ceil(212.5/100)=3 -> 3x3 = 9 initial patches, no subdivision + assert len(patches) == 9 + + def test_quadtree_dense_region(self): + """Put 90% of transcripts in one corner, verify subdivision produces more patches.""" + rng = np.random.default_rng(42) + image_px = 1000 + image_um = image_px * PIXEL_SIZE + + x_sparse = rng.uniform(0, image_um, 100) + y_sparse = rng.uniform(0, image_um, 100) + x_dense = rng.uniform(0, image_um * 0.2, 5000) + y_dense = rng.uniform(0, image_um * 0.2, 5000) + x = np.concatenate([x_sparse, x_dense]) + y = np.concatenate([y_sparse, y_dense]) + + patches, _, _, _ = compute_density_quadtree_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + pixel_size_um=PIXEL_SIZE, + x_coords_um=x, + y_coords_um=y, + max_transcripts_per_patch=500, + min_tile_width_um=10.0, + max_depth=4, + ) + + # Should have subdivided beyond the initial 9 + assert len(patches) > 9 + + def test_quadtree_max_depth(self): + """Verify subdivision stops at max_depth: deeper depth -> more patches.""" + rng = np.random.default_rng(42) + image_px = 1000 + image_um = image_px * PIXEL_SIZE + + x = 
rng.normal(image_um / 2, 5.0, 10000) + y = rng.normal(image_um / 2, 5.0, 10000) + + patches_d1, _, _, _ = compute_density_quadtree_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + pixel_size_um=PIXEL_SIZE, + x_coords_um=x, + y_coords_um=y, + max_transcripts_per_patch=10, + min_tile_width_um=1.0, + max_depth=1, + ) + + patches_d4, _, _, _ = compute_density_quadtree_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + pixel_size_um=PIXEL_SIZE, + x_coords_um=x, + y_coords_um=y, + max_transcripts_per_patch=10, + min_tile_width_um=1.0, + max_depth=4, + ) + + assert len(patches_d4) > len(patches_d1) + + def test_quadtree_min_tile_width(self): + """Verify subdivision stops at min_tile_width: all cores >= min width.""" + rng = np.random.default_rng(42) + image_px = 1000 + image_um = image_px * PIXEL_SIZE + min_tile_um = 30.0 + + x = rng.normal(image_um / 2, 5.0, 10000) + y = rng.normal(image_um / 2, 5.0, 10000) + + patches, _, _, _ = compute_density_quadtree_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + pixel_size_um=PIXEL_SIZE, + x_coords_um=x, + y_coords_um=y, + max_transcripts_per_patch=10, + min_tile_width_um=min_tile_um, + max_depth=10, + ) + + for p in patches: + # Allow 1um rounding tolerance from pixel conversion + assert p.core_bounds_um.width >= min_tile_um - 1.0, ( + f"Patch {p.patch_id} width {p.core_bounds_um.width:.1f} < min {min_tile_um}" + ) + assert p.core_bounds_um.height >= min_tile_um - 1.0, ( + f"Patch {p.patch_id} height {p.core_bounds_um.height:.1f} < min {min_tile_um}" + ) + + +# --------------------------------------------------------------------------- +# Division tests +# --------------------------------------------------------------------------- + + +class TestDivideTranscripts: + def test_divide_transcripts_basic( + self, synthetic_transcripts: Path, tmp_path: Path + ): + """Divide synthetic parquet, verify per-patch files are written.""" + output_dir = tmp_path / "output" + + divide_transcripts( + transcripts_path=synthetic_transcripts, + output_dir=output_dir, + image_width_px=6000, + image_height_px=6000, + tile_width_um=1000.0, + overlap_um=50.0, + balanced=False, + pixel_size_um=PIXEL_SIZE, + max_workers=1, + ) + + # Grid metadata should exist + grid_json = output_dir / "patch_grid.json" + assert grid_json.exists() + + with open(grid_json) as f: + metadata = json.load(f) + + # Each patch should have a transcripts.parquet file + for patch in metadata["patches"]: + patch_parquet = output_dir / patch["patch_id"] / "transcripts.parquet" + assert patch_parquet.exists(), f"Missing parquet for {patch['patch_id']}" + + def test_divide_transcripts_coordinates_offset( + self, synthetic_transcripts: Path, tmp_path: Path + ): + """Verify coordinates are offset to patch-local space.""" + output_dir = tmp_path / "output" + + divide_transcripts( + transcripts_path=synthetic_transcripts, + output_dir=output_dir, + image_width_px=6000, + image_height_px=6000, + tile_width_um=1000.0, + overlap_um=50.0, + balanced=False, + pixel_size_um=PIXEL_SIZE, + max_workers=1, + ) + + with open(output_dir / "patch_grid.json") as f: + metadata = json.load(f) + + for patch_meta in metadata["patches"]: + patch_parquet = output_dir / patch_meta["patch_id"] / "transcripts.parquet" + if not patch_parquet.exists(): + continue + tbl = pq.read_table(str(patch_parquet)) + if tbl.num_rows == 0: + continue + + gb = 
patch_meta["global_bounds_um"] + patch_width = gb["x_max"] - gb["x_min"] + patch_height = gb["y_max"] - gb["y_min"] + + x_arr = tbl.column("x_location").to_numpy() + y_arr = tbl.column("y_location").to_numpy() + + # Local coords should be in [0, patch_width) x [0, patch_height) + assert float(np.min(x_arr)) >= -0.01, ( + f"Patch {patch_meta['patch_id']}: x_min={np.min(x_arr)} < 0" + ) + assert float(np.max(x_arr)) < patch_width + 0.01, ( + f"Patch {patch_meta['patch_id']}: x_max={np.max(x_arr)} >= {patch_width}" + ) + assert float(np.min(y_arr)) >= -0.01 + assert float(np.max(y_arr)) < patch_height + 0.01 + + def test_divide_transcripts_no_transcript_loss( + self, synthetic_transcripts: Path, tmp_path: Path + ): + """Verify all transcripts appear in at least one patch.""" + output_dir = tmp_path / "output" + + original = pq.read_table(str(synthetic_transcripts)) + original_ids = set(original.column("transcript_id").to_pylist()) + + divide_transcripts( + transcripts_path=synthetic_transcripts, + output_dir=output_dir, + image_width_px=6000, + image_height_px=6000, + tile_width_um=1000.0, + overlap_um=50.0, + balanced=False, + pixel_size_um=PIXEL_SIZE, + max_workers=1, + ) + + with open(output_dir / "patch_grid.json") as f: + metadata = json.load(f) + + found_ids: set[str] = set() + for patch_meta in metadata["patches"]: + patch_parquet = output_dir / patch_meta["patch_id"] / "transcripts.parquet" + if patch_parquet.exists(): + tbl = pq.read_table(str(patch_parquet)) + found_ids.update(tbl.column("transcript_id").to_pylist()) + + # Every original transcript must appear in at least one patch + missing = original_ids - found_ids + assert len(missing) == 0, f"{len(missing)} transcripts lost during division" + + +# --------------------------------------------------------------------------- +# Grid metadata JSON roundtrip +# --------------------------------------------------------------------------- + + +class TestGridMetadataJSON: + def test_grid_metadata_json_roundtrip(self, tmp_path: Path): + """Save + load grid metadata preserves all fields.""" + image_px = 1000 + extent = Bounds(0.0, image_px * PIXEL_SIZE, 0.0, image_px * PIXEL_SIZE) + patches, rows, cols, overlap_px = compute_tilewidth_uniform_grid( + image_height_px=image_px, + image_width_px=image_px, + tile_width_um=100.0, + overlap_um=50.0, + pixel_size_um=PIXEL_SIZE, + transcript_extent_um=extent, + ) + + path = tmp_path / "patch_grid.json" + save_grid_metadata( + patches=patches, + image_height_px=image_px, + image_width_px=image_px, + pixel_size_um=PIXEL_SIZE, + transcript_extent_um=extent, + grid_rows=rows, + grid_cols=cols, + overlap_um=50.0, + overlap_px=overlap_px, + grid_type="uniform", + output_path=path, + ) + + with open(path) as f: + data = json.load(f) + + assert data["version"] == "1.0" + assert data["grid_rows"] == rows + assert data["grid_cols"] == cols + assert data["overlap_um"] == 50.0 + assert data["overlap_px"] == overlap_px + assert data["grid_type"] == "uniform" + assert len(data["patches"]) == len(patches) + + for orig, loaded in zip(patches, data["patches"]): + assert loaded["patch_id"] == orig.patch_id + assert loaded["row"] == orig.row + assert loaded["col"] == orig.col + assert loaded["global_bounds_px"]["x_min"] == pytest.approx( + orig.global_bounds_px.x_min + ) + assert loaded["core_bounds_um"]["y_max"] == pytest.approx( + orig.core_bounds_um.y_max + ) + + +# --------------------------------------------------------------------------- +# Merge sparse tiles tests +# 
--------------------------------------------------------------------------- + + +def _make_2x2_grid(pixel_size: float = PIXEL_SIZE) -> tuple[list[PatchInfo], int, int]: + """Build a 2x2 uniform grid on a 1000x1000 pixel image. + + Returns: + Tuple of (patches, image_size_px, overlap_px). + """ + image_px = 1000 + overlap_px = int(math.ceil(50.0 / pixel_size)) + patches = _compute_uniform_grid( + image_height_px=image_px, + image_width_px=image_px, + grid_rows=2, + grid_cols=2, + overlap_px=overlap_px, + pixel_size_um=pixel_size, + ) + return patches, image_px, overlap_px + + +class TestMergeSparseTiles: + def test_no_merge_above_threshold(self): + """All tiles above threshold -- no merging happens.""" + patches, image_px, overlap_px = _make_2x2_grid() + image_um = image_px * PIXEL_SIZE + + rng = np.random.default_rng(42) + n = 4000 + x = rng.uniform(0, image_um, n).astype(np.float64) + y = rng.uniform(0, image_um, n).astype(np.float64) + + merged, merge_count = merge_sparse_tiles( + patches=patches, + x_coords_um=x, + y_coords_um=y, + overlap_px=overlap_px, + pixel_size_um=PIXEL_SIZE, + image_width_px=image_px, + image_height_px=image_px, + min_transcripts=100, + ) + + assert merge_count == 0 + assert len(merged) == len(patches) + merged_ids = {p.patch_id for p in merged} + original_ids = {p.patch_id for p in patches} + assert merged_ids == original_ids + + def test_merge_sparse_edge_tile(self): + """One corner tile has very few transcripts -- it gets merged into a neighbor.""" + patches, image_px, overlap_px = _make_2x2_grid() + + # Put 500 transcripts in each of 3 tiles and only 5 in the first tile (row0_col0) + rng = np.random.default_rng(7) + # Find the core bounds of each patch to place transcripts correctly + patch_map = {p.patch_id: p for p in patches} + + sparse_id = patches[0].patch_id # first tile gets very few transcripts + xs, ys = [], [] + for p in patches: + cb = p.core_bounds_um + n = 5 if p.patch_id == sparse_id else 500 + xs.append(rng.uniform(cb.x_min + 0.1, cb.x_max - 0.1, n)) + ys.append(rng.uniform(cb.y_min + 0.1, cb.y_max - 0.1, n)) + + x = np.concatenate(xs) + y = np.concatenate(ys) + + merged, merge_count = merge_sparse_tiles( + patches=patches, + x_coords_um=x, + y_coords_um=y, + overlap_px=overlap_px, + pixel_size_um=PIXEL_SIZE, + image_width_px=image_px, + image_height_px=image_px, + min_transcripts=100, + ) + + assert merge_count == 1 + assert len(merged) == 3 + + # The sparse tile should no longer exist as a patch + merged_ids = {p.patch_id for p in merged} + assert sparse_id not in merged_ids + + # The absorbing neighbor's bounds should cover the sparse tile's area + sparse_global = patch_map[sparse_id].global_bounds_um + absorber = [p for p in merged if p.patch_id != sparse_id] + # At least one neighbor should now have bounds covering the sparse tile's origin + covers_sparse = any( + p.global_bounds_um.x_min <= sparse_global.x_min + 0.01 + and p.global_bounds_um.y_min <= sparse_global.y_min + 0.01 + for p in absorber + ) + assert covers_sparse, "No merged tile covers the sparse tile's region" + + def test_merge_preserves_all_transcripts(self, tmp_path: Path): + """After merging, divide_transcripts with merged grid loses no transcripts.""" + image_px = 1000 + image_um = image_px * PIXEL_SIZE + + # Create transcripts: sparse in one corner, dense elsewhere + rng = np.random.default_rng(33) + n_sparse = 10 + n_dense = 990 + + x_sparse = rng.uniform(0, image_um * 0.1, n_sparse).astype(np.float32) + y_sparse = rng.uniform(0, image_um * 0.1, n_sparse).astype(np.float32)
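+ # With min_transcripts=50 in the divide_transcripts call below, the tiles in + # the nearly-empty corner should be absorbed into denser neighbours; the + # assertions afterwards then check that merging never drops a transcript.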
+ x_dense = rng.uniform(image_um * 0.3, image_um, n_dense).astype(np.float32) + y_dense = rng.uniform(image_um * 0.3, image_um, n_dense).astype(np.float32) + + n = n_sparse + n_dense + table = pa.table( + { + "transcript_id": pa.array( + [f"tx_{i}" for i in range(n)], type=pa.string() + ), + "cell_id": pa.array(["UNASSIGNED"] * n, type=pa.string()), + "overlaps_nucleus": pa.array([0] * n, type=pa.int32()), + "feature_name": pa.array( + [f"gene_{i % 50}" for i in range(n)], type=pa.string() + ), + "x_location": pa.array( + np.concatenate([x_sparse, x_dense]), type=pa.float32() + ), + "y_location": pa.array( + np.concatenate([y_sparse, y_dense]), type=pa.float32() + ), + "z_location": pa.array( + rng.uniform(0, 10, n).astype(np.float32), type=pa.float32() + ), + "qv": pa.array( + rng.uniform(20, 40, n).astype(np.float32), type=pa.float32() + ), + } + ) + parquet_path = tmp_path / "transcripts.parquet" + pq.write_table(table, str(parquet_path)) + + original_ids = set(table.column("transcript_id").to_pylist()) + output_dir = tmp_path / "output" + + divide_transcripts( + transcripts_path=parquet_path, + output_dir=output_dir, + image_width_px=image_px, + image_height_px=image_px, + tile_width_um=100.0, + overlap_um=10.0, + balanced=False, + pixel_size_um=PIXEL_SIZE, + max_workers=1, + min_transcripts=50, + ) + + with open(output_dir / "patch_grid.json") as f: + metadata = json.load(f) + + found_ids: set[str] = set() + for patch_meta in metadata["patches"]: + patch_parquet = output_dir / patch_meta["patch_id"] / "transcripts.parquet" + if patch_parquet.exists(): + tbl = pq.read_table(str(patch_parquet)) + found_ids.update(tbl.column("transcript_id").to_pylist()) + + missing = original_ids - found_ids + assert len(missing) == 0, ( + f"{len(missing)} transcripts lost after merge + divide" + ) + + def test_merge_disabled_with_zero_threshold(self): + """min_transcripts=0 disables merging regardless of transcript counts.""" + patches, image_px, overlap_px = _make_2x2_grid() + + # Put only 1 transcript per tile -- still no merge with threshold=0 + rng = np.random.default_rng(99) + xs, ys = [], [] + for p in patches: + cb = p.core_bounds_um + xs.append(rng.uniform(cb.x_min + 0.1, cb.x_max - 0.1, 1)) + ys.append(rng.uniform(cb.y_min + 0.1, cb.y_max - 0.1, 1)) + + x = np.concatenate(xs) + y = np.concatenate(ys) + + merged, merge_count = merge_sparse_tiles( + patches=patches, + x_coords_um=x, + y_coords_um=y, + overlap_px=overlap_px, + pixel_size_um=PIXEL_SIZE, + image_width_px=image_px, + image_height_px=image_px, + min_transcripts=0, + ) + + assert merge_count == 0 + assert len(merged) == len(patches) + + def test_count_transcripts_per_tile(self): + """Unit test for _count_transcripts_per_tile with known placement.""" + patches, image_px, _ = _make_2x2_grid() + + # Place a known, distinct number of transcripts in each patch's core + rng = np.random.default_rng(11) + xs, ys = [], [] + expected_per_patch: dict[str, int] = {} + counts_list = [10, 20, 30, 40] + for p, n in zip(patches, counts_list): + cb = p.core_bounds_um + xs.append(rng.uniform(cb.x_min + 0.1, cb.x_max - 0.1, n)) + ys.append(rng.uniform(cb.y_min + 0.1, cb.y_max - 0.1, n)) + expected_per_patch[p.patch_id] = n + + x = np.concatenate(xs) + y = np.concatenate(ys) + + counts = _count_transcripts_per_tile(patches, x, y) + + for pid, expected in expected_per_patch.items(): + assert counts[pid] == expected, ( + f"Patch {pid}: expected {expected}, got {counts[pid]}" + ) + + def test_find_adjacent_patches(self): + """Each tile in a 2x2 grid has 
exactly 2 neighbors.""" + patches, _, _ = _make_2x2_grid() + adjacency = _find_adjacent_patches(patches) + + # 2x2 grid: each corner tile touches 2 others (horizontal + vertical) + for p in patches: + neighbors = adjacency[p.patch_id] + assert len(neighbors) == 2, ( + f"Patch {p.patch_id} has {len(neighbors)} neighbors, expected 2: {neighbors}" + ) diff --git a/tests/test_xenium_patch/test_stitch_transcripts.py b/tests/test_xenium_patch/test_stitch_transcripts.py new file mode 100644 index 00000000..5419c897 --- /dev/null +++ b/tests/test_xenium_patch/test_stitch_transcripts.py @@ -0,0 +1,865 @@ +"""Tests for stitch_transcripts.py — sopa-based stitching.""" + +import importlib.util +import json +import sys +from pathlib import Path + +import pyarrow as pa +import pyarrow.csv as pa_csv +import pytest +from shapely.geometry import Polygon, mapping + +# --------------------------------------------------------------------------- +# Import the standalone script from module resources +# --------------------------------------------------------------------------- + +_SCRIPT = ( + Path(__file__).resolve().parents[2] + / "modules/local/xenium_patch/stitch/resources/usr/bin/stitch_transcripts.py" +) +_spec = importlib.util.spec_from_file_location("stitch_transcripts", _SCRIPT) +_mod = importlib.util.module_from_spec(_spec) +sys.modules["stitch_transcripts"] = _mod +_spec.loader.exec_module(_mod) + +from stitch_transcripts import ( # noqa: E402 + Bounds, + PatchGridMetadata, + PatchInfo, + _normalize_geometry_collection, + read_geojson, + stitch_transcript_assignments, + transform_polygons, +) + +PIXEL_SIZE = 0.2125 + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_patch_info( + patch_id: str, + row: int, + col: int, + global_x: tuple[float, float], + global_y: tuple[float, float], + core_x: tuple[float, float], + core_y: tuple[float, float], +) -> PatchInfo: + """Create a PatchInfo with bounds in both pixel and micron space.""" + return PatchInfo( + patch_id=patch_id, + row=row, + col=col, + global_bounds_px=Bounds( + global_x[0] / PIXEL_SIZE, + global_x[1] / PIXEL_SIZE, + global_y[0] / PIXEL_SIZE, + global_y[1] / PIXEL_SIZE, + ), + global_bounds_um=Bounds(global_x[0], global_x[1], global_y[0], global_y[1]), + core_bounds_px=Bounds( + core_x[0] / PIXEL_SIZE, + core_x[1] / PIXEL_SIZE, + core_y[0] / PIXEL_SIZE, + core_y[1] / PIXEL_SIZE, + ), + core_bounds_um=Bounds(core_x[0], core_x[1], core_y[0], core_y[1]), + ) + + +def _make_metadata(patches: list[PatchInfo]) -> PatchGridMetadata: + """Create minimal PatchGridMetadata.""" + return PatchGridMetadata( + version="1.0", + bundle_path="", + image_height_px=10000, + image_width_px=10000, + pixel_size_um=PIXEL_SIZE, + transcript_extent_um=Bounds(0.0, 2125.0, 0.0, 2125.0), + grid_rows=1, + grid_cols=2, + overlap_um=50.0, + overlap_px=236, + patches=patches, + ) + + +def _write_grid_json(metadata: PatchGridMetadata, output_path: Path) -> None: + """Serialize PatchGridMetadata to JSON (matching the format load_grid_metadata expects).""" + + def bounds_dict(b: Bounds) -> dict: + return {"x_min": b.x_min, "x_max": b.x_max, "y_min": b.y_min, "y_max": b.y_max} + + data = { + "version": metadata.version, + "bundle_path": metadata.bundle_path, + "image_height_px": metadata.image_height_px, + "image_width_px": metadata.image_width_px, + "pixel_size_um": metadata.pixel_size_um, + "transcript_extent_um": 
bounds_dict(metadata.transcript_extent_um), + "grid_rows": metadata.grid_rows, + "grid_cols": metadata.grid_cols, + "overlap_um": metadata.overlap_um, + "overlap_px": metadata.overlap_px, + "grid_type": metadata.grid_type, + "patches": [ + { + "patch_id": p.patch_id, + "row": p.row, + "col": p.col, + "global_bounds_px": bounds_dict(p.global_bounds_px), + "global_bounds_um": bounds_dict(p.global_bounds_um), + "core_bounds_px": bounds_dict(p.core_bounds_px), + "core_bounds_um": bounds_dict(p.core_bounds_um), + } + for p in metadata.patches + ], + } + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w") as f: + json.dump(data, f, indent=2) + + +def _write_patch_csv( + patch_dir: Path, rows: list[dict], filename: str = "segmentation.csv" +) -> None: + """Write a Baysor-style CSV.""" + if not rows: + return + cols = list(rows[0].keys()) + arrays = { + col: pa.array([str(r[col]) for r in rows], type=pa.string()) for col in cols + } + table = pa.table(arrays) + patch_dir.mkdir(parents=True, exist_ok=True) + pa_csv.write_csv(table, patch_dir / filename) + + +def _write_patch_geojson( + patch_dir: Path, + cell_polygons: dict[str, Polygon], + filename: str = "segmentation_polygons.json", +) -> None: + """Write a GeoJSON FeatureCollection with cell polygons in local coordinates.""" + features = [] + for cell_id, poly in cell_polygons.items(): + features.append( + { + "type": "Feature", + "id": cell_id, + "geometry": mapping(poly), + "properties": {"cell_id": cell_id}, + } + ) + geojson = {"type": "FeatureCollection", "features": features} + patch_dir.mkdir(parents=True, exist_ok=True) + with open(patch_dir / filename, "w") as f: + json.dump(geojson, f) + + +# --------------------------------------------------------------------------- +# Stitch tests +# --------------------------------------------------------------------------- + + +class TestStitchBasic: + def test_stitch_basic(self, tmp_path: Path): + """Create 2 patches with non-overlapping cells, verify merged output.""" + # Patch 0: core [0,500) x [0,1000), global [0,525) x [0,1000) + # Patch 1: core [500,1000) x [0,1000), global [475,1000) x [0,1000) + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 525.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 500.0), + core_y=(0.0, 1000.0), + ) + p1 = _make_patch_info( + "patch_1", + 0, + 1, + global_x=(475.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(500.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0, p1]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # Patch 0: cell at (100, 100) local -> (100, 100) global + _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_1", + "x": "100.0", + "y": "100.0", + "gene": "A", + "cell": "cell_1", + "is_noise": "0", + }, + { + "transcript_id": "tx_2", + "x": "200.0", + "y": "200.0", + "gene": "B", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_0", + {"cell_1": Polygon([(50, 50), (250, 50), (250, 250), (50, 250)])}, + ) + + # Patch 1: cell at (100, 100) local -> (575, 100) global + _write_patch_csv( + patches_dir / "patch_1", + [ + { + "transcript_id": "tx_3", + "x": "100.0", + "y": "100.0", + "gene": "C", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_1", + {"cell_1": Polygon([(50, 50), (200, 50), (200, 200), (50, 200)])}, + ) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + 
patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + csv_out = output_dir / "xr-transcript-metadata.csv" + assert csv_out.exists() + + geo_out = output_dir / "xr-cell-polygons.geojson" + assert geo_out.exists() + + # Read CSV and verify transcripts present + merged = pa_csv.read_csv(csv_out) + assert merged.num_rows >= 3 + + def test_stitch_cell_id_sequential(self, tmp_path: Path): + """Verify global IDs are cell-1, cell-2, ...""" + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 525.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 500.0), + core_y=(0.0, 1000.0), + ) + p1 = _make_patch_info( + "patch_1", + 0, + 1, + global_x=(475.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(500.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0, p1]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_1", + "x": "100.0", + "y": "100.0", + "gene": "A", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_0", + {"cell_1": Polygon([(50, 50), (250, 50), (250, 250), (50, 250)])}, + ) + + _write_patch_csv( + patches_dir / "patch_1", + [ + { + "transcript_id": "tx_2", + "x": "100.0", + "y": "100.0", + "gene": "B", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_1", + {"cell_1": Polygon([(50, 50), (200, 50), (200, 200), (50, 200)])}, + ) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + geo_out = output_dir / "xr-cell-polygons.geojson" + with open(geo_out) as f: + geo = json.load(f) + + cell_ids = [feat["id"] for feat in geo["features"]] + for cid in cell_ids: + assert cid.startswith("cell-"), f"Cell ID {cid} not in cell-N format" + + # IDs should be sequential starting at 1 + numbers = sorted(int(cid.split("-")[1]) for cid in cell_ids) + assert numbers == list(range(1, len(cell_ids) + 1)) + + def test_stitch_transcript_dedup(self, tmp_path: Path): + """Same transcript in 2 patches: assigned wins over noise.""" + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 600.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 500.0), + core_y=(0.0, 1000.0), + ) + p1 = _make_patch_info( + "patch_1", + 0, + 1, + global_x=(400.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(500.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0, p1]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # tx_dup appears in both patches. In patch_0 it's assigned, in patch_1 it's noise. 
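+ # A minimal sketch of the precedence rule this test exercises (hypothetical + # names; the real logic lives in stitch_transcript_assignments): keep one + # record per transcript_id and let an assigned record beat a noise record: + # best = {} + # for rec in rows: + # prev = best.get(rec["transcript_id"]) + # if prev is None or (prev["is_noise"] == "1" and rec["is_noise"] == "0"): + # best[rec["transcript_id"]] = rec 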
+ _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_dup", + "x": "450.0", + "y": "100.0", + "gene": "A", + "cell": "cell_1", + "is_noise": "0", + }, + { + "transcript_id": "tx_only0", + "x": "100.0", + "y": "100.0", + "gene": "B", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_0", + {"cell_1": Polygon([(50, 50), (500, 50), (500, 250), (50, 250)])}, + ) + + _write_patch_csv( + patches_dir / "patch_1", + [ + { + "transcript_id": "tx_dup", + "x": "50.0", + "y": "100.0", + "gene": "A", + "cell": "", + "is_noise": "1", + }, + { + "transcript_id": "tx_only1", + "x": "200.0", + "y": "200.0", + "gene": "C", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_1", + {"cell_1": Polygon([(150, 50), (350, 50), (350, 350), (150, 350)])}, + ) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + csv_out = output_dir / "xr-transcript-metadata.csv" + merged = pa_csv.read_csv(csv_out) + tid_col = merged.column("transcript_id").to_pylist() + cell_col = merged.column("cell").to_pylist() + + # tx_dup should appear exactly once + dup_count = tid_col.count("tx_dup") + assert dup_count == 1, f"tx_dup appears {dup_count} times, expected 1" + + # The kept version should be assigned (non-empty cell) + dup_idx = tid_col.index("tx_dup") + assert cell_col[dup_idx] != "", "tx_dup should be assigned, not noise" + + def test_stitch_noise_spatial_reassignment(self, tmp_path: Path): + """Noise transcript inside a resolved cell polygon gets assigned.""" + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 600.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 600.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # tx_noise is at (150, 150) local -> (150, 150) global, inside the cell polygon + _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_assigned", + "x": "100.0", + "y": "100.0", + "gene": "A", + "cell": "cell_1", + "is_noise": "0", + }, + { + "transcript_id": "tx_noise", + "x": "150.0", + "y": "150.0", + "gene": "B", + "cell": "", + "is_noise": "1", + }, + ], + ) + # Cell polygon covers (50,50) to (250,250) in local coords -> global same since origin is 0 + _write_patch_geojson( + patches_dir / "patch_0", + {"cell_1": Polygon([(50, 50), (250, 50), (250, 250), (50, 250)])}, + ) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + csv_out = output_dir / "xr-transcript-metadata.csv" + merged = pa_csv.read_csv(csv_out) + tid_col = merged.column("transcript_id").to_pylist() + cell_col = merged.column("cell").to_pylist() + + noise_idx = tid_col.index("tx_noise") + assert cell_col[noise_idx] != "", ( + "tx_noise should be spatially reassigned to a cell" + ) + + def test_stitch_geojson_not_found(self, tmp_path: Path): + """When GeoJSON doesn't exist, stitch should still work (transcript-only).""" + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # Write CSV but no GeoJSON + _write_patch_csv( + patches_dir / "patch_0", + [ + { + 
"transcript_id": "tx_1", + "x": "100.0", + "y": "100.0", + "gene": "A", + "cell": "cell_1", + "is_noise": "0", + }, + ], + ) + + output_dir = tmp_path / "output" + # Should not raise + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + # No geojson output (no polygons to write) + geo_out = output_dir / "xr-cell-polygons.geojson" + assert not geo_out.exists() + + +# --------------------------------------------------------------------------- +# Helper function tests +# --------------------------------------------------------------------------- + + +class TestReadGeoJSON: + def test_read_geojson_feature_collection(self, tmp_path: Path): + """Standard FeatureCollection is returned as-is.""" + geojson = { + "type": "FeatureCollection", + "features": [ + { + "type": "Feature", + "id": "cell_1", + "geometry": mapping(Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])), + "properties": {"cell_id": "cell_1"}, + } + ], + } + path = tmp_path / "test.geojson" + with open(path, "w") as f: + json.dump(geojson, f) + + result = read_geojson(path) + assert result["type"] == "FeatureCollection" + assert len(result["features"]) == 1 + assert result["features"][0]["id"] == "cell_1" + + def test_read_geojson_geometry_collection(self, tmp_path: Path): + """proseg's GeometryCollection format is normalized to FeatureCollection.""" + geojson = { + "type": "GeometryCollection", + "geometries": [ + { + "type": "Polygon", + "coordinates": [[[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]]], + "cell": 1, + }, + { + "type": "Polygon", + "coordinates": [[[20, 20], [30, 20], [30, 30], [20, 30], [20, 20]]], + "cell": 2, + }, + ], + } + path = tmp_path / "proseg.geojson" + with open(path, "w") as f: + json.dump(geojson, f) + + result = read_geojson(path) + assert result["type"] == "FeatureCollection" + assert len(result["features"]) == 2 + assert result["features"][0]["id"] == "1" + assert result["features"][1]["id"] == "2" + # geometry should not contain the 'cell' key + assert "cell" not in result["features"][0]["geometry"] + + +class TestTransformPolygons: + def test_transform_polygons_offset(self): + """Verify coordinate shift by (offset_x, offset_y).""" + geojson = { + "type": "FeatureCollection", + "features": [ + { + "type": "Feature", + "id": "cell_1", + "geometry": mapping(Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])), + "properties": {"cell_id": "cell_1"}, + } + ], + } + + shifted = transform_polygons(geojson, offset_x=100.0, offset_y=200.0) + + assert shifted["type"] == "FeatureCollection" + assert len(shifted["features"]) == 1 + + coords = shifted["features"][0]["geometry"]["coordinates"][0] + xs = [c[0] for c in coords] + ys = [c[1] for c in coords] + + assert min(xs) == pytest.approx(100.0) + assert max(xs) == pytest.approx(110.0) + assert min(ys) == pytest.approx(200.0) + assert max(ys) == pytest.approx(210.0) + + +class TestNormalizeGeometryCollection: + def test_empty_geometry_collection(self): + """Empty GeometryCollection returns empty FeatureCollection.""" + result = _normalize_geometry_collection( + {"type": "GeometryCollection", "geometries": []} + ) + assert result["type"] == "FeatureCollection" + assert result["features"] == [] + + def test_string_cell_id_passthrough(self): + """Non-integer cell key is passed through as string.""" + geojson = { + "type": "GeometryCollection", + "geometries": [ + { + "type": "Polygon", + "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]], + "cell": "custom-id", + } + ], + } + result = 
_normalize_geometry_collection(geojson) + assert result["features"][0]["id"] == "custom-id" + + +# --------------------------------------------------------------------------- +# Baysor native format tests (empty cell column, mismatched IDs) +# --------------------------------------------------------------------------- + + +class TestBaysorNativeFormat: + def test_baysor_empty_cell_column(self, tmp_path: Path): + """Baysor native output: cell column is empty, GeoJSON has integer IDs. + + This is the core bug that spatial containment fixes. Previously, the + ID-matching approach would skip all polygons because no CSV cell values + matched the GeoJSON cell IDs. + """ + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0]) + + patches_dir = tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # Baysor CSV: cell column is EMPTY, cell_id has string labels + _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_1", + "cell_id": "higeahke-1", + "x": "100.0", + "y": "100.0", + "gene": "GeneA", + "cell": "", + "is_noise": "0", + }, + { + "transcript_id": "tx_2", + "cell_id": "higeahke-1", + "x": "150.0", + "y": "150.0", + "gene": "GeneB", + "cell": "", + "is_noise": "0", + }, + { + "transcript_id": "tx_3", + "cell_id": "", + "x": "800.0", + "y": "800.0", + "gene": "GeneC", + "cell": "", + "is_noise": "1", + }, + ], + ) + + # GeoJSON: GeometryCollection with integer cell keys (proseg format) + # Polygon covers (50,50)-(250,250), so tx_1 and tx_2 are inside, tx_3 is outside + geojson = { + "type": "GeometryCollection", + "geometries": [ + { + "type": "Polygon", + "coordinates": [ + [[50, 50], [250, 50], [250, 250], [50, 250], [50, 50]] + ], + "cell": 4986, + } + ], + } + patch_dir = patches_dir / "patch_0" + patch_dir.mkdir(parents=True, exist_ok=True) + with open(patch_dir / "segmentation_polygons.json", "w") as f: + json.dump(geojson, f) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + csv_out = output_dir / "xr-transcript-metadata.csv" + assert csv_out.exists(), "CSV output should be written" + + geo_out = output_dir / "xr-cell-polygons.geojson" + assert geo_out.exists(), "GeoJSON output should be written" + + merged = pa_csv.read_csv(csv_out) + tid_col = merged.column("transcript_id").to_pylist() + cell_col = merged.column("cell").to_pylist() + + # tx_1 and tx_2 should be assigned to a cell (spatially inside polygon) + for tx_id in ["tx_1", "tx_2"]: + idx = tid_col.index(tx_id) + assert cell_col[idx] != "", ( + f"{tx_id} should be assigned via spatial containment" + ) + assert cell_col[idx].startswith("cell-"), ( + f"{tx_id} should have global ID format" + ) + + # tx_3 should be noise (outside polygon) + tx3_idx = tid_col.index("tx_3") + assert cell_col[tx3_idx] == "", "tx_3 should remain noise (outside polygon)" + + def test_baysor_two_patches_empty_cell(self, tmp_path: Path): + """Two patches with Baysor native format: spatial assignment across patches.""" + p0 = _make_patch_info( + "patch_0", + 0, + 0, + global_x=(0.0, 525.0), + global_y=(0.0, 1000.0), + core_x=(0.0, 500.0), + core_y=(0.0, 1000.0), + ) + p1 = _make_patch_info( + "patch_1", + 0, + 1, + global_x=(475.0, 1000.0), + global_y=(0.0, 1000.0), + core_x=(500.0, 1000.0), + core_y=(0.0, 1000.0), + ) + metadata = _make_metadata([p0, p1]) + + patches_dir = 
tmp_path / "patches" + _write_grid_json(metadata, patches_dir / "patch_grid.json") + + # Patch 0: cell column empty, transcript at (100,100) + _write_patch_csv( + patches_dir / "patch_0", + [ + { + "transcript_id": "tx_1", + "cell_id": "abc-1", + "x": "100.0", + "y": "100.0", + "gene": "A", + "cell": "", + "is_noise": "0", + }, + ], + ) + # Polygon at (50,50)-(250,250) in local coords + _write_patch_geojson( + patches_dir / "patch_0", + {"anything": Polygon([(50, 50), (250, 50), (250, 250), (50, 250)])}, + ) + + # Patch 1: cell column empty, transcript at (100,100) local -> (575,100) global + _write_patch_csv( + patches_dir / "patch_1", + [ + { + "transcript_id": "tx_2", + "cell_id": "xyz-1", + "x": "100.0", + "y": "100.0", + "gene": "B", + "cell": "", + "is_noise": "0", + }, + ], + ) + _write_patch_geojson( + patches_dir / "patch_1", + {"whatever": Polygon([(50, 50), (200, 50), (200, 200), (50, 200)])}, + ) + + output_dir = tmp_path / "output" + stitch_transcript_assignments( + patches_dir=patches_dir, + output_dir=output_dir, + max_workers=1, + ) + + csv_out = output_dir / "xr-transcript-metadata.csv" + assert csv_out.exists() + + merged = pa_csv.read_csv(csv_out) + tid_col = merged.column("transcript_id").to_pylist() + cell_col = merged.column("cell").to_pylist() + + # Both transcripts should be assigned + for tx_id in ["tx_1", "tx_2"]: + idx = tid_col.index(tx_id) + assert cell_col[idx] != "", f"{tx_id} should be assigned" + assert cell_col[idx].startswith("cell-") + + # They should be in different cells + tx1_cell = cell_col[tid_col.index("tx_1")] + tx2_cell = cell_col[tid_col.index("tx_2")] + assert tx1_cell != tx2_cell, ( + "Transcripts in different patches should have different cells" + ) + + geo_out = output_dir / "xr-cell-polygons.geojson" + assert geo_out.exists() + with open(geo_out) as f: + geo = json.load(f) + assert len(geo["features"]) == 2 diff --git a/tower.yml b/tower.yml new file mode 100644 index 00000000..787aedfe --- /dev/null +++ b/tower.yml @@ -0,0 +1,5 @@ +reports: + multiqc_report.html: + display: "MultiQC HTML report" + samplesheet.csv: + display: "Auto-created samplesheet with collated metadata and FASTQ paths" diff --git a/workflows/spatialxe.nf b/workflows/spatialxe.nf index c4687ea3..b0eff48c 100644 --- a/workflows/spatialxe.nf +++ b/workflows/spatialxe.nf @@ -1,56 +1,50 @@ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - VALIDATE INPUTS + IMPORT MODULES / SUBWORKFLOWS / FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -def summary_params = NfcoreSchema.paramsSummaryMap(workflow, params) +// multiqc +include { MULTIQC } from '../modules/nf-core/multiqc/main' +include { MULTIQC as MULTIQC_PRE_XR_RUN } from '../modules/nf-core/multiqc/main' +include { MULTIQC as MULTIQC_POST_XR_RUN } from '../modules/nf-core/multiqc/main' +include { paramsSummaryMultiqc } from '../subworkflows/nf-core/utils_nfcore_pipeline' -// Validate input parameters -WorkflowSpatialxe.initialise(params, log) +// nf-core functionality +include { softwareVersionsToYAML } from '../subworkflows/nf-core/utils_nfcore_pipeline' +include { methodsDescriptionText } from '../subworkflows/local/utils_nfcore_spatialxe_pipeline' +include { paramsSummaryMap } from 'plugin/nf-schema' -// TODO nf-core: Add all file path parameters for the pipeline to the list below -// Check input path parameters to see if they exist -def checkPathParamList = [ params.input, params.multiqc_config, params.fasta ] -for (param 
in checkPathParamList) { if (param) { file(param, checkIfExists: true) } } +// nf-core modules +include { UNTAR } from '../modules/nf-core/untar/main' -// Check mandatory parameters -if (params.input) { ch_input = file(params.input) } else { exit 1, 'Input samplesheet not specified!' } +// coordinate-based segmentation subworkflows +include { SEGGER_CREATE_TRAIN_PREDICT } from '../subworkflows/local/segger_create_train_predict/main' +include { PROSEG_PRESET_PROSEG2BAYSOR } from '../subworkflows/local/proseg_preset_proseg2baysor/main' +include { PROSEG_PRESET_PROSEG2BAYSOR_TILED } from '../subworkflows/local/proseg_preset_proseg2baysor_tiled/main' +include { BAYSOR_GENERATE_PREVIEW } from '../subworkflows/local/baysor_generate_preview/main' +include { BAYSOR_RUN_TRANSCRIPTS_PARQUET } from '../subworkflows/local/baysor_run_transcripts_parquet/main' -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - CONFIG FILES -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ +// image-based segmentation subworkflows +include { BAYSOR_RUN_PRIOR_SEGMENTATION_MASK } from '../subworkflows/local/baysor_run_prior_segmentation_mask/main' +include { CELLPOSE_RESOLIFT_MORPHOLOGY_OME_TIF } from '../subworkflows/local/cellpose_resolift_morphology_ome_tif/main' +include { CELLPOSE_BAYSOR_IMPORT_SEGMENTATION } from '../subworkflows/local/cellpose_baysor_import_segmentation/main' +include { STARDIST_RESOLIFT_MORPHOLOGY_OME_TIF } from '../subworkflows/local/stardist_resolift_morphology_ome_tif/main' +include { XENIUMRANGER_RESEGMENT_MORPHOLOGY_OME_TIF } from '../subworkflows/local/xeniumranger_resegment_morphology_ome_tif/main' -ch_multiqc_config = Channel.fromPath("$projectDir/assets/multiqc_config.yml", checkIfExists: true) -ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath( params.multiqc_config, checkIfExists: true ) : Channel.empty() -ch_multiqc_logo = params.multiqc_logo ? Channel.fromPath( params.multiqc_logo, checkIfExists: true ) : Channel.empty() -ch_multiqc_custom_methods_description = params.multiqc_methods_description ? 
file(params.multiqc_methods_description, checkIfExists: true) : file("$projectDir/assets/methods_description_template.yml", checkIfExists: true) +// segmentation-free subworkflows +include { BAYSOR_GENERATE_SEGFREE } from '../subworkflows/local/baysor_generate_segfree/main' +include { FICTURE_PREPROCESS_MODEL } from '../subworkflows/local/ficture_preprocess_model/main' -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - IMPORT LOCAL MODULES/SUBWORKFLOWS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ +// xeniumranger subworkflows +include { XENIUMRANGER_RELABEL_RESEGMENT } from '../subworkflows/local/xeniumranger_relabel_resegment/main' +include { XENIUMRANGER_IMPORT_SEGMENTATION_REDEFINE_BUNDLE } from '../subworkflows/local/xeniumranger_import_segmentation_redefine_bundle/main' -// -// SUBWORKFLOW: Consisting of a mix of local and nf-core/modules -// -include { INPUT_CHECK } from '../subworkflows/local/input_check' +// spatialdata subworkflows +include { SPATIALDATA_WRITE_META_MERGE } from '../subworkflows/local/spatialdata_write_meta_merge/main' -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - IMPORT NF-CORE MODULES/SUBWORKFLOWS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ - -// -// MODULE: Installed directly from nf-core/modules -// -include { FASTQC } from '../modules/nf-core/fastqc/main' -include { MULTIQC } from '../modules/nf-core/multiqc/main' -include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main' +// qc layer subworkflows +include { OPT_FLIP_TRACK_STAT } from '../subworkflows/local/opt_flip_track_stat/main' /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -58,75 +52,706 @@ include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoft ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -// Info required for completion email and summary -def multiqc_report = [] - workflow SPATIALXE { + take: + ch_samplesheet // channel: samplesheet read in from --input + alignment_csv + baysor_config + baysor_prior + baysor_scale + baysor_tiling + baysor_tiling_scale + buffer_samples + buffer_size + cell_segmentation_only + cellpose_downscale + cellpose_model + expansion_distance + features + gene_panel + gene_synonyms + max_x + max_y + method + min_qv + min_x + min_y + mode + multiqc_config + multiqc_logo + multiqc_methods_description + nucleus_segmentation_only + offtarget_probe_tracking + outdir + probes_fasta + qupath_polygons + reference_annotations + relabel_genes + run_qc + segger_model + segmentation_mask + sharpen_tiff + stardist_nuclei_model + tiling + xeniumranger_only + + main: + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - GENERATE INPUT CHANNELS + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + ch_versions = channel.empty() + + ch_input = channel.empty() + ch_config = channel.empty() + ch_features = channel.value([]) + ch_raw_bundle = channel.empty() + ch_gene_panel = channel.empty() + ch_qc_reports = channel.empty() + ch_bundle_path = channel.empty() + ch_preview_html = channel.empty() + ch_exp_metadata = channel.empty() + ch_gene_synonyms = channel.empty() + ch_multiqc_files = channel.empty() + ch_multiqc_report = channel.empty() + 
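// NOTE: all result channels here are initialised empty so that the MultiQC wiring and the emit block at the end resolve in every mode; each mode/method block below re-assigns only the channels it actually produces. + 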
ch_qupath_polygons = channel.empty() + ch_morphology_image = channel.empty() + ch_redefined_bundle = channel.empty() + ch_coordinate_space = channel.empty() + ch_panel_probes_fasta = channel.empty() + ch_transcripts_file = channel.empty() + ch_reference_annotations = channel.empty() + ch_multiqc_pre_xr_report = channel.empty() + ch_multiqc_post_xr_report = channel.empty() + + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - DATA STAGING + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + if (workflow.profile.contains('test')) { + + // get sample, xenium bundle and image path + ch_input_untar = ch_samplesheet.map { meta, bundle, _image -> + return [meta, bundle] + } + + // get testdata + UNTAR(ch_input_untar) + + ch_untar_outs = UNTAR.out.untar.map { meta, bundle -> + return [meta, bundle.toString()] + } + + ch_samplesheet + .combine(ch_untar_outs, by: 0) + .map { meta, _url, image, test_bundle -> + return [meta, test_bundle, image] + } + .set { ch_input } + } + else { + // for all other profile runs + + // check if samples are buffered + if (buffer_samples) { + ch_input = ch_samplesheet.buffer(size: buffer_size).map { buffered_sample -> + def (meta, bundle, tif) = buffered_sample[0] + tuple(meta, bundle, tif) + } + } + else { + ch_input = ch_samplesheet + } + } + + // path to bundle input + ch_bundle_path = ch_input.map { meta, bundle, _image -> + + def bundle_path = file(bundle) + if( !bundle_path.exists() ) { + log.error("❌ Check if the path to the xenium bundle exists.") + exit(1) + } + return [meta, bundle] + } + + // get transcripts.parquet from the xenium bundle + ch_transcripts_file = ch_input.map { meta, bundle, _image -> + def transcripts_parquet = file( + bundle.toString().replaceFirst(/\/$/, '') + "/transcripts.parquet", + checkIfExists: true + ) + return [meta, transcripts_parquet] + } + + // get morphology focus image from the xenium bundle (single 2D plane) + // supports all Xenium versions: + // v2/v3: morphology_focus/morphology_focus_0000.ome.tif + // v4+: morphology_focus/ch0000_dapi.ome.tif + // v1.x: morphology_focus.ome.tif (single file at bundle root) + // fallback: morphology.ome.tif (multi-Z stack, not ideal for Cellpose) + ch_morphology_image = ch_input.map { meta, bundle, image -> + def morphology_img + if (image) { + morphology_img = file(image) + } else { + def bundle_path = bundle.toString().replaceFirst(/\/$/, '') + def focus_v3 = file("${bundle_path}/morphology_focus/morphology_focus_0000.ome.tif") + def focus_v4 = file("${bundle_path}/morphology_focus/ch0000_dapi.ome.tif") + def focus_v1 = file("${bundle_path}/morphology_focus.ome.tif") + if (focus_v3.exists()) { + morphology_img = focus_v3 + } else if (focus_v4.exists()) { + morphology_img = focus_v4 + } else if (focus_v1.exists()) { + morphology_img = focus_v1 + } else { + morphology_img = file("${bundle_path}/morphology.ome.tif", checkIfExists: true) + } + } + return [meta, morphology_img] + } + + // get experiment metadata - experiment.xenium + ch_exp_metadata = ch_input.map { meta, bundle, _image -> + def exp_metadata = file( + bundle.toString().replaceFirst(/\/$/, '') + "/experiment.xenium", + checkIfExists: true + ) + return [meta, exp_metadata] + } + + // get baysor xenium config + ch_config = channel.fromPath( + "${projectDir}/assets/config/xenium.toml", + checkIfExists: true + ) + .flatten() + + // get segmentation mask if provided with --segmentation_mask for the baysor method + if (segmentation_mask) { + ch_segmentation_mask = channel.fromPath( + segmentation_mask, + checkIfExists: true + ) + .flatten() + }
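+ + // Optional single-file inputs below all follow the same staging pattern + // (sketch; `param_x` / `ch_x` are placeholder names): + // ch_x = param_x + // ? channel.fromPath(param_x, checkIfExists: true).flatten() + // : channel.empty() // ch_features defaults to channel.value([]) instead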
+ + // get a list of features if provided via --features for the ficture method + ch_features = features + ? channel.fromPath(features, checkIfExists: true).flatten() + : channel.value([]) + + // get custom cellpose model if provided via --cellpose_model for the cellpose method + if (cellpose_model) { + ch_cellpose_model = channel.fromPath( + cellpose_model, + checkIfExists: true + ) + .flatten() + } + + // get panel probes fasta for off-target-probe tracking + if (probes_fasta) { + ch_panel_probes_fasta = channel.fromPath( + probes_fasta, + checkIfExists: true + ) + .flatten() + } + + // get reference annotation files (gff,fa) for off-target-probe tracking + if (reference_annotations) { + ch_reference_annotations = channel.fromPath( + "${reference_annotations}/*.{fa,gff}".toString(), + checkIfExists: true + ) + .flatten() + } + + // get gene synonyms for off-target-probe tracking + if (gene_synonyms) { + ch_gene_synonyms = channel.fromPath( + gene_synonyms, + checkIfExists: true + ) + .flatten() + } + + // get qupath polygons + if (qupath_polygons) { + ch_qupath_polygons = channel.fromPath( + "${qupath_polygons}/*.geojson", + checkIfExists: true + ) + .flatten() + } + + // get gene_panel.json if provided with --gene_panel, sets relabel_genes to true + def do_relabel = gene_panel ? true : relabel_genes + if (gene_panel) { + + def gene_panel_file = file(gene_panel, checkIfExists: true) + ch_gene_panel = ch_input.map { meta, _bundle, _image -> + return [meta, gene_panel_file] + } + } + else { + + // gene panel to use if only --relabel_genes is provided + ch_gene_panel = ch_input.map { meta, bundle, _image -> + def gene_panel_file = file( + bundle.toString().replaceFirst(/\/$/, '') + "/gene_panel.json", + checkIfExists: true + ) + return [meta, gene_panel_file] + } + } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - RELABEL GENES + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + // run xr relabel if relabel_genes is true or a custom gene_panel.json is provided + if (do_relabel) { + + XENIUMRANGER_RELABEL_RESEGMENT( + ch_bundle_path, + ch_gene_panel, + ) + ch_raw_bundle = XENIUMRANGER_RELABEL_RESEGMENT.out.redefined_bundle + } + else { + ch_raw_bundle = ch_bundle_path + } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - DATA PREVIEW + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + // run baysor preview if mode is set to 'preview' + if (mode == 'preview') { + + BAYSOR_GENERATE_PREVIEW( + ch_transcripts_file, + ch_config, + ) + ch_preview_html = BAYSOR_GENERATE_PREVIEW.out.preview_html + } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - XENIUMRANGER LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + // run only xeniumranger import-segmentation with changed xr-specific params + if (mode == 'image' && xeniumranger_only) { + + XENIUMRANGER_IMPORT_SEGMENTATION_REDEFINE_BUNDLE( + ch_bundle_path, + alignment_csv, + expansion_distance, + nucleus_segmentation_only, + qupath_polygons, + ) + ch_redefined_bundle = XENIUMRANGER_IMPORT_SEGMENTATION_REDEFINE_BUNDLE.out.redefined_bundle + ch_coordinate_space = XENIUMRANGER_IMPORT_SEGMENTATION_REDEFINE_BUNDLE.out.coordinate_space + }
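+ + // Mode/method dispatch implemented below (summary of the blocks that follow): + // mode == 'image' : cellpose+baysor by default, or method in { xeniumranger, baysor, cellpose, stardist } + // mode == 'coordinate' : proseg by default (optionally tiled), or method in { segger, baysor } + // mode == 'segfree' : baysor by default, or method == 'ficture' + // mode == 'preview' / 'qc' : preview html (above) / off-target probe tracking (below)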
+ + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - IMAGE-BASED SEGMENTATION LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + if (mode == 'image') { + + // trigger the default image-based workflow if no method is specified + if (!method) { + + CELLPOSE_BAYSOR_IMPORT_SEGMENTATION( + ch_morphology_image, + ch_bundle_path, + ch_transcripts_file, + ch_exp_metadata, + ch_config, + cell_segmentation_only, + cellpose_model, + max_x, + max_y, + min_qv, + min_x, + min_y, + nucleus_segmentation_only, + sharpen_tiff, + stardist_nuclei_model, + ) + ch_redefined_bundle = CELLPOSE_BAYSOR_IMPORT_SEGMENTATION.out.redefined_bundle + ch_coordinate_space = CELLPOSE_BAYSOR_IMPORT_SEGMENTATION.out.coordinate_space + } + + // run xeniumranger resegment with morphology_ome.tif + if (method == 'xeniumranger') { + + XENIUMRANGER_RESEGMENT_MORPHOLOGY_OME_TIF( + ch_bundle_path, + nucleus_segmentation_only, + ) + ch_redefined_bundle = XENIUMRANGER_RESEGMENT_MORPHOLOGY_OME_TIF.out.redefined_bundle + ch_coordinate_space = XENIUMRANGER_RESEGMENT_MORPHOLOGY_OME_TIF.out.coordinate_space + } + + // run baysor with a prior segmentation mask (requires --segmentation_mask) + if (method == 'baysor') { - ch_versions = Channel.empty() + if (segmentation_mask) { + BAYSOR_RUN_PRIOR_SEGMENTATION_MASK( + ch_bundle_path, + ch_transcripts_file, + ch_segmentation_mask, + ch_config, + max_x, + max_y, + min_qv, + min_x, + min_y, + ) + // assign outputs only when the process was actually invoked + ch_redefined_bundle = BAYSOR_RUN_PRIOR_SEGMENTATION_MASK.out.redefined_bundle + ch_coordinate_space = BAYSOR_RUN_PRIOR_SEGMENTATION_MASK.out.coordinate_space + } + } - // - // SUBWORKFLOW: Consisting of a mix of local and nf-core/modules - // - INPUT_CHECK ( - ch_input + // run cellpose on the morphology_ome.tif + if (method == 'cellpose') { + + CELLPOSE_RESOLIFT_MORPHOLOGY_OME_TIF( + ch_morphology_image, + ch_bundle_path, + cellpose_downscale, + cellpose_model, + nucleus_segmentation_only, + sharpen_tiff, + stardist_nuclei_model, + ) + ch_redefined_bundle = CELLPOSE_RESOLIFT_MORPHOLOGY_OME_TIF.out.redefined_bundle + ch_coordinate_space = CELLPOSE_RESOLIFT_MORPHOLOGY_OME_TIF.out.coordinate_space + } + + // run stardist on the morphology_ome.tif + if (method == 'stardist') { + + STARDIST_RESOLIFT_MORPHOLOGY_OME_TIF( + ch_morphology_image, + ch_bundle_path, + sharpen_tiff, + stardist_nuclei_model, + ) + ch_redefined_bundle = STARDIST_RESOLIFT_MORPHOLOGY_OME_TIF.out.redefined_bundle + ch_coordinate_space = STARDIST_RESOLIFT_MORPHOLOGY_OME_TIF.out.coordinate_space + } + } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - TRANSCRIPT-BASED SEGMENTATION LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + if (mode == 'coordinate') { + + // run proseg with transcripts.parquet if method is 'proseg' or not provided (default workflow) + if (!method || method == 'proseg') { + + if (tiling) { + PROSEG_PRESET_PROSEG2BAYSOR_TILED( + ch_bundle_path, + ch_transcripts_file, + ) + ch_redefined_bundle = PROSEG_PRESET_PROSEG2BAYSOR_TILED.out.redefined_bundle + ch_coordinate_space = PROSEG_PRESET_PROSEG2BAYSOR_TILED.out.coordinate_space + } else { + PROSEG_PRESET_PROSEG2BAYSOR( + ch_bundle_path, + ch_transcripts_file, + ) + ch_redefined_bundle = PROSEG_PRESET_PROSEG2BAYSOR.out.redefined_bundle + ch_coordinate_space = 
PROSEG_PRESET_PROSEG2BAYSOR.out.coordinate_space + } + } + + // run segger with transcripts.parquet + if (method == 'segger') { + + SEGGER_CREATE_TRAIN_PREDICT( + ch_bundle_path, + ch_transcripts_file, + segger_model, + ) + ch_redefined_bundle = SEGGER_CREATE_TRAIN_PREDICT.out.redefined_bundle + ch_coordinate_space = SEGGER_CREATE_TRAIN_PREDICT.out.coordinate_space + } + + // run baysor with transcripts.parquet (unified tiled/non-tiled subworkflow) + if (method == 'baysor') { + + // Image-based prior (cellpose mask) requires non-tiled Baysor + if ( baysor_tiling && baysor_prior == 'cellpose' ) { + error "ERROR: baysor_prior='cellpose' (image-based) requires baysor_tiling=false. " + + "For tiled Baysor, use baysor_prior='cells' (column-based)." + } + + ch_prior_mask = channel.empty() + + BAYSOR_RUN_TRANSCRIPTS_PARQUET( + ch_bundle_path, + ch_transcripts_file, + ch_morphology_image, + ch_config, + ch_prior_mask, + baysor_config, + baysor_scale, + baysor_tiling, + baysor_tiling_scale, + max_x, + max_y, + min_qv, + min_x, + min_y, + ) + ch_redefined_bundle = BAYSOR_RUN_TRANSCRIPTS_PARQUET.out.redefined_bundle + ch_coordinate_space = BAYSOR_RUN_TRANSCRIPTS_PARQUET.out.coordinate_space + } + } + + + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - SPATIALDATA / METADATA LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + // run spatialdata modules to generate sd objects in image or coordinate mode + if (mode == 'image' || mode == 'coordinate') { + + SPATIALDATA_WRITE_META_MERGE( + ch_bundle_path, + ch_redefined_bundle, + ch_coordinate_space, + cell_segmentation_only, + mode, + nucleus_segmentation_only, + ) + } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - QC LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + // check to run the qc layer + if (mode == 'qc' || run_qc) { + + if (offtarget_probe_tracking) { + + // run off-target probe tracking + OPT_FLIP_TRACK_STAT( + ch_panel_probes_fasta, + ch_reference_annotations, + ch_gene_synonyms, + ) + } + } + + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - SEGMENTATION-FREE LAYER + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + if (mode == 'segfree') { + + // trigger the default segfree workflow if no method or if the method is baysor + if (!method || method == 'baysor') { + + BAYSOR_GENERATE_SEGFREE( + ch_transcripts_file, + ch_config, + max_x, + max_y, + min_qv, + min_x, + min_y, + ) + } + + // run ficture with transcripts.parquet + if (method == 'ficture') { + + FICTURE_PREPROCESS_MODEL( + ch_transcripts_file, + ch_features, + features, + ) + } + } + + + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - COLLATE & SAVE SOFTWARE VERSIONS + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + // Collect versions published via topic channels (local modules) + ch_topic_versions = channel.topic('versions') + .map { process, tool, version -> + "\"${process}\":\n ${tool}: ${version}" + } + + softwareVersionsToYAML(ch_versions.mix(ch_topic_versions)) + .collectFile( + storeDir: "${outdir}/pipeline_info", + name: 'nf_core_' + 'spatialxe_software_' + 'mqc_' + 'versions.yml', + sort: true, + newLine: true, + ) + .set { 
ch_collated_versions } + + /* + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + SPATIALXE - MultiQC + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + ch_multiqc_config = channel.fromPath( + "${projectDir}/assets/multiqc_config.yml", + checkIfExists: true ) - ch_versions = ch_versions.mix(INPUT_CHECK.out.versions) - // - // MODULE: Run FastQC - // - FASTQC ( - INPUT_CHECK.out.reads + ch_multiqc_custom_config = multiqc_config + ? channel.fromPath(multiqc_config, checkIfExists: true) + : channel.empty() + + ch_multiqc_logo = multiqc_logo + ? channel.fromPath(multiqc_logo, checkIfExists: true) + : channel.empty() + + // Combine default and custom configs into a single list for the tuple-based MULTIQC input + ch_multiqc_configs = ch_multiqc_config.mix(ch_multiqc_custom_config).collect() + + summary_params = paramsSummaryMap( + workflow, + parameters_schema: "nextflow_schema.json" ) - ch_versions = ch_versions.mix(FASTQC.out.versions.first()) - CUSTOM_DUMPSOFTWAREVERSIONS ( - ch_versions.unique().collectFile(name: 'collated_versions.yml') + ch_workflow_summary = channel.value(paramsSummaryMultiqc(summary_params)) + + ch_multiqc_files = ch_multiqc_files.mix( + ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml') ) - // - // MODULE: MultiQC - // - workflow_summary = WorkflowSpatialxe.paramsSummaryMultiqc(workflow, summary_params) - ch_workflow_summary = Channel.value(workflow_summary) - - methods_description = WorkflowSpatialxe.methodsDescriptionText(workflow, ch_multiqc_custom_methods_description) - ch_methods_description = Channel.value(methods_description) - - ch_multiqc_files = Channel.empty() - ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml')) - ch_multiqc_files = ch_multiqc_files.mix(ch_methods_description.collectFile(name: 'methods_description_mqc.yaml')) - ch_multiqc_files = ch_multiqc_files.mix(CUSTOM_DUMPSOFTWAREVERSIONS.out.mqc_yml.collect()) - ch_multiqc_files = ch_multiqc_files.mix(FASTQC.out.zip.collect{it[1]}.ifEmpty([])) - - MULTIQC ( - ch_multiqc_files.collect(), - ch_multiqc_config.toList(), - ch_multiqc_custom_config.toList(), - ch_multiqc_logo.toList() + ch_multiqc_custom_methods_description = multiqc_methods_description + ? file(multiqc_methods_description, checkIfExists: true) + : file("${projectDir}/assets/methods_description_template.yml", checkIfExists: true) + + ch_methods_description = channel.value( + methodsDescriptionText(ch_multiqc_custom_methods_description) ) - multiqc_report = MULTIQC.out.report.toList() -} -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - COMPLETION EMAIL AND SUMMARY -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ + ch_multiqc_files = ch_multiqc_files.mix(ch_collated_versions) + + ch_multiqc_files = ch_multiqc_files.mix( + ch_methods_description.collectFile( + name: 'methods_description_mqc.yaml', + sort: true, + ) + ) + + if (mode == 'image' || mode == 'coordinate') { + + // get path to the raw bundle + ch_multiqc_files = ch_multiqc_files.mix( + ch_bundle_path.map { _meta, bundle -> file(bundle) }.collect().ifEmpty([]) + ) + + MULTIQC_PRE_XR_RUN ( + ch_multiqc_files.collect().map { [it] } + .combine(ch_multiqc_configs.map { [it] }) + .combine(ch_multiqc_logo.toList().map { [it] }) + .map { files, configs, logo -> + [ [id: 'multiqc_pre_xr'], files, configs, logo ? 
logo[0] : [], [], [] ] + } + ) + ch_multiqc_pre_xr_report = MULTIQC_PRE_XR_RUN.out.report.map { _meta, report -> report }.toList() + + // get path to the redefined bundle + ch_multiqc_files = ch_multiqc_files.mix( + ch_redefined_bundle.map { _meta, bundle -> file(bundle) }.collect().ifEmpty([]) + ) + + MULTIQC_POST_XR_RUN ( + ch_multiqc_files.collect().map { [it] } + .combine(ch_multiqc_configs.map { [it] }) + .combine(ch_multiqc_logo.toList().map { [it] }) + .map { files, configs, logo -> + [ [id: 'multiqc_post_xr'], files, configs, logo ? logo[0] : [], [], [] ] + } + ) + ch_multiqc_post_xr_report = MULTIQC_POST_XR_RUN.out.report.map { _meta, report -> report }.toList() + + } else { + + // get path to the raw bundle + ch_multiqc_files = ch_multiqc_files.mix( + ch_bundle_path.map { _meta, bundle -> file(bundle) }.collect().ifEmpty([]) + ) + + + // get the qc htmls if qc mode is run + if (mode == 'qc' || run_qc) { + + ch_multiqc_files = ch_multiqc_files.mix( + ch_qc_reports.map { _meta, qc_reports -> qc_reports }.collect().ifEmpty([]) + ) + + } + + + // get the preview html if preview mode is run + if (mode == 'preview') { + + ch_multiqc_files = ch_multiqc_files.mix( + ch_preview_html.map { _meta, preview_html -> preview_html }.collect().ifEmpty([]) + ) + + } + + + MULTIQC ( + ch_multiqc_files.collect().map { [it] } + .combine(ch_multiqc_configs.map { [it] }) + .combine(ch_multiqc_logo.toList().map { [it] }) + .map { files, configs, logo -> + [ [id: 'multiqc'], files, configs, logo ? logo[0] : [], [], [] ] + } + ) + ch_multiqc_report = MULTIQC.out.report.map { _meta, report -> report }.toList() -workflow.onComplete { - if (params.email || params.email_on_fail) { - NfcoreTemplate.email(workflow, params, summary_params, projectDir, log, multiqc_report) - } - NfcoreTemplate.summary(workflow, params, log) - if (params.hook_url) { - NfcoreTemplate.IM_notification(workflow, params, summary_params, projectDir, log) } -} -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - THE END -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ + emit: + multiqc_pre_xr_report = ch_multiqc_pre_xr_report // channel: /path/to/multiqc_report.html + multiqc_post_xr_report = ch_multiqc_post_xr_report // channel: /path/to/multiqc_report.html + multiqc_report = ch_multiqc_report // channel: /path/to/multiqc_report.html + versions = ch_versions // channel: [ path(versions.yml) ] +}