diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 91eec13a..00000000 --- a/.coveragerc +++ /dev/null @@ -1,6 +0,0 @@ -[run] -source=fairworkflows -branch=true - -[report] -omit=fairworkflows/templates/* diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 69af492f..00000000 --- a/.flake8 +++ /dev/null @@ -1,10 +0,0 @@ -[flake8] - -exclude = - .git, - __pycache__, - fairworkflows/templates/, - fairworkflows/old, - venv, - env - diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce7f09eb..155786e2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,31 +1,31 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: Python application - +name: Run tests on: [push, pull_request] jobs: - build: + tests: runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.7, 3.8, 3.9, '3.10'] steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.7 - uses: actions/setup-python@v1 + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: ${{ matrix.python-version }} + - name: Install dependencies run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - pip install -r requirements_dev.txt sudo apt-get install -y graphviz - - name: Create RSA key for nanopubs + pip install ".[test,dev]" + + - name: Setup nanopub profile (including RSA keys) run: | - printf '\n' | setup_nanopub_profile --orcid_id 'https://orcid.org/0000-0000-0000-0000' --no-publish --name 'test' + np setup --orcid-id https://orcid.org/0000-0000-0000-0000 --no-publish --name test --newkeys - name: Lint with flake8 run: | @@ -34,16 +34,19 @@ jobs: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest run: | - pip install pytest pytest --cov + - name: Publish coverage to Coveralls env: COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} + if: ${{ env.COVERALLS_REPO_TOKEN }} run: | coverage xml coveralls + - name: Build Sphinx docs run: | cd docs diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml index 73c1d0ee..3a4e1a01 100644 --- a/.github/workflows/pypi.yml +++ b/.github/workflows/pypi.yml @@ -1,4 +1,4 @@ -name: Publish +name: Publish to PyPI on: release: @@ -8,18 +8,20 @@ jobs: publish: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 + - name: Set up Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: - python-version: 3.7 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - python setup.py bdist_wheel - - name: Publish package - uses: pypa/gh-action-pypi-publish@master + python-version: "3.7" + + - name: Install build dependencies + run: pip install build + + - name: Build distribution + run: python -m build + + - name: Publish + uses: pypa/gh-action-pypi-publish@release/v1 with: - user: __token__ password: ${{ secrets.PYPI_FAIRWORKFLOWS_TOKEN }} diff --git a/.gitignore b/.gitignore index 6b9a49d3..4398c21f 100644 --- a/.gitignore +++ b/.gitignore @@ -32,6 +32,7 @@ docs/apidocs env/ venv/ +.venv/ sample_output/ .coveralls.yml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..09ba1b74 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,48 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-added-large-files + name: 🐘 Check for added large files + - id: check-toml + name: ✔️ Check TOML + - id: check-yaml + name: ✔️ Check YAML + args: + - --unsafe + - id: end-of-file-fixer + name: 🪚 Fix end of files + - id: trailing-whitespace + name: ✂️ Trim trailing whitespaces +- repo: https://github.com/asottile/pyupgrade + rev: v2.37.3 + hooks: + - id: pyupgrade + name: ⏫ Running pyupgrade + args: + - --py3-plus + - --keep-runtime-typing +- repo: https://github.com/myint/autoflake + rev: v1.5.3 + hooks: + - id: autoflake + name: ❄️ Running autoflake + args: + - --recursive + - --in-place + - --remove-all-unused-imports + - --remove-unused-variables + - --expand-star-imports + - --exclude + - __init__.py + - --remove-duplicate-keys +- repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + name: 🔄 Formatting imports with isort (python) +ci: + autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks + autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate diff --git a/CHANGELOG.md b/CHANGELOG.md index da887bd0..2a3e8627 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,22 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.4.0] - 2022-12-15 + +### Added + +* Migrate to use `pyproject.toml` with a `hatch` build backend +* Add pre-commit and a `.pre-commit-config.yaml` to make sure the files are properly formatted (similar to the nanopub package setup). 
+
+### Changed
+
+* Update the `nanopub` dependency to 2.0.0 (no need for Java anymore) and make the required changes.
+* Update the `rdflib` dependency to v6+
+
+### Removed
+
+* Configuration files in the root folder of the repository have been removed; their configuration has been moved into `pyproject.toml`: `setup.py`, `MANIFEST.in`, `pytest.ini`, `requirements.txt`, `requirements_dev.txt`, `.flake8`, `.coveragerc`
+
 ## [0.3.0] - 2021-06-25
 
 ### Added
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3f8786e0..23cc0b41 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -30,18 +30,54 @@ The sections below outline the steps in each case.
 1. (**important**) announce your plan to the rest of the community _before you start working_. This announcement should be in the form of a (new) issue;
 1. (**important**) wait until some kind of consensus is reached about your idea being a good idea;
 1. if needed, fork the repository to your own Github profile and create your own feature branch off of the latest master commit. While working on your feature branch, make sure to stay up to date with the master branch by pulling in changes, possibly from the 'upstream' repository (follow the instructions [here](https://help.github.com/articles/configuring-a-remote-for-a-fork/) and [here](https://help.github.com/articles/syncing-a-fork/));
-1. install the dependencies required to run `fairworkflows` in development:
-
-  ```bash
-  pip install -r requirements.txt
-  ```
-
+1. install dependencies (see instructions below)
+1. make your changes
 1. make sure the existing tests still work by running ``pytest``. Note that any pull requests to the fairworkflows repository on github will automatically trigger running of the test suite;
 1. check that the code is in accordance with the PEP8 style guide, by running ``flake8 . --count --show-source --statistics``,
-configuration is in `tox.ini`.
+   configuration is in `tox.ini`.
 1. add your own tests (if necessary);
 1. update or expand the documentation;
 1. [push](http://rogerdudler.github.io/git-guide/) your feature branch to (your fork of) the fairworkflows repository on GitHub;
 1. create the pull request, e.g. following the instructions [here](https://help.github.com/articles/creating-a-pull-request/).
 
 In case you feel like you've made a valuable contribution, but you don't know how to write or run tests for it, or how to generate the documentation: don't let this discourage you from making the pull request; we can help you! Just go ahead and submit the pull request, but keep in mind that you might be asked to append additional commits to your pull request.
+
+### Install dependencies
+
+Install [Hatch](https://hatch.pypa.io); it will automatically handle virtual environments and make sure all dependencies are installed when you run a script in the project:
+
+```bash
+pip install hatch
+```
+
+Install the dependencies in a virtual environment:
+
+```bash
+hatch -v env create
+```
+
+### Development workflow
+
+Run the example workflow defined in `examples/basic_workflow.py` (feel free to change this file if needed to try things out in development):
+
+```bash
+hatch run example
+```
+
+Make sure the existing tests still work by running ``pytest``. Note that any pull requests to the fairworkflows repository on GitHub will automatically trigger running of the test suite.
+
+```bash
+hatch run test
+```
+
+The code will be automatically formatted when you commit your changes using `pre-commit`.
But you can also run the script to format the code yourself: + +``` +hatch run format +``` + +Check the code for errors, and if it is in accordance with the PEP8 style guide, by running `flake8` and `mypy`: + +``` +hatch run check +``` diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 339ca4d8..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include README.md -include CITATION.cff -include fairworkflows/_version.py -include requirements.txt -include requirements_dev.txt -include fairworkflows/resources/* diff --git a/README.md b/README.md index 1ea1a299..0ccaad80 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Build Status](https://github.com/fair-workflows/fairworkflows/workflows/Python%20application/badge.svg) +[![Build Status](https://github.com/fair-workflows/fairworkflows/actions/workflows/build.yml/badge.svg)](https://github.com/fair-workflows/fairworkflows/actions/workflows/build.yml) [![Documentation Status](https://readthedocs.org/projects/fairworkflows/badge/?version=latest)](https://fairworkflows.readthedocs.io/en/latest/?badge=latest) [![Coverage Status](https://coveralls.io/repos/github/fair-workflows/fairworkflows/badge.svg?branch=main)](https://coveralls.io/github/fair-workflows/fairworkflows?branch=main) [![PyPI version](https://badge.fury.io/py/fairworkflows.svg)](https://badge.fury.io/py/fairworkflows) @@ -9,12 +9,12 @@ # ```fairworkflows``` python library `fairworkflows` is a high-level, user-friendly python library that supports the construction, -manipulation and publishing of FAIR scientific workflows using semantic technologies. +manipulation and publishing of FAIR scientific workflows using semantic technologies. ## Background -`fairworkflows` is developed as a component of the FAIR Workbench, as part of the FAIR is FAIR project. +`fairworkflows` is developed as a component of the FAIR Workbench, as part of the FAIR is FAIR project. -The focus is on description of workflows consisting of manual and computational steps using semantic technology, +The focus is on description of workflows consisting of manual and computational steps using semantic technology, such as the ontology described in the publication: _Celebi, R., Moreira, J. R., Hassan, A. A., Ayyar, S., Ridder, L., Kuhn, T., & Dumontier, M. (2019). Towards FAIR protocols and workflows: The OpenPREDICT case study._ [_arXiv:1911.09531._](https://arxiv.org/abs/1911.09531) @@ -34,14 +34,15 @@ Checkout the [user documentation](https://fairworkflows.readthedocs.io/). The most recent release can be installed from the python package index using ```pip```: -``` +```bash pip install fairworkflows ``` To publish workflows to the nanopub server you need to setup your nanopub profile. This allows the nanopub server to identify you. Run the following in the terminal after installation: -``` -setup_nanopub_profile + +```bash +np setup ``` This will add and store RSA keys to sign your nanopublications, publish a nanopublication with your name and ORCID iD to declare that you are @@ -60,7 +61,7 @@ from fairworkflows import is_fairworkflow, is_fairstep, FairWorkflow ### Define a step for your workflow Mark a function as a FAIR step using the `is_fairstep` decorator. -Use keyword arguments to semantically annotate the step. +Use keyword arguments to semantically annotate the step. In this example to provide a label and describe that this is a script task. 
 ```python
 @is_fairstep(label='Addition', is_script_task=True)
@@ -69,7 +70,7 @@ def add(x: float, y: float) -> float:
     return x + y
 ```
 ### Define your workflow
-Define your workflow by calling previously defined step functions. 
+Define your workflow by calling previously defined step functions.
 Mark the function as a workflow using the `is_fairworkflow` decorator.
 ```python
 @is_fairworkflow(label='My Workflow')
@@ -107,3 +108,7 @@ It is expected that the library will soon interact with FAIR Data Points as well
 
 ## Relation to existing workflow formats/engines (e.g. CWL, WDL, Snakemake etc)
 This library is not intended to replace or compete with the hundreds of existing computational workflow formats, but rather to aid in RDF description and comparison of workflows in the most general sense of the term (including manual experiemental steps, notebooks, and so on). Steps in a FAIRWorkflow may very well be 'run this CWL workflow' or 'run this script', so such workflows are expected to sit more on a meta-level, describing the before-and-after of running one of these fully automated computational workflows as well.
+
+## Contribute
+
+If you would like to know how to contribute, or learn how to run the project in development, feel free to check the documentation at [CONTRIBUTING.md](https://github.com/fair-workflows/fairworkflows/blob/main/CONTRIBUTING.md)
diff --git a/docs/index.rst b/docs/index.rst
index 89147745..770c7100 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -56,7 +56,7 @@ following in the terminal after installation:
 
 ::
 
-   setup_nanopub_profile
+   np setup
 
 This will add and store RSA keys to sign your nanopublications, publish a
 nanopublication with your name and ORCID iD to declare that you are
diff --git a/examples/basic_workflow.py b/examples/basic_workflow.py
new file mode 100644
index 00000000..687c4855
--- /dev/null
+++ b/examples/basic_workflow.py
@@ -0,0 +1,78 @@
+import logging
+
+from fairworkflows import FairWorkflow, is_fairstep, is_fairworkflow
+
+logger = logging.getLogger()
+logger.setLevel(logging.ERROR)
+
+print("📋️ Running basic workflow example in examples/basic_workflow.py")
+
+@is_fairstep(label='Addition')
+def add(a: float, b: float) -> float:
+    """Adding up numbers."""
+    return a + b
+
+@is_fairstep(label='Subtraction')
+def sub(a: float, b: float) -> float:
+    """Subtracting numbers."""
+    return a - b
+
+@is_fairstep(label='Multiplication')
+def mul(a: float, b: float) -> float:
+    """Multiplying numbers."""
+    return a * b
+
+@is_fairstep(label='A strange step with little use')
+def weird(a: float, b: float) -> float:
+    """A weird function."""
+    return a * 2 + b * 4
+
+
+@is_fairworkflow(label='My Workflow')
+def my_workflow(in1, in2, in3):
+    """
+    A simple addition, subtraction, multiplication workflow
+    """
+    t1 = add(in1, in2)  # 5
+    t2 = sub(in1, in2)  # -3
+    t3 = weird(t1, in3)  # 10 + 12 = 22
+    t4 = mul(t3, t2)  # 22 * -3 = -66
+    return t4
+
+
+fw = FairWorkflow.from_function(my_workflow)
+
+result, prov = fw.execute(1, 4, 3)
+# When unpublished, it generates a URI starting with http://www.example.org/unpublished
+# Should we change this to the temp nanopub URI?
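+# `result` should hold the workflow output: for inputs (1, 4, 3) that is add=5, sub=-3, weird(5, 3)=22, mul(22, -3)=-66.
+# `prov` should hold the retrospective provenance of this run, which is published and iterated over below.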
+ +fw.publish_as_nanopub(use_test_server=True, publish_steps=True) + +for step in fw: + print("STEP FW") + print(step) + +prov.publish_as_nanopub(use_test_server=True) + +fw._rdf.serialize(f"examples/basic_workflow.workflow.trig", format="trig") +prov._rdf.serialize(f"examples/basic_workflow.prov.trig", format="trig") + +for step_prov in prov: + print("STEP PROV") + print(step_prov) + + +# Not working with nanopubs due to conflict with the graphs using the same namespaces +# def merge_graphs(graph_list): +# """Merge RDFLib graphs while preserving the contexts""" +# merged = ConjunctiveGraph() +# for g in graph_list: +# for c in g.contexts(): +# for s, p, o, cont in g.quads((None, None, None, c)): +# merged.add((s, p, o, c)) +# return merged +# to_merge = [fw._rdf, prov._rdf] +# for step_prov in prov: +# to_merge.append(step_prov._rdf) +# g = merge_graphs(to_merge) +# g.serialize(f"examples/basic_workflow.trig", format="trig") diff --git a/fairworkflows/_version.py b/fairworkflows/_version.py index 493f7415..6a9beea8 100644 --- a/fairworkflows/_version.py +++ b/fairworkflows/_version.py @@ -1 +1 @@ -__version__ = "0.3.0" +__version__ = "0.4.0" diff --git a/fairworkflows/fairstep.py b/fairworkflows/fairstep.py index 48afbb12..a2ec5782 100644 --- a/fairworkflows/fairstep.py +++ b/fairworkflows/fairstep.py @@ -1,23 +1,20 @@ import functools -import sys import inspect import typing from copy import deepcopy -from typing import Callable, get_type_hints, List, Union -from urllib.parse import urldefrag from datetime import datetime +from typing import Callable, List, Union, get_type_hints +from urllib.parse import urldefrag from warnings import warn import noodles import rdflib -from rdflib import RDF, RDFS, DCTERMS +from rdflib import RDF, RDFS -from fairworkflows import namespaces, LinguisticSystem, LINGSYS_ENGLISH, LINGSYS_PYTHON -from fairworkflows.config import DUMMY_FAIRWORKFLOWS_URI, IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME, \ - LOGGER, WARN_FOR_TYPE_HINTING -from fairworkflows.prov import prov_logger, StepRetroProv +from fairworkflows import LINGSYS_ENGLISH, LINGSYS_PYTHON, LinguisticSystem, manual_assistant, namespaces +from fairworkflows.config import DUMMY_FAIRWORKFLOWS_URI, IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME, WARN_FOR_TYPE_HINTING +from fairworkflows.prov import StepRetroProv, prov_logger from fairworkflows.rdf_wrapper import RdfWrapper, replace_in_rdf -from fairworkflows import manual_assistant class FairVariable: @@ -400,7 +397,7 @@ def __str__(self): Returns string representation of this FairStep object. """ s = f'Step URI = {self._uri}\n' - s += self._rdf.serialize(format='trig').decode('utf-8') + s += self._rdf.serialize(format='trig') return s @@ -455,6 +452,8 @@ def _modify_function(func): inputs = _extract_inputs_from_function(func, kwargs) outputs = _extract_outputs_from_function(func, kwargs) + # TODO: use the regular nanopub temp URI instead of example.org? 
+ # fairstep = FairStep(uri='http://purl.org/nanopub/temp/'+func.__name__, fairstep = FairStep(uri='http://www.example.org/unpublished-'+func.__name__, label=label, description=description, diff --git a/fairworkflows/fairworkflow.py b/fairworkflows/fairworkflow.py index 11a01a95..7d6e2e02 100644 --- a/fairworkflows/fairworkflow.py +++ b/fairworkflows/fairworkflow.py @@ -1,11 +1,9 @@ import inspect -import io -import logging import warnings from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory -from typing import Iterator, Optional, Callable +from typing import Callable, Iterator, Optional import networkx as nx import noodles @@ -15,8 +13,7 @@ from rdflib.tools.rdf2dot import rdf2dot from requests import HTTPError -from fairworkflows import namespaces, LinguisticSystem, LINGSYS_PYTHON -from fairworkflows.config import LOGGER +from fairworkflows import LINGSYS_PYTHON, LinguisticSystem, namespaces from fairworkflows.fairstep import FairStep from fairworkflows.prov import WorkflowRetroProv, prov_logger from fairworkflows.rdf_wrapper import RdfWrapper @@ -344,7 +341,7 @@ def display(self): raise ValueError( 'Cannot display workflow as no noodles step_level_promise has been constructed.') - from IPython.display import display, SVG + from IPython.display import SVG, display with TemporaryDirectory() as td: filename = Path(td) / 'dag.svg' @@ -385,6 +382,8 @@ def _generate_retrospective_prov_publication(self) -> WorkflowRetroProv: workflow_uri = rdflib.URIRef(self.uri) else: workflow_uri = rdflib.URIRef('http://www.example.org/unpublishedworkflow') + # TODO: use the regular nanopub temp URI instead of example.org? + # workflow_uri = rdflib.URIRef('http://purl.org/nanopub/temp/workflow') step_provs = prov_logger.get_all() return WorkflowRetroProv(self, workflow_uri, step_provs) @@ -448,7 +447,7 @@ def __str__(self): Returns string representation of this FairWorkflow object. 
""" s = f'Workflow URI = {self._uri}\n' - s += self._rdf.serialize(format='trig').decode('utf-8') + s += self._rdf.serialize(format='trig') return s diff --git a/fairworkflows/linguistic_system.py b/fairworkflows/linguistic_system.py index 4a4adcc7..2208bbca 100644 --- a/fairworkflows/linguistic_system.py +++ b/fairworkflows/linguistic_system.py @@ -1,9 +1,12 @@ import sys import warnings from typing import List + import rdflib -from rdflib import DC, RDF, RDFS, OWL -from .namespaces import SCHEMAORG +from rdflib import OWL, RDF, RDFS + +from .namespaces import DCTERMS, SCHEMAORG + class LinguisticSystem: def __init__(self, lstype: rdflib.URIRef = None, label: str = None, see_also: rdflib.URIRef = None, version_info: str = None): @@ -49,7 +52,7 @@ def _check_unique(l: List): -LINGSYS_ENGLISH = LinguisticSystem(lstype=DC.LinguisticSystem, +LINGSYS_ENGLISH = LinguisticSystem(lstype=DCTERMS.LinguisticSystem, label='en', see_also="http://www.datypic.com/sc/xsd/t-xsd_language.html") @@ -57,4 +60,3 @@ def _check_unique(l: List): label='python', version_info='.'.join([str(v) for v in sys.version_info]), see_also="https://www.wikidata.org/wiki/Q28865") - diff --git a/fairworkflows/manual_assistant.py b/fairworkflows/manual_assistant.py index 2c372673..843029a6 100644 --- a/fairworkflows/manual_assistant.py +++ b/fairworkflows/manual_assistant.py @@ -1,11 +1,13 @@ import base64 import cgi import logging -from http.server import HTTPServer, BaseHTTPRequestHandler -from typing import List, Dict -from fairworkflows.config import MANUAL_ASSISTANT_HOST, MANUAL_ASSISTANT_PORT +from http.server import BaseHTTPRequestHandler, HTTPServer +from typing import Dict, List + from jinja2 import Environment, PackageLoader, select_autoescape +from fairworkflows.config import MANUAL_ASSISTANT_HOST, MANUAL_ASSISTANT_PORT + logging.basicConfig(level=logging.INFO) env = Environment(loader=PackageLoader('fairworkflows', 'templates'), autoescape=select_autoescape('html')) diff --git a/fairworkflows/namespaces.py b/fairworkflows/namespaces.py index be044ab7..a68765be 100644 --- a/fairworkflows/namespaces.py +++ b/fairworkflows/namespaces.py @@ -6,6 +6,7 @@ BPMN = rdflib.Namespace("http://dkm.fbk.eu/index.php/BPMN2_Ontology#") PWO = rdflib.Namespace("http://purl.org/spar/pwo/") SCHEMAORG = rdflib.Namespace("https://schema.org/") +DCTERMS = rdflib.Namespace("http://purl.org/dc/terms/") """ Namespace for diff --git a/fairworkflows/prov.py b/fairworkflows/prov.py index 592b6d1c..5ce4dbb4 100644 --- a/fairworkflows/prov.py +++ b/fairworkflows/prov.py @@ -1,6 +1,6 @@ import threading from datetime import datetime -from typing import List, Iterator, Dict +from typing import Dict, Iterator, List import rdflib @@ -133,7 +133,7 @@ def publish_as_nanopub(self, use_test_server=False, **kwargs): def __str__(self): """String representation.""" s = f'Step retrospective provenance.\n' - s += self._rdf.serialize(format='turtle').decode('utf-8') + s += self._rdf.serialize(format='turtle') return s @@ -220,5 +220,5 @@ def publish_as_nanopub(self, use_test_server=False, **kwargs): def __str__(self): """String representation.""" s = f'Workflow retrospective provenance.\n' - s += self._rdf.serialize(format='turtle').decode('utf-8') + s += self._rdf.serialize(format='turtle') return s diff --git a/fairworkflows/rdf_wrapper.py b/fairworkflows/rdf_wrapper.py index f9d5a699..2bdb7fe6 100644 --- a/fairworkflows/rdf_wrapper.py +++ b/fairworkflows/rdf_wrapper.py @@ -6,11 +6,13 @@ import pyshacl import rdflib -from rdflib import RDF, RDFS, DCTERMS, 
OWL -from nanopub import Publication, NanopubClient + +# from nanopub import Publication, NanopubClient +from nanopub import Nanopub, NanopubConf, load_profile +from rdflib import DCTERMS, OWL, RDFS from rdflib.tools.rdf2dot import rdf2dot -from fairworkflows import namespaces, LinguisticSystem +from fairworkflows import LinguisticSystem, namespaces from fairworkflows.config import PACKAGE_DIR PLEX_SHAPES_SHACL_FILEPATH = str(PACKAGE_DIR / 'resources' / 'plex-shapes.ttl') @@ -250,8 +252,10 @@ def from_nanopub(cls, uri: str, use_test_server=False): nanopub_uri, frag = urldefrag(uri) # Fetch the nanopub - client = NanopubClient(use_test_server=use_test_server) - nanopub = client.fetch(nanopub_uri) + nanopub = Nanopub( + source_uri=nanopub_uri, + conf=NanopubConf(use_test_server=use_test_server) + ) if len(frag) > 0: # If we found a fragment we can use the passed URI @@ -305,18 +309,35 @@ def _publish_as_nanopub(self, use_test_server=False, **kwargs): f'property of this object: {self._derived_from}') # Publish the rdf of this step as a nanopublication - nanopub = Publication.from_assertion(assertion_rdf=self.rdf, - introduces_concept=self.self_ref, - derived_from=self._derived_from, - **kwargs) - - client = NanopubClient(use_test_server=use_test_server) - publication_info = client.publish(nanopub) + np_conf = NanopubConf( + profile=load_profile(), + use_test_server=use_test_server, + add_prov_generated_time=True, + attribute_publication_to_profile=True, + derived_from=self._derived_from, + **kwargs + ) + np = Nanopub( + conf=np_conf, + assertion=self.rdf, + introduces_concept=self.self_ref, + ) + np.publish() # Set the new, published, URI, which should be whatever the (published) URI of the concept that was introduced is. # Note that this is NOT the nanopub's URI, since the nanopub is not the step/workflow. The rdf object describing the step/workflow # is contained in the assertion graph of the nanopub, and has its own URI. - self._uri = publication_info['concept_uri'] + + publication_info = { + "source_uri": np.source_uri + } + self._rdf = np._rdf + if np.concept_uri: + publication_info["concept_uri"] = np.concept_uri + self._uri = np.concept_uri + else: + self._uri = np.source_uri + # self._uri = publication_info['concept_uri'] self._is_published = True self._is_modified = False diff --git a/manual_step/README b/manual_step/README index 618a6b18..d6a8ac68 100644 --- a/manual_step/README +++ b/manual_step/README @@ -13,7 +13,10 @@ Test CWL manual tool with -```cwl-runner --preserve-entire-environment test_workflow.cwl --manual_mode commandline``` + +```bash +cwl-runner --preserve-entire-environment test_workflow.cwl --manual_mode commandline +``` ## Define manual step class in CWL spec? * Include RDF description of manual steps @@ -22,4 +25,3 @@ Test CWL manual tool with ## Higher level notebook * Each cell in a notebook can trigger a CWL workflow, using the inputs/outputs specified in the notebook. * Some cells are descriptions of manual tasks, followed by a prompt for the resulting data set that can be used as input to the next workflow. 
- diff --git a/manual_step/manual/manual.py b/manual_step/manual/manual.py index eda127d3..a94ea443 100644 --- a/manual_step/manual/manual.py +++ b/manual_step/manual/manual.py @@ -1,6 +1,8 @@ -import click import json +import click + + @click.command() @click.argument('mode', type=str) def manual(mode): diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..3feea4b2 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,166 @@ +[project] +name = "fairworkflows" +description = "A high-level and user-friendly python library for constructing, modifying, and publishing scientific workflows described using semantic technologies." +readme = "README.md" +requires-python = ">=3.7" +license = { file = "LICENSE" } +authors = [ + { name = "Robin Richardson", email = "r.richardson@esciencecenter.nl" }, + { name = "Djura Smits" }, + { name = "Sven van den Burg" }, + { name = "Vincent Emonet", email = "vincent.emonet@gmail.com" }, +] +keywords = [ + "Nanopublication", + "RDF", + "Linked Data", + "Publishing" +] +classifiers = [ + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", +] +dynamic = ["version"] + +dependencies = [ + "nanopub >=2.0.0", + "networkx ~=2.5", + "pyyaml", + "rdflib <7.0.0,>=6.0.2", + "requests", + "pyshacl >=0.17.0", + "noodles ==0.3.3", + "Jinja2 >=2.11.3", +] + +[project.optional-dependencies] +test = [ + "pytest >=7.1.3,<8.0.0", + "pytest-cov >=2.12.0,<4.0.0", + "coveralls", + "isort >=5.0.6,<6.0.0", + "flake8 >=3.8.3,<6.0.0", + "Flake8-pyproject>=1.1.0.post0", + "flaky", + "mypy ==0.971", +] +doc = [ + "recommonmark", + "sphinx", + "sphinx_rtd_theme", +] +dev = [ + "graphviz==0.14.1", + "pre-commit >=2.17.0,<3.0.0", + "autoflake >=1.4.0,<2.0.0", + "jupyter", + "notebook", + "types-requests", +] + + +[project.urls] +Homepage = "https://fair-workflows.github.io/fairworkflows" +Documentation = "https://fair-workflows.github.io/fairworkflows" +History = "https://github.com/fair-workflows/fairworkflows/releases" +Tracker = "https://github.com/fair-workflows/fairworkflows/issues" +Source = "https://github.com/fair-workflows/fairworkflows" + + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + + +# ENVIRONMENTS AND SCRIPTS +[tool.hatch.envs.default] +features = [ + "test", + "doc", + "dev", +] +post-install-commands = [ + "pre-commit install", +] + +[tool.hatch.envs.default.scripts] +test = "pytest {args}" +cov = "test --cov=fairworkflows {args}" +format = [ + "isort fairworkflows tests", + "autoflake --remove-all-unused-imports --recursive --remove-unused-variables --in-place fairworkflows tests --exclude=__init__.py", + "pre-commit run --all-files || true", +] +check = [ + "isort --check-only --diff fairworkflows", + "flake8 fairworkflows", +] +example = "python examples/basic_workflow.py {args}" + + +# TOOLS +[tool.hatch.version] +path = "fairworkflows/_version.py" + +[tool.hatch.metadata] +allow-direct-references = true +# TODO: required to import from GitHub URL, to remove when not needed anymore + + +[tool.isort] +line_length = 120 +skip = ["fairworkflows/__init__.py"] +profile = "black" + + +[tool.coverage.run] +source = ["fairworkflows"] +branch = true + +[tool.coverage.report] +omit = ["fairworkflows/templates/*", "tests/*"] + + +[tool.flake8] +max-complexity = 17 
+max-line-length = 120 +per-file-ignores = [ + "__init__.py:F401", +] +ignore = [ + "E501", # line too long + "E303", # too many blank lines + "E301", # expected 1 blank lines found 0 + "W503", # line break before binary operator +] +exclude =[ + ".git", + "__pycache__", + "fairworkflows/templates/", + "fairworkflows/old", + "venv", + "env" +] + + +[tool.mypy] +strict = false +disallow_untyped_defs = false +follow_imports = "normal" +ignore_missing_imports = true +pretty = true +show_column_numbers = true +warn_no_return = false +warn_unused_ignores = true + + +[tool.pytest.ini_options] +testpaths = ["tests"] +markers = [ + "no_rsa_key: mark a test as a test only run when there is no nanopub RSA key setup.", +] diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index f87141d6..00000000 --- a/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -# content of pytest.ini -[pytest] -testpaths = tests diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ea139faf..00000000 --- a/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -nanopub==1.2.7 -networkx~=2.5 -pytest -pyyaml -rdflib<6.0.0,>=5.0.0 -requests -pyshacl>=0.14.1 -noodles==0.3.3 -Jinja2==2.11.3 diff --git a/requirements_dev.txt b/requirements_dev.txt deleted file mode 100644 index c856e76d..00000000 --- a/requirements_dev.txt +++ /dev/null @@ -1,8 +0,0 @@ -coveralls -flaky -graphviz==0.14.1 -pytest -pytest-cov -recommonmark -sphinx -sphinx_rtd_theme diff --git a/setup.py b/setup.py deleted file mode 100644 index b08fb621..00000000 --- a/setup.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python - -from setuptools import setup, find_packages -import distutils - -import codecs -import os.path - - -def read(rel_path): - here = os.path.abspath(os.path.dirname(__file__)) - with codecs.open(os.path.join(here, rel_path), 'r') as fp: - return fp.read() - - -def get_version(rel_path): - for line in read(rel_path).splitlines(): - if line.startswith('__version__'): - delim = '"' if '"' in line else "'" - return line.split(delim)[1] - else: - raise RuntimeError("Unable to find version string.") - - -setup( - name='fairworkflows', - version=get_version('fairworkflows/_version.py'), - description='FAIRWorkflows python library', - long_description=open("README.md", "r").read(), - long_description_content_type='text/markdown', - author='Robin Richardson, Djura Smits, Sven van den Burg', - author_email='r.richardson@esciencecenter.nl', - url='https://github.com/fair-workflows/fairworkflows/', - install_requires=open("requirements.txt", "r").readlines(), - packages=['fairworkflows'], - extras_require={ - 'dev': open('requirements_dev.txt', 'r').readlines() - }, - include_package_data=True, - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent" - ], - python_requires='>=3.6' -) diff --git a/conftest.py b/tests/conftest.py similarity index 96% rename from conftest.py rename to tests/conftest.py index 50611d0e..fb8f2f90 100644 --- a/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,7 @@ import pytest import rdflib import requests + from fairworkflows.config import TESTS_RESOURCES NANOPUB_SERVER = 'http://purl.org/np/' @@ -9,6 +10,7 @@ pytest.mark.skipif(requests.get(NANOPUB_SERVER).status_code != 200, reason='Nanopub server is unavailable')) +use_test_server=True def read_rdf_test_resource(filename: str) -> rdflib.Graph(): """ diff --git a/tests/test.txt b/tests/test.txt deleted file mode 100644 index 04b7925b..00000000 --- 
a/tests/test.txt +++ /dev/null @@ -1 +0,0 @@ -Logging to file 'test.txt'. diff --git a/tests/test_fairstep.py b/tests/test_fairstep.py index c37fc563..6388fc4a 100644 --- a/tests/test_fairstep.py +++ b/tests/test_fairstep.py @@ -1,21 +1,18 @@ -import sys from typing import Tuple -from unittest.mock import patch import pytest import rdflib +from nanopub import Nanopub from rdflib import DCTERMS, OWL, RDF -from nanopub import Publication -from conftest import skip_if_nanopub_server_unavailable, read_rdf_test_resource -from fairworkflows import FairStep, namespaces, FairVariable, is_fairworkflow -from fairworkflows.fairstep import _extract_outputs_from_function, is_fairstep, \ - _extract_inputs_from_function +from fairworkflows import FairStep, FairVariable, LinguisticSystem, namespaces +from fairworkflows.fairstep import _extract_inputs_from_function, _extract_outputs_from_function, is_fairstep from fairworkflows.rdf_wrapper import replace_in_rdf -from fairworkflows import LinguisticSystem +from tests.conftest import read_rdf_test_resource, skip_if_nanopub_server_unavailable, use_test_server + def test_construct_fair_variable_get_name_from_uri(): - variable = FairVariable(name=None, uri='http:example.org#input1', computational_type='int') + variable = FairVariable(name=None, uri='http://example.org#input1', computational_type='int') assert variable.name == 'input1' assert variable.computational_type == 'int' @@ -169,17 +166,19 @@ def test_validation(self): with pytest.raises(AssertionError): step.validate() - @patch('fairworkflows.rdf_wrapper.NanopubClient.publish') - @patch('fairworkflows.rdf_wrapper.NanopubClient.fetch') - def test_modification_and_republishing(self, nanopub_fetch_mock, - nanopub_publish_mock): + # @patch('fairworkflows.rdf_wrapper.NanopubClient.publish') + # @patch('fairworkflows.rdf_wrapper.NanopubClient.fetch') + def test_modification_and_republishing(self): - test_uri = 'http://purl.org/np/RACLlhNijmCk4AX_2PuoBPHKfY1T6jieGaUPVFv-fWCAg#step' + # test_uri = 'http://purl.org/np/RACLlhNijmCk4AX_2PuoBPHKfY1T6jieGaUPVFv-fWCAg#step' + test_uri = 'http://purl.org/np/RACLlhNijmCk4AX_2PuoBPHKfY1T6jieGaUPVFv-fWCAg' # Mock the Nanopub.fetch() method to return a locally sourced nanopub nanopub_rdf = read_rdf_test_resource('sample_fairstep_nanopub.trig') - returned_nanopubobj = Publication(rdf=nanopub_rdf, source_uri=test_uri) - nanopub_fetch_mock.return_value = returned_nanopubobj + local_np = Nanopub(rdf=nanopub_rdf) + published_np = Nanopub(test_uri) + assert local_np.source_uri == published_np.source_uri + # nanopub_fetch_mock.return_value = returned_nanopubobj # 'Fetch' the nanopub as a fairstep, and attempt to publish it without modification preheat_oven = FairStep.from_nanopub(uri=test_uri) @@ -187,14 +186,14 @@ def test_modification_and_republishing(self, nanopub_fetch_mock, assert not preheat_oven.is_modified original_uri = preheat_oven.uri with pytest.warns(Warning): - preheat_oven.publish_as_nanopub() + preheat_oven.publish_as_nanopub(use_test_server=use_test_server) assert preheat_oven.uri == original_uri # Now modify the step description preheat_oven.description = 'Preheat an oven to 200 degrees C.' 
assert preheat_oven.is_modified is True - preheat_oven.publish_as_nanopub() - assert nanopub_publish_mock.called + preheat_oven.publish_as_nanopub(use_test_server=use_test_server) + # assert nanopub_publish_mock.called assert preheat_oven.uri != original_uri assert preheat_oven.is_modified is False @@ -247,8 +246,8 @@ def test_shacl_does_not_validate(self): assert len(step.rdf) == n_triples_before, 'shacl_validate mutated RDF' -@patch('fairworkflows.rdf_wrapper.NanopubClient.publish') -def test_is_fairstep_decorator(mock_publish): +# @patch('fairworkflows.rdf_wrapper.NanopubClient.publish') +def test_is_fairstep_decorator(): @is_fairstep(label='test_label') def add(a: int, b: int) -> int: """ @@ -258,8 +257,10 @@ def add(a: int, b: int) -> int: assert hasattr(add(1,2), '_fairstep') - add._fairstep.publish_as_nanopub() - assert mock_publish.call_count == 1 + add._fairstep.publish_as_nanopub(use_test_server=use_test_server) + assert add._fairstep.uri is not None + # TODO: improve test assertion here + # assert mock_publish.call_count == 1 def test_decorator_semantic_types(): test_types_a = ['http://www.example.org/distance', 'http://www.example.org/number'] diff --git a/tests/test_fairworkflow.py b/tests/test_fairworkflow.py index e25601aa..4abd979a 100644 --- a/tests/test_fairworkflow.py +++ b/tests/test_fairworkflow.py @@ -1,17 +1,14 @@ -import inspect import warnings from unittest import mock import pytest import rdflib -from nanopub.definitions import DUMMY_NANOPUB_URI from requests import HTTPError -from conftest import skip_if_nanopub_server_unavailable, read_rdf_test_resource -from fairworkflows import FairWorkflow, FairStep, namespaces, FairVariable, is_fairstep, is_fairworkflow -from fairworkflows.prov import WorkflowRetroProv, StepRetroProv +from fairworkflows import FairStep, FairWorkflow, is_fairstep, is_fairworkflow, namespaces +from fairworkflows.prov import StepRetroProv, WorkflowRetroProv from fairworkflows.rdf_wrapper import replace_in_rdf -from nanopub import Publication +from tests.conftest import read_rdf_test_resource, skip_if_nanopub_server_unavailable, use_test_server class TestFairWorkflow: @@ -265,27 +262,28 @@ def test_display_rdf_with_graphviz_module_and_dependency(self, test_workflow): """ test_workflow.display_rdf() - @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') - def test_publish_as_nanopub(self, mock_publish, test_workflow): - test_published_uris = ['www.example.org/published_step1#step', - 'www.example.org/published_step2#step', - 'www.example.org/published_step3#step', - 'www.example.org/published_workflow#workflow'] - mock_publish.side_effect = [ - {'concept_uri': test_published_uris[0]}, # first call - {'concept_uri': test_published_uris[1]}, - {'concept_uri': test_published_uris[2]}, - {'concept_uri': test_published_uris[3]} # Last call - ] + # @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') + def test_publish_as_nanopub(self, test_workflow): + # test_published_uris = ['http://www.example.org/published_step1#step', + # 'http://www.example.org/published_step2#step', + # 'http://www.example.org/published_step3#step', + # 'http://www.example.org/published_workflow#workflow'] + # mock_publish.side_effect = [ + # {'concept_uri': test_published_uris[0]}, # first call + # {'concept_uri': test_published_uris[1]}, + # {'concept_uri': test_published_uris[2]}, + # {'concept_uri': test_published_uris[3]} # Last call + # ] with pytest.raises(RuntimeError): # 'Publishing a workflow with unpublished steps must raise RunTimeError...' 
- test_workflow.publish_as_nanopub() + test_workflow.publish_as_nanopub(use_test_server=use_test_server) # ...unless using pubish_steps=True - pubinfo = test_workflow.publish_as_nanopub(publish_steps=True) - assert pubinfo['concept_uri'] == 'www.example.org/published_workflow#workflow' - assert mock_publish.call_count == 4 # 1 workflow, 3 steps + pubinfo = test_workflow.publish_as_nanopub(publish_steps=True, use_test_server=use_test_server) + assert pubinfo['concept_uri'].endswith("#plan") + + # assert mock_publish.call_count == 4 # 1 workflow, 3 steps for step in test_workflow: - assert step.uri in test_published_uris + # assert step.uri in test_published_uris assert ((rdflib.URIRef(step.uri), None, None) in test_workflow.rdf or (None, None, rdflib.URIRef(step.uri)) in test_workflow.rdf), \ 'The new step URIs are not in the workflow' @@ -294,8 +292,8 @@ def test_publish_as_nanopub(self, mock_publish, test_workflow): and (None, None, rdflib.URIRef(uri)) not in test_workflow.rdf), \ 'The old step URIs are still in the workflow' - @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') - def test_publish_as_nanopub_no_modifications(self, mock_publish, test_workflow): + # @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') + def test_publish_as_nanopub_no_modifications(self, test_workflow): """ Test case of an already published workflow that itself nor its steps are not modified. """ @@ -304,12 +302,12 @@ def test_publish_as_nanopub_no_modifications(self, mock_publish, test_workflow): step._is_published = True test_workflow._is_modified = False test_workflow._is_published = True - pubinfo = test_workflow.publish_as_nanopub() - assert mock_publish.call_count == 0 + pubinfo = test_workflow.publish_as_nanopub(use_test_server=use_test_server) + # assert mock_publish.call_count == 0 assert pubinfo['nanopub_uri'] is None - @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') - def test_workflow_construction_and_execution(self, mock_publish): + # @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') + def test_workflow_construction_and_execution(self): """ Construct a workflow using the is_fairstep and is_fairworkflow decorators and check that execution and returned provenance is as expected. 
Then @@ -362,20 +360,19 @@ def my_workflow(in1, in2, in3): assert step_prov.step in fw._steps.values() print(step_prov) - test_published_uris = [ - 'www.example.org/published1#prov', - 'www.example.org/published2#prov', - 'www.example.org/published3#prov', - 'www.example.org/published4#prov', - 'www.example.org/published5#prov' - ] - mock_publish.side_effect = [{'concept_uri': uri} for uri in test_published_uris] + # test_published_uris = [ + # 'http://www.example.org/published1#prov', + # 'http://www.example.org/published2#prov', + # 'http://www.example.org/published3#prov', + # 'http://www.example.org/published4#prov', + # 'http://www.example.org/published5#prov' + # ] + # mock_publish.side_effect = [{'concept_uri': uri} for uri in test_published_uris] - prov.publish_as_nanopub() - assert mock_publish.call_count == 5 # 1 workflow, 4 steps + prov.publish_as_nanopub(use_test_server=use_test_server) + # assert mock_publish.call_count == 5 # 1 workflow, 4 steps + assert (None, namespaces.PROV.hasMember, None) in prov._rdf - for uri in test_published_uris[:4]: - assert (None, namespaces.PROV.hasMember, rdflib.URIRef(uri)) in prov._rdf def test_workflow_complex_serialization(self): class OtherType: diff --git a/tests/test_rdf_wrapper.py b/tests/test_rdf_wrapper.py index 405c24d4..6b1d4cbd 100644 --- a/tests/test_rdf_wrapper.py +++ b/tests/test_rdf_wrapper.py @@ -1,10 +1,10 @@ import warnings -from unittest import mock import pytest import rdflib from fairworkflows.rdf_wrapper import RdfWrapper +from tests.conftest import use_test_server class TestRdfWrapper: @@ -31,18 +31,18 @@ def test_set_and_get_attribute(self): def test_publish_as_nanopub_invalid_kwargs(self): wrapper = RdfWrapper(uri='test') with pytest.raises(ValueError): - wrapper._publish_as_nanopub(introduces_concept='test') + wrapper._publish_as_nanopub(introduces_concept='test', use_test_server=use_test_server) with pytest.raises(ValueError): - wrapper._publish_as_nanopub(assertion_rdf='test') + wrapper._publish_as_nanopub(assertion_rdf='test', use_test_server=use_test_server) def test_publish_as_nanopub_double_derived_from(self): - wrapper = RdfWrapper(uri='test', derived_from=['http:example.nl/workflow1']) + wrapper = RdfWrapper(uri='test', derived_from=['http://example.nl/workflow1']) with pytest.raises(ValueError): - wrapper._publish_as_nanopub(derived_from=['http:example.nl/workflow2']) + wrapper._publish_as_nanopub(derived_from=['http://example.nl/workflow2'], use_test_server=use_test_server) - @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') - def test_publish_as_nanopub_with_kwargs(self, nanopub_wrapper_publish_mock): - wrapper = RdfWrapper(uri='test', derived_from=['http:example.nl/workflow1']) - wrapper.rdf.add((rdflib.Literal('test'), rdflib.Literal('test'), rdflib.Literal('test'))) + # @mock.patch('fairworkflows.rdf_wrapper.NanopubClient.publish') + def test_publish_as_nanopub_with_kwargs(self): + wrapper = RdfWrapper(uri='test', derived_from=['http://example.nl/workflow1']) + wrapper.rdf.add((rdflib.URIRef('http://test'), rdflib.URIRef('http://test'), rdflib.Literal('test'))) # attribute_asseriton_to_profile is kwarg for nanopub.Publication.from_assertion() - wrapper._publish_as_nanopub(attribute_assertion_to_profile=True) + wrapper._publish_as_nanopub(attribute_assertion_to_profile=True, use_test_server=use_test_server)
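
For orientation, here is a minimal, self-contained sketch of the nanopub 2.x publishing flow that the refactored `RdfWrapper._publish_as_nanopub` and `RdfWrapper.from_nanopub` rely on. It sticks to the calls that appear in the diff (`load_profile`, `NanopubConf`, `Nanopub`, `publish`, `source_uri`); the example namespace and triple are made up, and it assumes a nanopub profile has already been created with `np setup`.

```python
import rdflib
from nanopub import Nanopub, NanopubConf, load_profile

# Build a tiny assertion graph (made-up example data).
EX = rdflib.Namespace("http://www.example.org/")
assertion = rdflib.Graph()
assertion.add((EX.step1, rdflib.RDFS.label, rdflib.Literal("Addition")))

# Configure the publication the same way the refactored wrapper does:
# sign with the local profile (created by `np setup`) and target the test server.
conf = NanopubConf(
    profile=load_profile(),
    use_test_server=True,
    add_prov_generated_time=True,
    attribute_publication_to_profile=True,
)

np = Nanopub(conf=conf, assertion=assertion)
np.publish()
print("Published:", np.source_uri)

# Fetch a nanopub back by URI, as RdfWrapper.from_nanopub now does.
fetched = Nanopub(source_uri=np.source_uri, conf=NanopubConf(use_test_server=True))
print("Fetched:", fetched.source_uri)
```

In the wrapper itself, `introduces_concept=self.self_ref` and `derived_from` are passed as well, and `np.concept_uri` is read back after publishing to update the step or workflow URI.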