diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..ebe4d43 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +.gitignore +add_new_architectures.md +AUTHORS +LICENSE +Makefile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..ce33890 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,36 @@ +name: Docker +on: + push: + branches: + - master + - main + schedule: + - cron: '0 0 2 */1 *' # Every month + pull_request: + workflow_dispatch: # Allow manual triggering + +jobs: + build-test-publish: + runs-on: ubuntu-latest + steps: + - + uses: actions/checkout@master + - + name: code-check + run: | + sudo apt update && sudo apt install pylint + pip install -r requirements.txt + cd .. + pylint reactive-tools --rcfile reactive-tools/.pylintrc + - + name: Build + id: build + if: ${{ github.event_name != 'pull_request' }} + uses: gianlu33/Publish-Docker-Github-Action@main + with: + name: authexec/reactive-tools + username: authexec + password: ${{ secrets.DOCKER_TOKEN }} + cache: ${{ github.event_name != 'schedule' }} + tag_names: true + diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 0000000..2206da6 --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,47 @@ +name: Test +on: + pull_request: + workflow_dispatch: # Allow manual triggering + +env: + SGX_IMAGE: authexec/event-manager-sgx:latest + SANCUS_IMAGE: authexec/event-manager-sancus:latest + TRUSTZONE_IMAGE: authexec/event-manager-trustzone:latest + AESM_CLIENT_IMAGE: authexec/aesm-client:latest + MANAGER_IMAGE: authexec/attestation-manager + ADMIN_IMAGE: authexec/reactive-tools:latest + +jobs: + test-examples: + runs-on: ubuntu-latest + steps: + - + uses: actions/checkout@master + - + name: Build image + run: | + docker build -t ${{ env.ADMIN_IMAGE }} . + - + name: Checkout examples + run: git clone https://github.com/AuthenticExecution/examples.git + - + name: Pull images + run: | + ### Uncomment lines below if other Docker images are needed ### + #docker pull ${{ env.SGX_IMAGE }} + #docker pull ${{ env.MANAGER_IMAGE }}:native + #docker pull ${{ env.SANCUS_IMAGE }} + #docker pull ${{ env.TRUSTZONE_IMAGE }} + #docker pull ${{ env.AESM_CLIENT_IMAGE }} + #docker pull ${{ env.MANAGER_IMAGE }}:sgx + - + name: build button-led + timeout-minutes: 10 + run: | + cd examples/button-led + shopt -s expand_aliases + alias REACTIVE_TOOLS="docker run --rm --network=host -v $(pwd):/usr/src/app/ ${{ env.ADMIN_IMAGE }} reactive-tools" + REACTIVE_TOOLS --debug build descriptor-local.json + unalias REACTIVE_TOOLS + touch completed + diff --git a/.gitignore b/.gitignore index 11733b3..046ff38 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ __pycache__/ *.egg-info/ *.swp + +build +dist + +res.json diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..1c89449 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,605 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. 
Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=print-statement, + parameter-unpacking, + unpacking-in-except, + old-raise-syntax, + backtick, + long-suffix, + old-ne-operator, + old-octal-literal, + import-star-module-level, + non-ascii-bytes-literal, + raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + indexing-exception, + raising-string, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, + exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating, + deprecated-operator-function, + deprecated-urllib-function, + xreadlines-attribute, + deprecated-sys-function, + exception-escape, + comprehension-escape, + # gianlu33 + anomalous-backslash-in-string, + invalid-name, + missing-function-docstring, + missing-module-docstring, + 
missing-class-docstring, + too-many-locals, + broad-except, + too-many-statements, + logging-format-interpolation, + duplicate-code, + bare-except, + too-many-arguments, + fixme, + too-many-public-methods, + unused-wildcard-import, + eval-used, + too-many-instance-attributes, + attribute-defined-outside-init, + too-few-public-methods, + wildcard-import, + global-statement, + dangerous-default-value, + import-outside-toplevel + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[STRING] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. 
+notes=FIXME, + XXX, + TODO + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma, + dict-separator + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. 
+ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[LOGGING] + +# Format style used to check logging format string. `old` means using % +# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". 
+overgeneral-exceptions=BaseException, + Exception diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..9d1dafb --- /dev/null +++ b/AUTHORS @@ -0,0 +1,6 @@ +List of authors of this repository + +Job Noorman +Jan Tobias Mühlberg +Gianluca Scopelliti +Sepideh Pouyanrad diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..35ac9bb --- /dev/null +++ b/Dockerfile @@ -0,0 +1,5 @@ +FROM authexec/reactive-base:latest + +COPY . . +RUN python -m pip install . \ + && rm -rf * diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..85fbf6b --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Gianluca Scopelliti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..2f0da91 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include reactivetools/rules * diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..dc83e25 --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +REPO ?= authexec/reactive-tools +TAG ?= latest +VOLUME ?= $(shell pwd) + +run: + docker run --rm -it --network=host -v $(VOLUME):/usr/src/app/ $(REPO):$(TAG) bash + +pull: + docker pull $(REPO):$(TAG) + +build: + docker build -t $(REPO):$(TAG) . + +push: login + docker push $(REPO):$(TAG) + +login: + docker login diff --git a/README.md b/README.md new file mode 100644 index 0000000..be8cbb1 --- /dev/null +++ b/README.md @@ -0,0 +1,97 @@ +# reactive-tools + +Deployment tools for the [Authentic Execution framework](https://github.com/AuthenticExecution) + +Quick hands-on: check out our [examples](https://github.com/AuthenticExecution/examples). + +## Support + +Currently, the following architectures are supported: + +- Sancus +- SGX +- Native (no TEE support, run natively) +- TrustZone with OPTEE + +[Extending support for new architectures](add_new_architectures.md) + +[Tutorial: develop an Authentic Execution application](https://github.com/gianlu33/authentic-execution/blob/master/docs/tutorial-develop-apps.md) + +### Limitations + +- Currently, SGX modules can only be deployed in debug mode +- Trustzone support is experimental, it works on QEMU v7 only, on our custom [optee_os](https://github.com/AuthenticExecution/optee_os) and untrusted [event manager](https://github.com/AuthenticExecution/event_manager_trustzone) + +## Dependencies & installation + +Check the [reactive-base](https://github.com/AuthenticExecution/reactive-base) repository for more info about the dependencies needed for each TEE. 
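+
+For example, Sancus deployments additionally need the Sancus Python library on the `PYTHONPATH`; the path below is the one mentioned in `add_new_architectures.md` and may differ on your system:
+
+```bash
+export PYTHONPATH=$PYTHONPATH:/usr/local/share/sancus-compiler/python/lib/
+```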
+
+```bash
+# Install reactive-tools - you must be at the root of this repository
+pip install .
+```
+
+## Run reactive-tools with Docker
+
+The [authexec/reactive-tools](https://hub.docker.com/repository/docker/authexec/reactive-tools) Docker image provides a simple and fast way to run reactive-tools from any Linux-based OS.
+
+- The `latest` image contains all the dependencies/toolchains to build and deploy modules (SGX, native, Sancus, TrustZone), plus some other utilities. It is *not* optimized, therefore it is not particularly lightweight, but it contains everything that is needed.
+
+When running the Docker image, you should ideally mount a volume that includes the workspace of the application to be deployed, containing all the source files and the deployment descriptor.
+
+```bash
+# Run the reactive-tools image
+### <volume>: volume we want to mount (ideally, contains the workspace of our app)
+### <tag>: tag of the image we want to run (default: latest)
+make run VOLUME=<volume> TAG=<tag>
+```
+
+## Run
+
+All of the following commands can be run with either the `--verbose` or the `--debug` flag, for debugging purposes. For a full description of the arguments, run `reactive-tools -h`.
+
+### Build
+
+```bash
+# Build the application. Useful to check that all the modules compile before the actual deployment
+### <workspace>: root directory of the application to deploy. Default: "."
+### <config>: name of the input deployment descriptor, should be inside <workspace>
+reactive-tools build <config> --workspace <workspace>
+```
+
+### Deploy
+```bash
+# Deploy the application
+### <workspace>: root directory of the application to deploy. Default: "."
+### <config>: name of the deployment descriptor, should be inside <workspace>
+### <result>: path to the output deployment descriptor that will be generated (optional)
+reactive-tools deploy <config> --workspace <workspace> --result <result>
+```
+
+### Call
+```bash
+# Call a specific entry point of a deployed application
+### <config>: deployment descriptor. MUST be the output of a previous deploy command
+### <module>: name of the module we want to call
+### <entry>: either the name or the ID of the entry point we want to call
+### <arg>: byte array in hexadecimal format, e.g., "deadbeef" (OPTIONAL)
+reactive-tools call <config> --module <module> --entry <entry> --arg <arg>
+```
+
+### Output
+```bash
+# Trigger the output of a _direct_ connection
+### <config>: deployment descriptor. MUST be the output of a previous deploy command
+### <connection>: either the name or the ID of the connection
+### <arg>: byte array in hexadecimal format, e.g., "deadbeef" (OPTIONAL)
+reactive-tools output <config> --connection <connection> --arg <arg>
+```
+
+### Request
+```bash
+# Trigger the request of a _direct_ connection
+### <config>: deployment descriptor. MUST be the output of a previous deploy command
+### <connection>: either the name or the ID of the connection
+### <arg>: byte array in hexadecimal format, e.g., "deadbeef" (OPTIONAL)
+reactive-tools request <config> --connection <connection> --arg <arg>
+```
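+
+## Deployment descriptor
+
+For reference, a deployment descriptor is a JSON (or YAML) file with `nodes`, `modules` and `connections` sections. The minimal sketch below is adapted from the Sancus example formerly shipped in `example/example.json` (removed by this change); the exact fields depend on the target TEE and are validated by the rule files in `reactivetools/rules`.
+
+```json
+{
+  "nodes": [
+    {
+      "type": "sancus",
+      "name": "node1",
+      "ip_address": "192.168.0.2",
+      "vendor_id": 4660,
+      "vendor_key": "4078d505d82099ba"
+    }
+  ],
+  "modules": [
+    { "type": "sancus", "name": "sm1", "files": ["sm1.c"], "node": "node1" },
+    { "type": "sancus", "name": "sm2", "files": ["sm2.c"], "node": "node1" }
+  ],
+  "connections": [
+    {
+      "from_module": "sm1",
+      "from_output": "output",
+      "to_module": "sm2",
+      "to_input": "input"
+    }
+  ]
+}
+```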
diff --git a/add_new_architectures.md b/add_new_architectures.md
new file mode 100644
index 0000000..ccef1d4
--- /dev/null
+++ b/add_new_architectures.md
@@ -0,0 +1,140 @@
+# Add support for a new architecture
+
+In this tutorial, we describe all the steps required to add support for a new architecture.
+
+## Preliminary notes
+
+- **You MUST NOT modify** any existing files except for `modules/__init__.py` and `nodes/__init__.py`
+  - Your new architecture will be managed automatically
+
+- If you need to install some Python libraries through `pip`, add their names to the `install_requires` list in `setup.py` (line 16)
+  - Please declare a specific version! This way we avoid the risk that future updates break the app
+
+- If you depend on other external Python libraries (e.g., the Sancus Python library), you need to add their path to your PYTHONPATH environment variable in order to use them
+  - Example: `PYTHONPATH=$PYTHONPATH:/usr/local/share/sancus-compiler/python/lib/`
+  - If this is the case, **DO NOT** import such a module at the beginning of your files, otherwise users who do not have it would not be able to run the application. Instead, import the module inside the functions where you use it
+    - Example: `modules/sancus.py` at line 207
+
+## High-level view of the steps
+
+- Fork this repository in your account
+- [optional] Create a new branch, where you will implement your code
+- Implement & test your code
+- Open a Pull Request (PR) from your branch to the `main` branch of **this** repository (not the one you forked!)
+- Wait for a review
+- Update your code according to the review received
+- End: your code is merged into the main repo!
+
+## Implementation
+
+In this tutorial we describe how to add support for the `TrustZone` architecture.
+
+### Add rules
+
+A rule file is a YAML file which contains logical constraints on the definition of the deployment descriptor (nodes, modules, connections). Essentially, the purpose of these rules is to ensure that the deployment descriptor is structured as expected, and to give a meaningful error if something is wrong.
+
+- All the rule files are stored in the `rules` folder.
+- Examples of rules:
+  - *each item in the `nodes` section of the deployment descriptor must provide a type, a name, an IP address and a port.*
+  - *a port must be a positive 16-bit integer*
+- In `rules/default`, pre-defined rules are stored. For modules and nodes, we wrote some generic rules that all types of nodes/modules should follow (e.g., for nodes, like in the example above)
+- In `rules/modules` and `rules/nodes`, rules specific to each architecture are stored (e.g., Sancus, SGX, etc.)
+
+**Your task**
+
+- **[required]** Create an empty `trustzone.yaml` file both in `rules/modules` and `rules/nodes`
+- [optional] Fill the files with your rules. These will be evaluated automatically at runtime.
+  - This is optional, but **recommended**
+  - Each rule is a key-value pair where the key is a string message that will be printed if the rule is not satisfied, and the value is your logical expression in **Python code**
+  - To satisfy the rule, the expression must evaluate to `True`
+  - Some helper functions are provided in `rules/evaluators.py`, which can be used
+  - Check the other rule files to get an idea of how to declare these rules; a small sketch is also shown below
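+
+For illustration, a hypothetical `rules/nodes/trustzone.yaml` could look as follows. The keys and expressions here are invented for this sketch, and it assumes the descriptor entry is exposed to the expressions as a dict named `node`; check `rules/evaluators.py` and the existing rule files for the actual conventions and helpers.
+
+```yaml
+"the node must declare a name": "'name' in node"
+"reactive_port must be a 16-bit unsigned integer": "0 <= node['reactive_port'] < 65536"
+```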
+
+### Add Node and Module classes
+
+- **[required]** Add a file called `trustzone.py` in the folder `nodes`
+  - This file has to declare a new class called `TrustZoneNode`, which extends the base class `Node`
+- **[required]** Add a file called `trustzone.py` in the folder `modules`
+  - This file has to declare a new class called `TrustZoneModule`, which extends the base class `Module`
+
+These classes have to implement **at least** the abstract methods of the corresponding base classes, according to the descriptions provided in the `base.py` files
+- For some methods, a default implementation is provided. If needed, you can override these methods in the subclasses
+- In the `__init__` function of your classes, **you must** call `super().__init__(args)`, where `args` are the parameters of the `__init__` function in the base class (again, look at `base.py`)
+
+### Update `__init__.py` files in `nodes/` and `modules/`
+
+To have your classes used by the application, you should modify these two files:
+
+- `nodes/__init__.py`
+- `modules/__init__.py`
+
+For both files, the procedure is the same and straightforward.
+
+- The examples below show how to update `modules/__init__.py`. For the same file under `nodes`, the procedure is analogous (just replace `module` with `node`)
+
+**[required] Import your classes**
+
+```python
+from .trustzone import TrustZoneModule
+```
+
+**[required] Declare your rules files**
+
+You should update the `module_rules` and `node_rules` dicts as described below.
+
+- **NOTE:** the key `"trustzone"` is the type of your node/module as written in the deployment descriptor
+
+```python
+module_rules = {
+    # ...
+
+    # THIS is what you have to add:
+    "trustzone" : "trustzone.yaml"
+}
+```
+
+The application will automatically fetch the `trustzone.yaml` file inside the `rules/nodes` or `rules/modules` folders.
+
+**[required] Declare your load function**
+
+The `load` function, declared as an abstract static method in the base class, takes as input the definition of the node/module as written in the deployment descriptor and creates the `TrustZoneNode` or `TrustZoneModule` object.
+
+- The `dump` function does the opposite: it serializes the object back into its descriptor representation
+
+You should update the `module_funcs` and `node_funcs` dicts as described below.
+
+- **NOTE:** the key `"trustzone"` is the type of your node/module as written in the deployment descriptor
+
+```python
+module_funcs = {
+    # ...
+
+    # THIS is what you have to add:
+    "trustzone" : TrustZoneModule.load
+}
+```
+
+**[optional] Cleanup coroutines**
+
+If your `Node` or `Module` classes need to perform certain operations before the application ends (e.g., kill some background process), you can add an entry in the `module_cleanup_coros` and `node_cleanup_coros` lists.
+
+- This is not required, but it is **recommended** to update these lists even if you do not have any cleanup task to perform.
+- The `cleanup` method of your classes has a default implementation in the base class, therefore you do not have to implement a new method yourself if you do not need any cleanup operations.
+
+```python
+module_cleanup_coros = [
+    # ...
+
+    # THIS is what you have to add:
+    TrustZoneModule.cleanup
+]
+```
+
+### Implement methods
+
+Now, you just have to implement all the abstract methods that your classes inherit from the base classes `Node` and `Module`, as in the sketch below; the checklist that follows it gives some additional pointers.
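+
+A minimal, hypothetical skeleton of `modules/trustzone.py` is sketched below. The constructor parameters and method names are assumptions, inferred from how modules are used elsewhere in this repository (`load`, `build`, `deploy`, `attest`); refer to `base.py` for the authoritative list of abstract methods and their signatures.
+
+```python
+from .base import Module
+
+
+class TrustZoneModule(Module):
+    def __init__(self, name, node, priority, deployed, attested):
+        # Forward the base-class parameters (the exact list is in base.py)
+        super().__init__(name, node, priority, deployed, attested)
+
+    @staticmethod
+    def load(mod_dict, node_obj):
+        # Create a TrustZoneModule from its deployment descriptor entry
+        # (signature is illustrative; see base.py for the real one)
+        raise NotImplementedError
+
+    def dump(self):
+        # Inverse of load: serialize this module back to a descriptor entry
+        raise NotImplementedError
+
+    async def build(self):
+        # Compile the module sources into a TrustZone trusted application
+        raise NotImplementedError
+
+    async def deploy(self):
+        # Send the compiled binary to the node's event manager
+        raise NotImplementedError
+
+    async def attest(self):
+        # Attest the deployed module and establish the module key
+        raise NotImplementedError
+```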
+ +- Check the `base.py` file for a description of the methods you have to override +- Check other implementations (e.g., `sancus.py`) for some additional hints +- You can of course implement new methods if needed, as well as override default implementation of methods in the base class \ No newline at end of file diff --git a/example/example.json b/example/example.json deleted file mode 100644 index d1e7619..0000000 --- a/example/example.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "nodes": [ - { - "type": "sancus", - "name": "node1", - "ip_address": "192.168.0.2", - "vendor_id": 4660, - "vendor_key": "4078d505d82099ba" - } - ], - "modules" : [ - { - "type": "sancus", - "name": "sm1", - "files": ["sm1.c"], - "node": "node1" - }, - { - "type": "sancus", - "name": "sm2", - "files": ["sm2.c"], - "node": "node1" - } - ], - "connections": [ - { - "from_module": "sm1", - "from_output": "output", - "to_module": "sm2", - "to_input": "input" - } - ] -} diff --git a/example/info.txt b/example/info.txt deleted file mode 100644 index 4ef8ab4..0000000 --- a/example/info.txt +++ /dev/null @@ -1,6 +0,0 @@ -reactive-tools deploy example.json --result result.json -sancus-crypto --key --mac buttons_driver -reactive-tools call --config result.json --module sm1 --entry init --arg -sancus-crypto --key --mac lcd_driver -reactive-tools call --config result.json --module sm2 --entry init --arg - \ No newline at end of file diff --git a/example/result.json b/example/result.json deleted file mode 100644 index 84d32ec..0000000 --- a/example/result.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "connections": [ - { - "from_output": "output", - "from_module": "sm1", - "key": "9d458b1b871a1f6c", - "to_input": "input", - "to_module": "sm2" - } - ], - "modules": [ - { - "id": 5, - "symtab": "/tmp/tmp3g2bhsva.ld", - "files": [ - "/home/job/test/sancus/reactive/sm1.c" - ], - "node": "node1", - "key": "245355d57e6ae413", - "name": "sm1", - "binary": "/tmp/tmpw5u59raa.elf", - "type": "sancus" - }, - { - "id": 6, - "symtab": "/tmp/tmpycjlxdj5.ld", - "files": [ - "/home/job/test/sancus/reactive/sm2.c" - ], - "node": "node1", - "key": "4064dc064c4906bd", - "name": "sm2", - "binary": "/tmp/tmpgj1yp6ox.elf", - "type": "sancus" - } - ], - "nodes": [ - { - "ip_address": "192.168.0.2", - "name": "node1", - "vendor_id": 4660, - "type": "sancus", - "vendor_key": "4078d505d82099ba" - } - ] -} \ No newline at end of file diff --git a/example/sm1.c b/example/sm1.c deleted file mode 100644 index 561dbe1..0000000 --- a/example/sm1.c +++ /dev/null @@ -1,40 +0,0 @@ -#include - -#include - -#include "/home/job/phd/sancus/contiki-support/examples/sm-server/buttons_driver.h" - -SM_OUTPUT(sm1, output); - -SM_ENTRY(sm1) void on_button_event(int pressed) -{ - if (sancus_get_caller_id() != *SM_GET_VERIFY_ID(sm1, buttons_driver)) - { - //puts("Illegal caller"); - return; - } - - output(&pressed, sizeof(pressed)); -} - -SM_ENTRY(sm1) void init(uint8_t* input_data, size_t len) -{ -// puts("SM1 init"); - - sm_id driver_id = sancus_verify_address(input_data, - SM_GET_ENTRY(buttons_driver)); - - if (driver_id == 0) - { - //puts("Driver verification failed"); - return; - } - - *SM_GET_VERIFY_ID(sm1, buttons_driver) = driver_id; - - //puts("Registering callback"); - buttons_driver_register_callback(Button1, - SM_GET_ENTRY(sm1), - SM_GET_ENTRY_IDX(sm1, on_button_event)); - //puts("Done"); -} diff --git a/example/sm2.c b/example/sm2.c deleted file mode 100644 index df37460..0000000 --- a/example/sm2.c +++ /dev/null @@ -1,28 +0,0 @@ -#include - -#include - -#include 
"/home/job/phd/sancus/contiki-support/examples/sm-server/lcd_driver.h" - -SM_INPUT(sm2, input, data, len) -{ - int pressed = *(int*)data; - lcd_driver_write(pressed ? "P" : "R"); -} - -SM_ENTRY(sm2) void init(uint8_t* input_data, size_t len) -{ -// puts("SM2 init"); - - sm_id driver_id = sancus_verify_address(input_data, - SM_GET_ENTRY(lcd_driver)); - - if (driver_id == 0) - { -// puts("Driver verification failed"); - return; - } - - *SM_GET_VERIFY_ID(sm2, lcd_driver) = driver_id; - lcd_driver_acquire(); -} diff --git a/reactivetools/cli.py b/reactivetools/cli.py index 27d69ac..ebabd23 100644 --- a/reactivetools/cli.py +++ b/reactivetools/cli.py @@ -1,11 +1,16 @@ import argparse import logging import asyncio -import pdb import sys import binascii +import os from . import config +from . import glob + + +class Error(Exception): + pass def _setup_logging(args): @@ -16,28 +21,7 @@ def _setup_logging(args): else: level = logging.WARNING - err_handler = logging.StreamHandler(sys.stderr) - err_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) - err_handler.setLevel(logging.WARNING) - logging.root.addHandler(err_handler) - - class InfoFilter(logging.Filter): - def filter(self, record): - return record.levelno < logging.WARNING - - info_handler = logging.StreamHandler(sys.stdout) - info_handler.setFormatter(logging.Formatter('%(message)s')) - info_handler.setLevel(logging.INFO) - info_handler.addFilter(InfoFilter()) - logging.root.addHandler(info_handler) - - logging.root.setLevel(level) - - -def _setup_pdb(args): - if args.debug: - sys.excepthook = \ - lambda type, value, traceback: pdb.post_mortem(traceback) + logging.basicConfig(format='%(levelname)s: %(message)s', level=level) def _parse_args(args): @@ -49,20 +33,38 @@ def _parse_args(args): action='store_true') parser.add_argument( '--debug', - help='Debug output and open PDB on uncaught exceptions', + help='Debug output', + action='store_true') + parser.add_argument( + '--manager', + help='Offload the task to the Attestation Manager, if possible', + action='store_true') + parser.add_argument( + '--timing', + help='Measure time between operations, for evaluation', action='store_true') subparsers = parser.add_subparsers(dest='command') # Workaround a Python bug. See http://bugs.python.org/issue9253#msg186387 subparsers.required = True + # deploy deploy_parser = subparsers.add_parser( 'deploy', help='Deploy a reactive network') deploy_parser.set_defaults(command_handler=_handle_deploy) + deploy_parser.add_argument( + '--mode', + help='build mode of modules. 
between "debug" and "release"', + default='debug' + ) deploy_parser.add_argument( 'config', - help='Configuration file describing the network') + help='Name of the configuration file describing the network') + deploy_parser.add_argument( + '--workspace', + help='Root directory containing all the modules and the configuration file', + default=".") deploy_parser.add_argument( '--result', help='File to write the resulting configuration to') @@ -70,16 +72,105 @@ def _parse_args(args): '--deploy-in-order', help='Deploy modules in the order they are found in the config file', action='store_true') + deploy_parser.add_argument( + '--output', + help='Output file type, between JSON and YAML', + default=None) + deploy_parser.add_argument( + '--module', + help='Module to deploy (if not specified, deploy all modules)', + default=None) + + # build + build_parser = subparsers.add_parser( + 'build', + help='Build the executables of the SMs as declared in the input configuration file') + build_parser.set_defaults(command_handler=_handle_build) + build_parser.add_argument( + '--mode', + help='build mode of modules. between "debug" and "release"', + default='debug' + ) + build_parser.add_argument( + 'config', + help='Name of the configuration file describing the network') + build_parser.add_argument( + '--workspace', + help='Root directory containing all the modules and the configuration file', + default=".") + build_parser.add_argument( + '--module', + help='Module to build (if not specified, build all modules)', + default=None) + # attest + attest_parser = subparsers.add_parser( + 'attest', + help='Attest deployed modules') + attest_parser.set_defaults(command_handler=_handle_attest) + attest_parser.add_argument( + 'config', + help='Specify configuration file to use') + attest_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + attest_parser.add_argument( + '--output', + help='Output file type, between JSON and YAML', + default=None) + attest_parser.add_argument( + '--module', + help='Module to attest (if not specified, attest all modules)', + default=None) + + # connect + connect_parser = subparsers.add_parser( + 'connect', + help='Connect deployed and attested modules') + connect_parser.set_defaults(command_handler=_handle_connect) + connect_parser.add_argument( + 'config', + help='Specify configuration file to use') + connect_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + connect_parser.add_argument( + '--output', + help='Output file type, between JSON and YAML', + default=None) + connect_parser.add_argument( + '--connection', + help='Connection to establish (if unspecified, establish all connections)', + default=None) + + # register + register_parser = subparsers.add_parser( + 'register', + help='Register a periodic event') + register_parser.set_defaults(command_handler=_handle_register) + register_parser.add_argument( + 'config', + help='Specify configuration file to use') + register_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + register_parser.add_argument( + '--output', + help='Output file type, between JSON and YAML', + default=None) + register_parser.add_argument( + '--event', + help='Event to register (if not specified, register all events)', + default=None) + + # call call_parser = subparsers.add_parser( 'call', help='Call a deployed module') call_parser.set_defaults(command_handler=_handle_call) call_parser.add_argument( - '--config', - help='Specify configuration 
file to use ' - '(the result of a previous "deploy" run)', - required=True) + 'config', + help='Specify configuration file to use') call_parser.add_argument( '--module', help='Name of the module to call', @@ -93,6 +184,95 @@ def _parse_args(args): help='Argument to pass to the entry point (hex byte array)', type=binascii.unhexlify, default=None) + call_parser.add_argument( + '--out', + help='File to write the received result to') + + # output + output_parser = subparsers.add_parser( + 'output', + help='Trigger the output of a \"direct\" connection (between deployer and SM)') + output_parser.set_defaults(command_handler=_handle_output) + output_parser.add_argument( + 'config', + help='Specify configuration file to use') + output_parser.add_argument( + '--connection', + help='Connection ID or name of the connection', + required=True) + output_parser.add_argument( + '--arg', + help='Argument to pass to the output (hex byte array)', + type=binascii.unhexlify, + default=None) + output_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + + # request + request_parser = subparsers.add_parser( + 'request', + help='Trigger the request of a \"direct\" connection (between deployer and SM)') + request_parser.set_defaults(command_handler=_handle_request) + request_parser.add_argument( + 'config', + help='Specify configuration file to use') + request_parser.add_argument( + '--connection', + help='Connection ID or name of the connection', + required=True) + request_parser.add_argument( + '--arg', + help='Argument to pass to the request (hex byte array)', + type=binascii.unhexlify, + default=None) + request_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + request_parser.add_argument( + '--out', + help='File to write the received result to') + + # disable + disable_parser = subparsers.add_parser( + 'disable', + help='Send a request to disable a module') + disable_parser.set_defaults(command_handler=_handle_disable) + disable_parser.add_argument( + 'config', + help='Specify configuration file to use') + disable_parser.add_argument( + '--module', + help='Name of the module to disable', + required=True) + disable_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + + # update + update_parser = subparsers.add_parser( + 'update', + help='Update a module') + update_parser.set_defaults(command_handler=_handle_update) + update_parser.add_argument( + 'config', + help='Specify configuration file to use') + update_parser.add_argument( + '--module', + help='Name of the module to update', + required=True) + update_parser.add_argument( + '--result', + help='File to write the resulting configuration to') + update_parser.add_argument( + '--entry', + help='Entry point of the old module to call (for state transfer)') + update_parser.add_argument( + '--output', + help='Output of the old module to connect (for state transfer)') + update_parser.add_argument( + '--input', + help='Input of the new module to connect (for state transfer)') return parser.parse_args(args) @@ -100,45 +280,196 @@ def _parse_args(args): def _handle_deploy(args): logging.info('Deploying %s', args.config) - conf = config.load(args.config) + glob.set_build_mode(args.mode) + + os.chdir(args.workspace) + conf = config.load(args.config, args.manager, args.timing, args.output) + + conf.deploy(args.deploy_in_order, args.module) + + out_file = args.result or args.config + logging.info('Writing post-deployment configuration to %s', out_file) + 
config.dump_config(conf, out_file) + conf.cleanup() + + +def _handle_build(args): + logging.info('Building %s', args.config) + + glob.set_build_mode(args.mode) + + os.chdir(args.workspace) + conf = config.load(args.config, args.manager, args.timing) + + conf.build(args.module) + conf.cleanup() + + +def _handle_attest(args): + logging.info('Attesting modules') + + conf = config.load(args.config, args.manager, args.timing, args.output) + + conf.attest(args.module) + + out_file = args.result or args.config + logging.info('Writing post-deployment configuration to %s', out_file) + config.dump_config(conf, out_file) + conf.cleanup() + + +def _handle_connect(args): + logging.info('Connecting modules') - if args.deploy_in_order: - conf.deploy_modules_ordered() + conf = config.load(args.config, args.manager, args.timing, args.output) - conf.install() + conf.connect(args.connection) - if args.result is not None: - logging.info('Writing post-deployment configuration to %s', args.result) - config.dump(conf, args.result) + out_file = args.result or args.config + logging.info('Writing post-deployment configuration to %s', out_file) + config.dump_config(conf, out_file) + conf.cleanup() + + +def _handle_register(args): + logging.info('Registering periodic events') + + conf = config.load(args.config, args.manager, args.timing, args.output) + + conf.register_event(args.event) + + out_file = args.result or args.config + logging.info('Writing post-deployment configuration to %s', out_file) + config.dump_config(conf, out_file) + conf.cleanup() def _handle_call(args): logging.info('Calling %s:%s', args.module, args.entry) - conf = config.load(args.config) + conf = config.load(args.config, args.manager, args.timing) module = conf.get_module(args.module) + t1 = conf.record_time() + asyncio.get_event_loop().run_until_complete( - module.call(args.entry, args.arg)) + module.node.call(module, args.entry, args.arg, args.out)) + + conf.record_time(t1, "Call time for {}:{}".format(args.module, args.entry)) + + conf.cleanup() + + +def _handle_output(args): + logging.info('Triggering output of connection %s', args.connection) + + conf = config.load(args.config, args.manager, args.timing) + + if args.connection.isnumeric(): + conn = conf.get_connection_by_id(int(args.connection)) + else: + conn = conf.get_connection_by_name(args.connection) + + if not conn.direct: + raise Error("Connection is not direct.") + + if conn.to_input is None: + raise Error("Not a output-input connection") + + t1 = conf.record_time() + + asyncio.get_event_loop().run_until_complete( + conn.to_module.node.output(conn, args.arg)) + + conf.record_time(t1, "Output time for {}".format(conn.name)) + + conn.nonce += 1 + out_file = args.result or args.config + config.dump_config(conf, out_file) + conf.cleanup() + + +def _handle_request(args): + logging.info('Triggering request of connection %s', args.connection) + + conf = config.load(args.config, args.manager, args.timing) + + if args.connection.isnumeric(): + conn = conf.get_connection_by_id(int(args.connection)) + else: + conn = conf.get_connection_by_name(args.connection) + + if not conn.direct: + raise Error("Connection is not direct.") + + if conn.to_handler is None: + raise Error("Not a request-handler connection") + + t1 = conf.record_time() + + asyncio.get_event_loop().run_until_complete( + conn.to_module.node.request(conn, args.arg, args.out)) + + conf.record_time(t1, "Request time for {}".format(conn.name)) + + conn.nonce += 2 + out_file = args.result or args.config + config.dump_config(conf, 
out_file) + conf.cleanup() + + +def _handle_disable(args): + logging.info('Disabling %s', args.module) + + conf = config.load(args.config, args.manager, args.timing) + module = conf.get_module(args.module) + + t1 = conf.record_time() + + asyncio.get_event_loop().run_until_complete( + module.node.disable_module(module)) + + conf.record_time(t1, "Disable time for {}".format(module.name)) + + conf.cleanup() + + +def _handle_update(args): + logging.info('Updating %s', args.module) + + conf = config.load(args.config, args.manager, args.timing) + module = conf.get_module(args.module) + + conf.update(module, args.entry, args.output, args.input) + + out_file = args.result or args.config + logging.info('Writing post-deployment configuration to %s', out_file) + config.dump_config(conf, out_file) + conf.cleanup() def main(raw_args=None): args = _parse_args(raw_args) - _setup_logging(args) - _setup_pdb(args) + + # create working directory + try: + os.mkdir(glob.BUILD_DIR) + except FileExistsError: + pass + except: + logging.error("Failed to create build dir") + sys.exit(-1) try: args.command_handler(args) - except BaseException as e: + except Exception as e: if args.debug: raise logging.error(e) - return 1 - finally: - # If we don't close the event loop explicitly, there is an unhandled - # exception being thrown from its destructor. Not sure why but closing - # it here prevents annoying noise. - asyncio.get_event_loop().close() + for task in asyncio.Task.all_tasks(): + task.cancel() + + sys.exit(-1) diff --git a/reactivetools/config.py b/reactivetools/config.py index 8e2659e..5451c4c 100644 --- a/reactivetools/config.py +++ b/reactivetools/config.py @@ -1,18 +1,20 @@ -import json -import binascii -import ipaddress -from pathlib import Path import os import asyncio -import functools -import binascii -import types +import logging +import time -import sancus.config - -from .nodes import SancusNode -from .modules import SancusModule +from .modules import Module +from .nodes import Node from .connection import Connection +from .periodic_event import PeriodicEvent +from .dumpers import * +from .loaders import * +from .rules.evaluators import * +from .descriptor import DescriptorType +from .manager import Manager, set_manager, get_manager + +from .nodes import node_rules, node_funcs, node_cleanup_coros +from .modules import module_rules, module_funcs, module_cleanup_coros class Error(Exception): @@ -20,14 +22,13 @@ class Error(Exception): class Config: - def __init__(self, file_name): - self.path = Path(file_name).resolve() + def __init__(self): self.nodes = [] self.modules = [] self.connections = [] - - def get_dir(self): - return self.path.parent + self.connections_current_id = 0 + self.events_current_id = 0 + self.output_type = None def get_node(self, name): for n in self.nodes: @@ -43,201 +44,436 @@ def get_module(self, name): raise Error('No module with name {}'.format(name)) - async def install_async(self): - futures = map(Connection.establish, self.connections) + def replace_module(self, module): + for i in range(len(self.modules)): + m = self.modules[i] + if m.name == module.name: + self.modules[i] = module + return + + raise Error('No module with name {}'.format(module.name)) + + def replace_connection(self, conn): + for i in range(len(self.connections)): + c = self.connections[i] + if c.id == conn.id: + self.connections[i] = conn + return + + raise Error('No connection with id {}'.format(conn.id)) + + def get_connection_by_id(self, id_): + for c in self.connections: + if c.id == id_: + return c + 
+ raise Error('No connection with ID {}'.format(id_)) + + def get_connection_by_name(self, name): + for c in self.connections: + if c.name == name: + return c + + raise Error('No connection with name {}'.format(name)) + + def get_periodic_event(self, name): + for e in self.periodic_events: + if e.name == name: + return e + + raise Error('No periodic event with name {}'.format(name)) + + async def __deploy_module(self, module): + t1 = self.record_time() + await module.build() + t2 = self.record_time(t1, "Build time for {}".format(module.name)) + await module.deploy() + self.record_time(t2, "Deploy time for {}".format(module.name)) + + async def __build_module(self, module): + t1 = self.record_time() + await module.build() + self.record_time(t1, "Build time for {}".format(module.name)) + + async def __attest_module(self, module): + t1 = self.record_time() + await module.attest() + self.record_time(t1, "Attest time for {}".format(module.name)) + + async def __establish_connection(self, conn): + t1 = self.record_time() + await conn.establish() + self.record_time(t1, "Establish time for {}".format(conn.name)) + + async def __register_event(self, event): + t1 = self.record_time() + await event.register() + self.record_time(t1, "Register time for {}".format(event.name)) + + async def __transfer_state(self, module, new_module, + entry_name, output_name, input_name): + if not all([entry_name, output_name, input_name]): + return + + t1 = self.record_time() + + conn_transfer = Connection( + "__transfer", + module, + output_name, + None, + new_module, + input_name, + None, + module.get_default_encryption(), + None, + self.connections_current_id, + None, + False, + False + ) + + # create new connection + await conn_transfer.establish() + + # call entry point of module + await module.node.call(module, entry_name, None, None) + + # disable both modules + await module.node.disable_module(module) + await new_module.node.disable_module(new_module) + + self.record_time(t1, "Transfer time for {}".format(new_module.name)) + + async def deploy_priority_modules(self): + priority_modules = [ + sm for sm in self.modules if sm.priority is not None and not sm.deployed] + priority_modules.sort(key=lambda sm: sm.priority) + + logging.debug("Priority modules: {}".format( + [sm.name for sm in priority_modules])) + for module in priority_modules: + await self.__deploy_module(module) + + async def deploy_async(self, in_order, module): + # If module is not None, deploy just this one + if module: + mod = self.get_module(module) + if mod.deployed: + raise Error('Module {} already deployed'.format(module)) + + logging.info("Deploying {}".format(module)) + await self.__deploy_module(mod) + return + + # First, deploy all modules that have a priority (in order of priority) + await self.deploy_priority_modules() + + # If deployment in order is desired, deploy one module at a time + if in_order: + for m in self.modules: + if not m.deployed: + await self.__deploy_module(m) + # Otherwise, deploy all modules concurrently + else: + lst = self.modules + + def l_filter(x): + return not x.deployed + + def l_map(x): + return self.__deploy_module(x) + + futures = map(l_map, filter(l_filter, lst)) + await asyncio.gather(*futures) + + def deploy(self, in_order, module): + asyncio.get_event_loop().run_until_complete(self.deploy_async(in_order, module)) + + async def build_async(self, module): + lst = self.modules if not module else [self.get_module(module)] + + futures = [self.__build_module(module) for module in lst] + await 
asyncio.gather(*futures) + + def build(self, module): + asyncio.get_event_loop().run_until_complete(self.build_async(module)) + + async def attest_async(self, module): + lst = self.modules if not module else [self.get_module(module)] + + to_attest = list(filter(lambda x: not x.attested, lst)) + + if any(map(lambda x: not x.deployed, to_attest)): + raise Error("One or more modules to attest are not deployed yet") + + logging.info("To attest: {}".format([x.name for x in to_attest])) + + futures = map(self.__attest_module, to_attest) + await asyncio.gather(*futures) + + def attest(self, module): + asyncio.get_event_loop().run_until_complete(self.attest_async(module)) + + async def connect_async(self, conn): + lst = self.connections if not conn else [ + self.get_connection_by_name(conn)] + + to_connect = list(filter(lambda x: not x.established, lst)) + + if any(map( + lambda x: (x.from_module and not x.from_module.attested) or + not x.to_module.attested, to_connect)): + raise Error("One or more modules to connect are not attested yet") + + logging.info("To connect: {}".format([x.name for x in to_connect])) + + futures = map(self.__establish_connection, to_connect) await asyncio.gather(*futures) - def install(self): - asyncio.get_event_loop().run_until_complete(self.install_async()) + def connect(self, conn): + asyncio.get_event_loop().run_until_complete(self.connect_async(conn)) + + async def register_async(self, event): + lst = self.periodic_events if not event else [ + self.get_periodic_event(event)] + + to_register = list(filter(lambda x: not x.established, lst)) + + if any(map(lambda x: not x.module.attested, to_register)): + raise Error("One or more modules are not attested yet") + + logging.info("To register: {}".format([x.name for x in to_register])) + + futures = map(self.__register_event, to_register) + await asyncio.gather(*futures) + + def register_event(self, event): + asyncio.get_event_loop().run_until_complete(self.register_async(event)) + + async def cleanup_async(self): + coros = list( + map(lambda c: c(), node_cleanup_coros + module_cleanup_coros)) + await asyncio.gather(*coros) + + def cleanup(self): + asyncio.get_event_loop().run_until_complete(self.cleanup_async()) + + async def update_async(self, module, entry_name, output_name, input_name): + if not module.deployed: + raise Error("Module is not deployed yet.") + + t1 = self.record_time() + + # clone module and update nodes + new_module = module.clone() + module.node = module.old_node + new_module.old_node = new_module.node + + logging.info("Deploying and attesting new {}".format(module)) + + await self.__deploy_module(new_module) + await self.__attest_module(new_module) + + logging.info("Disabling old module") + await module.node.disable_module(module) + + # transfer state + await self.__transfer_state(module, new_module, + entry_name, output_name, input_name) - async def deploy_modules_ordered_async(self): - for module in self.modules: - await module.deploy() + # re-establish all connections that involve this module + t2 = self.record_time() - def deploy_modules_ordered(self): + connections = [conn for conn in self.connections + if module in (conn.from_module, conn.to_module)] + + for conn in connections: + logging.info("Re-establishing connection {} with id {}".format(conn.name, conn.id)) + new_conn = conn.clone() + + if new_conn.from_module == module: + new_conn.from_module = new_module + if new_conn.to_module == module: + new_conn.to_module = new_module + + await self.__establish_connection(new_conn) + 
self.replace_connection(new_conn) + + self.record_time(t2, "Connect time for {}".format(new_module.name)) + + # update in conf + self.replace_module(new_module) + + logging.info("Update complete") + self.record_time(t1, "Update time for {}".format(new_module.name)) + + def update(self, module, entry_name, output_name, input_name): asyncio.get_event_loop().run_until_complete( - self.deploy_modules_ordered_async()) + self.update_async(module, entry_name, output_name, input_name)) + def record_time(self, previous=None, msg=None): + if not self.measure_time: + return None -def load(file_name): - with open(file_name, 'r') as f: - contents = json.load(f) + t = time.time() - config = Config(file_name) - config.nodes = _load_list(contents['nodes'], _load_node) - config.modules = _load_list(contents['modules'], - lambda m: _load_module(m, config)) - config.connections = _load_list(contents['connections'], - lambda c: _load_connection(c, config)) - return config + if not previous: + return t + print("{}: {:.3f}".format(msg, t - previous)) -def _load_list(l, load_func=lambda e: e): - if l is None: - return [] - else: - return [load_func(e) for e in l] + return t -def _load_node(node_dict): - return _node_load_funcs[node_dict['type']](node_dict) +def load(file_name, manager, measure_time, output_type=None): + config = Config() + desc_type = DescriptorType.from_str(output_type) + contents, input_type = DescriptorType.load_any(file_name) -def _load_sancus_node(node_dict): - name = node_dict['name'] - vendor_id = _parse_vendor_id(node_dict['vendor_id']) - vendor_key = _parse_vendor_key(node_dict['vendor_key']) - ip_address = ipaddress.ip_address(node_dict['ip_address']) - deploy_port = node_dict.get('deploy_port', 2000) - reactive_port = node_dict.get('reactive_port', 2001) - return SancusNode(name, vendor_id, vendor_key, - ip_address, deploy_port, reactive_port) + # The output file format is: + # - desc_type, if it has been provided as input, or + # - the same type as the input file otherwise + config.output_type = desc_type or input_type + config.measure_time = measure_time -def _load_module(mod_dict, config): - return _module_load_funcs[mod_dict['type']](mod_dict, config) + try: + _load_manager(contents['manager'], config) + except: + if manager: + raise + config.nodes = load_list(contents['nodes'], + lambda n: _load_node(n, config)) + config.modules = load_list(contents['modules'], + lambda m: _load_module(m, config)) -def _load_sancus_module(mod_dict, config): - name = mod_dict['name'] - files = _load_list(mod_dict['files'], - lambda f: _load_module_file(f, config)) - cflags = _load_list(mod_dict.get('cflags')) - ldflags = _load_list(mod_dict.get('ldlags')) - node = config.get_node(mod_dict['node']) - binary = mod_dict.get('binary') - id = mod_dict.get('id') - symtab = mod_dict.get('symtab') - key = mod_dict.get('key') - return SancusModule(name, files, cflags, ldflags, node, - binary, id, symtab, key) + config.connections_current_id = contents.get('connections_current_id') or 0 + config.events_current_id = contents.get('events_current_id') or 0 + if 'connections' in contents: + config.connections = load_list(contents['connections'], + lambda c: _load_connection(c, config)) + else: + config.connections = [] -def _load_connection(conn_dict, config): - from_module = config.get_module(conn_dict['from_module']) - from_output = conn_dict['from_output'] - to_module = config.get_module(conn_dict['to_module']) - to_input = conn_dict['to_input'] - - # Don't use dict.get() here because we don't want to call
os.urandom() when - not strictly necessary. - if 'key' in conn_dict: - key = conn_dict['key'] + if 'periodic-events' in contents: + config.periodic_events = load_list(contents['periodic-events'], + lambda e: _load_periodic_event(e, config)) else: - key = os.urandom(sancus.config.SECURITY // 8) + config.periodic_events = [] - return Connection(from_module, from_output, to_module, to_input, key) + return config -def _parse_vendor_id(id): - if not 1 <= id <= 2**16 - 1: - raise Error('Vendor ID out of range') +def _load_node(node_dict, _): + # Basic rules common to all nodes + evaluate_rules(os.path.join("default", "node.yaml"), node_dict) + # Specific rules for a specific node type + evaluate_rules(os.path.join( + "nodes", node_rules[node_dict['type']]), node_dict) - return id + return node_funcs[node_dict['type']](node_dict) -def _parse_vendor_key(key_str): - key = binascii.unhexlify(key_str) +def _load_module(mod_dict, config): + # Basic rules common to all modules + evaluate_rules(os.path.join("default", "module.yaml"), mod_dict) + # Specific rules for a specific module type + evaluate_rules(os.path.join( + "modules", module_rules[mod_dict['type']]), mod_dict) - if len(key) != sancus.config.SECURITY // 8: - raise Error('Keys should be {} bit'.format(sancus.config.SECURITY)) + node = config.get_node(mod_dict['node']) + old_node = config.get_node(mod_dict.get('old_node', mod_dict['node'])) + module = module_funcs[mod_dict['type']](mod_dict, node, old_node) - return key + if node.__class__ not in module.get_supported_nodes(): + raise Error("Node {} ({}) does not support module {} ({})".format( + node.name, node.__class__.__name__, + module.name, module.__class__.__name__)) + return module -def _load_module_file(file_name, config): - path = Path(file_name) - return path if path.is_absolute() else config.get_dir() / path +def _load_connection(conn_dict, config): + evaluate_rules(os.path.join("default", "connection.yaml"), conn_dict) + return Connection.load(conn_dict, config) -_node_load_funcs = { - 'sancus': _load_sancus_node -} +def _load_periodic_event(events_dict, config): + evaluate_rules(os.path.join("default", "periodic_event.yaml"), events_dict) + return PeriodicEvent.load(events_dict, config) -_module_load_funcs = { - 'sancus': _load_sancus_module -} +def _load_manager(man_file, config): + if man_file is None: + raise Error("Error while parsing manager information") -def dump(config, file_name): - with open(file_name, 'w') as f: - json.dump(_dump(config), f, indent=4) + man_dict, _ = DescriptorType.load_any(man_file) + evaluate_rules(os.path.join("default", "manager.yaml"), man_dict) + man = Manager.load(man_file, man_dict, config) + set_manager(man) -@functools.singledispatch -def _dump(obj): - assert False, 'No dumper for {}'.format(type(obj)) +def evaluate_rules(rules_file, dict_): + rules = load_rules(rules_file) + ok = True -@_dump.register(Config) -def _(config): - return { - 'nodes': _dump(config.nodes), - 'modules': _dump(config.modules), - 'connections': _dump(config.connections) - } + for r in rules: + try: + result = eval(rules[r]) + except: + result = False + if not result: + logging.error("{} - Broken rule: {}".format(rules_file, r)) + ok = False -@_dump.register(list) -def _(l): - return [_dump(e) for e in l] + if not ok: + raise Error("Bad deployment descriptor") -@_dump.register(SancusNode) -def _(node): - return { - "type": "sancus", - "name": node.name, - "ip_address": str(node.ip_address), - "vendor_id": node.vendor_id, - "vendor_key": _dump(node.vendor_key) - } +def
dump_config(config, file_name): + config.output_type.dump(file_name, dump(config)) -@_dump.register(SancusModule) -def _(module): +@dump.register(Config) +def _(config): + man = get_manager() return { - "type": "sancus", - "name": module.name, - "files": _dump(module.files), - "node": module.node.name, - "binary": _dump(module.binary), - "symtab": _dump(module.symtab), - "id": _dump(module.id), - "key": _dump(module.key) + 'manager': dump(man) if man is not None else None, + 'nodes': dump(config.nodes), + 'modules': dump(config.modules), + 'connections_current_id': config.connections_current_id, + 'connections': dump(config.connections), + 'events_current_id': config.events_current_id, + 'periodic-events': dump(config.periodic_events) } -@_dump.register(Connection) -def _(conn): - return { - "from_module": conn.from_module.name, - "from_output": conn.from_output, - "to_module": conn.to_module.name, - "to_input": conn.to_input, - "key": _dump(conn.key) - } - +@dump.register(Node) +def _(node): + return node.dump() -@_dump.register(bytes) -@_dump.register(bytearray) -def _(bs): - return binascii.hexlify(bs).decode('ascii') +@dump.register(Module) +def _(module): + return module.dump() -@_dump.register(str) -@_dump.register(int) -def _(x): - return x +@dump.register(Connection) +def _(conn): + return conn.dump() -@_dump.register(Path) -def _(path): - return str(path) +@dump.register(PeriodicEvent) +def _(event): + return event.dump() -@_dump.register(types.CoroutineType) -def _(coro): - return _dump(asyncio.get_event_loop().run_until_complete(coro)) +@dump.register(Manager) +def _(man): + return man.dump() diff --git a/reactivetools/connection.py b/reactivetools/connection.py index a796545..c845452 100644 --- a/reactivetools/connection.py +++ b/reactivetools/connection.py @@ -1,23 +1,184 @@ -from collections import namedtuple import asyncio import logging +from enum import IntEnum +from .dumpers import * +from .loaders import * +from .rules.evaluators import * + +from .crypto import Encryption +from . 
import tools + + +class Error(Exception): + pass + + +class ConnectionIO(IntEnum): + OUTPUT = 0x0 + INPUT = 0x1 + REQUEST = 0x2 + HANDLER = 0x3 + + +class ConnectionIndex(): + def __init__(self, type_, name): + self.type = type_ + self.name = name + self.index = None + + async def set_index(self, module): + if self.type == ConnectionIO.OUTPUT: + self.index = await module.get_output_id(self.name) + elif self.type == ConnectionIO.INPUT: + self.index = await module.get_input_id(self.name) + elif self.type == ConnectionIO.REQUEST: + self.index = await module.get_request_id(self.name) + elif self.type == ConnectionIO.HANDLER: + self.index = await module.get_handler_id(self.name) + + async def get_index(self, module): + if self.index is not None: # an index may legitimately be 0 + return self.index + + await self.set_index(module) + return self.index + + +class Connection: + def __init__(self, name, from_module, from_output, from_request, to_module, + to_input, to_handler, encryption, key, id_, nonce, direct, established): + self.name = name + self.from_module = from_module + self.from_output = from_output + self.from_request = from_request + self.to_module = to_module + self.to_input = to_input + self.to_handler = to_handler + self.encryption = encryption + self.key = key or Connection.generate_key(from_module, to_module, encryption) + self.id = id_ + self.nonce = nonce or 0 + self.established = established + + if direct: + self.direct = True + self.from_index = None + else: + self.direct = False # to avoid assigning None + self.from_index = ConnectionIndex(ConnectionIO.OUTPUT, from_output) \ + if from_output is not None else ConnectionIndex(ConnectionIO.REQUEST, from_request) + + self.to_index = ConnectionIndex(ConnectionIO.INPUT, to_input) \ + if to_input is not None else ConnectionIndex(ConnectionIO.HANDLER, to_handler) + + @staticmethod + def load(conn_dict, config): + direct = conn_dict.get('direct') + from_module = config.get_module(conn_dict['from_module']) if is_present( + conn_dict, 'from_module') else None + from_output = conn_dict.get('from_output') + from_request = conn_dict.get('from_request') + to_module = config.get_module(conn_dict['to_module']) + to_input = conn_dict.get('to_input') + to_handler = conn_dict.get('to_handler') + encryption = Encryption.from_str(conn_dict['encryption']) + key = parse_key(conn_dict.get('key')) + nonce = conn_dict.get('nonce') + id_ = conn_dict.get('id') + established = conn_dict.get('established') + + if id_ is None: + id_ = config.connections_current_id # incremental ID + config.connections_current_id += 1 + + name = conn_dict.get('name') or "conn{}".format(id_) + + if from_module is not None: + from_module.connections += 1 + to_module.connections += 1 + + return Connection(name, from_module, from_output, from_request, to_module, + to_input, to_handler, encryption, key, id_, nonce, direct, established) + + def dump(self): + from_module = None if self.direct else self.from_module.name + + return { + "name": self.name, + "from_module": from_module, + "from_output": self.from_output, + "from_request": self.from_request, + "to_module": self.to_module.name, + "to_input": self.to_input, + "to_handler": self.to_handler, + "encryption": self.encryption.to_str(), + "key": dump(self.key), + "id": self.id, + "nonce": self.nonce, + "direct": self.direct, + "established": self.established + } + + def clone(self): + return Connection( + self.name, + self.from_module, + self.from_output, + self.from_request, + self.to_module, + self.to_input, + self.to_handler, + self.encryption, + None, + self.id, + None,
+ self.direct, + False + ) -class Connection(namedtuple('Connection', ['from_module', 'from_output', - 'to_module', 'to_input', - 'key'])): async def establish(self): + if self.established: + return + + if self.direct: + await self.__establish_direct() + else: + await self.__establish_normal() + + self.established = True + + async def __establish_normal(self): from_node, to_node = self.from_module.node, self.to_module.node - connect = from_node.connect(self.from_module, self.from_output, - self.to_module, self.to_input) - set_key_from = from_node.set_key(self.from_module, self.from_output, - self.key) - set_key_to = to_node.set_key(self.to_module, self.to_input, self.key) + # TODO check if the module is the same: if so, abort! + + connect = from_node.connect(self.to_module, self.id) + set_key_from = from_node.set_key(self.from_module, self.id, self.from_index, + self.encryption, self.key) + set_key_to = to_node.set_key(self.to_module, self.id, self.to_index, + self.encryption, self.key) await asyncio.gather(connect, set_key_from, set_key_to) - logging.info('Connection from %s:%s on %s to %s:%s on %s established', - self.from_module.name, self.from_output, from_node.name, - self.to_module.name, self.to_input, to_node.name) + logging.info('Connection %d:%s from %s:%s on %s to %s:%s on %s established', + self.id, self.name, self.from_module.name, self.from_index.name, + from_node.name, self.to_module.name, self.to_index.name, to_node.name) + + async def __establish_direct(self): + to_node = self.to_module.node + + await to_node.set_key(self.to_module, self.id, self.to_index, + self.encryption, self.key) + + logging.info('Direct connection %d:%s to %s:%s on %s established', + self.id, self.name, self.to_module.name, self.to_index.name, to_node.name) + + @staticmethod + def generate_key(module1, module2, encryption): + if (module1 is not None and encryption not in module1.get_supported_encryption()) \ + or encryption not in module2.get_supported_encryption(): + raise Error('Encryption {} not supported between {} and {}'.format( + str(encryption), module1.name, module2.name)) + return tools.generate_key(encryption.get_key_size()) diff --git a/reactivetools/crypto.py b/reactivetools/crypto.py new file mode 100644 index 0000000..fc6829a --- /dev/null +++ b/reactivetools/crypto.py @@ -0,0 +1,119 @@ +import hashlib +from enum import IntEnum +from Crypto.Cipher import AES + + +class Error(Exception): + pass + + +class Encryption(IntEnum): + AES = 0x0 # aes-gcm-128 + SPONGENT = 0x1 # spongent-128 + + @staticmethod + def from_str(str_): + lower_str = str_.lower() + + if lower_str == "aes": + return Encryption.AES + if lower_str == "spongent": + return Encryption.SPONGENT + + raise Error("No matching encryption type for {}".format(str_)) + + def to_str(self): + if self == Encryption.AES: + return "aes" + if self == Encryption.SPONGENT: + return "spongent" + + raise Error("to_str not implemented for {}".format(self.name)) + + def get_key_size(self): + if self == Encryption.AES: + return 16 + if self == Encryption.SPONGENT: + return 16 + + raise Error("get_key_size not implemented for {}".format(self.name)) + + async def encrypt(self, key, ad, data): + if self == Encryption.AES: + return await encrypt_aes(key, ad, data) + if self == Encryption.SPONGENT: + return await encrypt_spongent(key, ad, data) + + raise Error("encrypt not implemented for {}".format(self.name)) + + async def decrypt(self, key, ad, data): + if self == Encryption.AES: + return await decrypt_aes(key, ad, data) + if self == 
Encryption.SPONGENT: + return await decrypt_spongent(key, ad, data) + + raise Error("decrypt not implemented for {}".format(self.name)) + + async def mac(self, key, ad): + if self == Encryption.AES: + return await encrypt_aes(key, ad) + if self == Encryption.SPONGENT: + return await encrypt_spongent(key, ad) + + raise Error("mac not implemented for {}".format(self.name)) + + +async def encrypt_aes(key, ad, data=b''): + # Note: we set nonce to zero because our nonce is part of the associated data + aes_gcm = AES.new(key, AES.MODE_GCM, nonce=b'\x00'*12) + aes_gcm.update(ad) + cipher, tag = aes_gcm.encrypt_and_digest(data) + return cipher + tag + + +async def decrypt_aes(key, ad, data=b''): + try: + aes_gcm = AES.new(key, AES.MODE_GCM, nonce=b'\x00'*12) + aes_gcm.update(ad) + + cipher = data[:-16] + tag = data[-16:] + return aes_gcm.decrypt_and_verify(cipher, tag) + except: + raise Error("Decryption failed") + + +async def encrypt_spongent(key, ad, data=[]): + try: + import sancus.libsancuscrypt as sancus_crypto + except: + raise Error("Sancus python libraries not found in PYTHONPATH") + + cipher, tag = sancus_crypto.wrap(key, ad, data) + return cipher + tag + + +async def decrypt_spongent(key, ad, data=[]): + try: + import sancus.libsancuscrypt as sancus_crypto + except: + raise Error("Sancus python libraries not found in PYTHONPATH") + + # data should be formed like this: [cipher, tag] + tag_size = sancus_crypto.KEY_SIZE + cipher = data[:-tag_size] + tag = data[-tag_size:] + + plain = sancus_crypto.unwrap(key, ad, cipher, tag) + + if plain is None: + raise Error("Decryption failed") + + return plain + +def hash_sha256(data, size=32): + if size > 32: + raise Error( + "SHA256 cannot compute digests with length {}".format(size)) + + return hashlib.sha256(data).digest()[:size] diff --git a/reactivetools/descriptor.py b/reactivetools/descriptor.py new file mode 100644 index 0000000..33bde47 --- /dev/null +++ b/reactivetools/descriptor.py @@ -0,0 +1,61 @@ +import json +import os +from enum import IntEnum +import yaml + + +class Error(Exception): + pass + + +class DescriptorType(IntEnum): + JSON = 0 + YAML = 1 + + @staticmethod + def from_str(type_): + if type_ is None: + return None + + type_lower = type_.lower() + + if type_lower == "json": + return DescriptorType.JSON + if type_lower == "yaml": + return DescriptorType.YAML + + raise Error("Bad deployment descriptor type: {}".format(type_)) + + @staticmethod + def load_any(file): + if not os.path.exists(file): + raise Error("Input file {} does not exist".format(file)) + + try: + return DescriptorType.JSON.load(file), DescriptorType.JSON + except: + try: + return DescriptorType.YAML.load(file), DescriptorType.YAML + except: + raise Error( + "Input file {} is not a JSON, nor a YAML".format(file)) + + def load(self, file): + with open(file, 'r') as f: + if self == DescriptorType.JSON: + return json.load(f) + + if self == DescriptorType.YAML: + return yaml.load(f, Loader=yaml.FullLoader) + + raise Error( + "load not implemented for {}".format(self.name)) + + def dump(self, file, data): + with open(file, 'w') as f: + if self == DescriptorType.JSON: + json.dump(data, f, indent=4) + elif self == DescriptorType.YAML: + yaml.dump(data, f) + else: + raise Error("dump not implemented for {}".format(self.name)) diff --git a/reactivetools/dumpers.py b/reactivetools/dumpers.py new file mode 100644 index 0000000..1f5dc80 --- /dev/null +++ b/reactivetools/dumpers.py @@ -0,0 +1,41 @@ +import asyncio +import functools +import types +import binascii + + 
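+# The dump() function below implements a small serialization layer based on +# functools.singledispatch: a call to dump() dispatches on the argument's +# type, and each serializable class registers its own dumper (see the +# registrations for Config, Node, Module, Connection, etc. in config.py). +# A minimal usage sketch (illustrative values only): +# dump(b'\xde\xad') # -> 'dead' (bytes/bytearray are hex-encoded) +# dump([1, 'a']) # -> [1, 'a'] (lists are dumped element-wise) +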
+@functools.singledispatch +def dump(obj): + assert False, 'No dumper for {}'.format(type(obj)) + + +@dump.register(list) +def _(l): + return [dump(e) for e in l] + + +@dump.register(bytes) +@dump.register(bytearray) +def _(bs): + return binascii.hexlify(bs).decode('ascii') + + +@dump.register(str) +@dump.register(int) +def _(x): + return x + + +@dump.register(tuple) +def _(t): + return {t[1]: t[0]} + + +@dump.register(types.CoroutineType) +def _(coro): + return dump(asyncio.get_event_loop().run_until_complete(coro)) + + +@dump.register(dict) +def _(dict_): + return dict_ diff --git a/reactivetools/glob.py b/reactivetools/glob.py new file mode 100644 index 0000000..2c802d2 --- /dev/null +++ b/reactivetools/glob.py @@ -0,0 +1,45 @@ +from enum import IntEnum +import os + +BUILD_DIR = os.path.join(os.getcwd(), "build") +ATTMAN_CLI = "attman-cli" + + +class Error(Exception): + pass + + +class BuildMode(IntEnum): + DEBUG = 0 + RELEASE = 1 + + @staticmethod + def from_str(mode): + mode_lower = mode.lower() + + if mode_lower == "debug": + return BuildMode.DEBUG + if mode_lower == "release": + return BuildMode.RELEASE + + raise Error("Bad BuildMode: {}".format(mode)) + + def to_str(self): + if self == BuildMode.DEBUG: + return "debug" + if self == BuildMode.RELEASE: + return "release" + + raise Error("BuildMode::to_str failed: this should never happen") + + +__BUILD_MODE = BuildMode.DEBUG + + +def set_build_mode(mode): + global __BUILD_MODE + __BUILD_MODE = BuildMode.from_str(mode) + + +def get_build_mode(): + return __BUILD_MODE diff --git a/reactivetools/loaders.py b/reactivetools/loaders.py new file mode 100644 index 0000000..456a0f6 --- /dev/null +++ b/reactivetools/loaders.py @@ -0,0 +1,23 @@ +import binascii +import os + + +def load_list(l, load_func=lambda e: e): + if l is None: + return [] + + return [load_func(e) for e in l] + + +def parse_key(key_str): + if key_str is None: + return None + + return binascii.unhexlify(key_str) + + +def parse_file_name(file_name): + if file_name is None: + return None + + return os.path.abspath(file_name) diff --git a/reactivetools/manager.py b/reactivetools/manager.py new file mode 100644 index 0000000..88d1b3f --- /dev/null +++ b/reactivetools/manager.py @@ -0,0 +1,61 @@ +import asyncio +import os + +from . import tools +from . 
import glob +from .descriptor import DescriptorType + +__manager = None + + +def set_manager(man): + global __manager + __manager = man + + +def get_manager(): + return __manager + + +class Manager: + lock = asyncio.Lock() + + def __init__(self, file, host, port, key): + self.config = file + self.host = host + self.port = port + self.key = key + self.sp_pubkey = None + + @staticmethod + def load(man_file, man_dict, _): + host = man_dict['host'] + port = man_dict['port'] + key = man_dict['key'] + + return Manager(man_file, host, port, key) + + def dump(self): + man = { + "host": self.host, + "port": self.port, + "key": self.key + } + + DescriptorType.YAML.dump(self.config, man) + return self.config + + async def get_sp_pubkey(self): + async with self.lock: + if self.sp_pubkey is not None: + return self.sp_pubkey + + args = "--config {} --request get-pub-key".format( + self.config).split() + out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args) + + self.sp_pubkey = os.path.join(glob.BUILD_DIR, "manager-sp_pubkey.pem") + with open(self.sp_pubkey, "wb") as f: + f.write(out) + + return self.sp_pubkey diff --git a/reactivetools/modules.py b/reactivetools/modules.py deleted file mode 100644 index 671cfd4..0000000 --- a/reactivetools/modules.py +++ /dev/null @@ -1,195 +0,0 @@ -import logging -import asyncio -import binascii -from enum import Enum -from collections import namedtuple - -from elftools.elf import elffile - -import sancus.crypto - -from .nodes import SancusNode -from . import tools - - -class Error(Exception): - pass - - -class Module: - def __init__(self, name, files, cflags, ldflags, node, - binary=None, id=None, symtab=None, key=None): - - self.__check_init_args(node, binary, id, symtab, key) - - self.name = name - self.files = files - self.cflags = cflags - self.ldflags = ldflags - self.node = node - - self.__build_fut = self.__init_future(binary) - self.__deploy_fut = self.__init_future(id, symtab) - self.__key_fut = self.__init_future(key) - - @property - async def binary(self): - if self.__build_fut is None: - self.__build_fut = asyncio.ensure_future(self.__build()) - - return await self.__build_fut - - @property - async def id(self): - id, _ = await self.deploy() - return id - - @property - async def symtab(self): - _, symtab = await self.deploy() - return symtab - - @property - async def key(self): - if self.__key_fut is None: - self.__key_fut = asyncio.ensure_future(self._calculate_key()) - - return await self.__key_fut - - async def get_io_id(self, io_name): - return await self._get_io_id(io_name) - - async def get_entry_id(self, entry_name): - return await self._get_entry_id(entry_name) - - async def call(self, entry, arg=None): - return await self.node.call(self, entry, arg) - - def __check_init_args(self, node, binary, id, symtab, key): - if not isinstance(node, self.get_supported_node_type()): - clsname = lambda o: type(o).__name__ - raise Error('A {} cannot run on a {}' - .format(clsname(self), clsname(node))) - - # For now, either all optionals should be given or none. This might be - # relaxed later if necessary. 
- optionals = (binary, id, symtab, key) - - if None in optionals and any(map(lambda x: x is not None, optionals)): - raise Error('Either all of the optional node parameters ' - 'should be given or none') - - @staticmethod - def __init_future(*results): - if all(map(lambda x: x is None, results)): - return None - - fut = asyncio.Future() - result = results[0] if len(results) == 1 else results - fut.set_result(result) - return fut - - async def __build(self): - logging.info('Building module %s from %s', - self.name, ', '.join(map(str, self.files))) - - config = self._get_build_config(_get_verbosity()) - objects = {str(p): tools.create_tmp(suffix='.o') for p in self.files} - - cflags = config.cflags + self.cflags - build_obj = lambda c, o: tools.run_async(config.cc, *cflags, - '-c', '-o', o, c) - build_futs = [build_obj(c, o) for c, o in objects.items()] - await asyncio.gather(*build_futs) - - binary = tools.create_tmp(suffix='.elf') - ldflags = config.ldflags + self.ldflags - await tools.run_async(config.ld, *ldflags, - '-o', binary, *objects.values()) - return binary - - async def deploy(self): - if self.__deploy_fut is None: - self.__deploy_fut = asyncio.ensure_future(self.node.deploy(self)) - - return await self.__deploy_fut - - -class SancusModule(Module): - async def _calculate_key(self): - linked_binary = await self.__link() - - with open(linked_binary, 'rb') as f: - key = sancus.crypto.get_sm_key(f, self.name, self.node.vendor_key) - logging.info('Module key for %s: %s', - self.name, binascii.hexlify(key).decode('ascii')) - return key - - async def __link(self): - linked_binary = tools.create_tmp(suffix='.elf') - await tools.run_async('msp430-ld', '-T', await self.symtab, - '-o', linked_binary, await self.binary) - return linked_binary - - async def _get_io_id(self, io_name): - sym_name = '__sm_{}_io_{}_idx'.format(self.name, io_name) - symbol = await self.__get_symbol(sym_name) - - if symbol is None: - raise Error('Module {} has no endpoint named {}' - .format(self.name, io_name)) - - return symbol - - async def _get_entry_id(self, entry_name): - sym_name = '__sm_{}_entry_{}_idx'.format(self.name, entry_name) - symbol = await self.__get_symbol(sym_name) - - if symbol is None: - raise Error('Module {} has no entry named {}' - .format(self.name, entry_name)) - - return symbol - - @staticmethod - def get_supported_node_type(): - return SancusNode - - @staticmethod - def _get_build_config(verbosity): - if verbosity == _Verbosity.Debug: - flags = ['--debug'] - # elif verbosity == _Verbosity.Verbose: - # flags = ['--verbose'] - else: - flags = [] - - return _BuildConfig(cc='sancus-cc', cflags=flags, - ld='sancus-ld', ldflags=flags) - - async def __get_symbol(self, name): - with open(await self.binary, 'rb') as f: - elf = elffile.ELFFile(f) - name = name.encode('ascii') - for section in elf.iter_sections(): - if isinstance(section, elffile.SymbolTableSection): - for symbol in section.iter_symbols(): - sym_section = symbol['st_shndx'] - if symbol.name == name and sym_section != 'SHN_UNDEF': - return symbol['st_value'] - - -_BuildConfig = namedtuple('_BuildConfig', ['cc', 'cflags', 'ld', 'ldflags']) -_Verbosity = Enum('_Verbosity', ['Normal', 'Verbose', 'Debug']) - - -def _get_verbosity(): - log_at = logging.getLogger().isEnabledFor - - if log_at(logging.DEBUG): - return _Verbosity.Debug - elif log_at(logging.INFO): - return _Verbosity.Verbose - else: - return _Verbosity.Normal - diff --git a/reactivetools/modules/__init__.py b/reactivetools/modules/__init__.py new file mode 100644 index 
0000000..cc1ea22 --- /dev/null +++ b/reactivetools/modules/__init__.py @@ -0,0 +1,26 @@ +from .base import Module +from .sancus import SancusModule +from .native import NativeModule +from .sgx import SGXModule +from .trustzone import TrustZoneModule + +module_rules = { + "sancus": "sancus.yaml", + "sgx": "sgx.yaml", + "native": "native.yaml", + "trustzone": "trustzone.yaml" +} + +module_funcs = { + "sancus": SancusModule.load, + "sgx": SGXModule.load, + "native": NativeModule.load, + "trustzone": TrustZoneModule.load +} + +module_cleanup_coros = [ + SancusModule.cleanup, + SGXModule.cleanup, + NativeModule.cleanup, + TrustZoneModule.cleanup +] diff --git a/reactivetools/modules/base.py b/reactivetools/modules/base.py new file mode 100644 index 0000000..9a7711d --- /dev/null +++ b/reactivetools/modules/base.py @@ -0,0 +1,308 @@ +from abc import ABC, abstractmethod +import os +import logging +import sys +from .. import glob + + +class Error(Exception): + pass + + +class Module(ABC): + def __init__(self, name, node, old_node, priority, deployed, nonce, + attested, out_dir): + """ + Generic attributes common to all Module subclasses + + Priority, deployed and nonce are used internally by the application + + ### Attributes ### + name (str): name of the module + node (XXXNode): *instance* of the Node class where the module belongs + old_node (XXXNode): node where the previous version of the module was deployed; only used during module updates + priority (int): priority of the module. For ordered deployment (can be None) + deployed (bool): indicates whether the module has been deployed (can be None) + nonce (int): nonce used in set_key to ensure freshness (can be None) + attested (bool): indicates whether the module has been attested (can be None) + """ + self.name = name + self.node = node + self.old_node = old_node + self.priority = priority + self.deployed = deployed + self.nonce = 0 if nonce is None else nonce + self.attested = attested + + self.connections = 0 + + # create the module's build dir + try: + os.mkdir(os.path.join(glob.BUILD_DIR, out_dir)) + except FileExistsError: + pass + except: + logging.error("Failed to create build dir for {}".format(name)) + sys.exit(-1) + + @staticmethod + @abstractmethod + def load(mod_dict, node_obj, old_node_obj): + """ + ### Description ### + Creates an XXXModule object from a dict + This should take all the information declared in the deployment descriptor + and store it into the class as attributes. + + ### Parameters ### + mod_dict (dict): dictionary containing the definition of the module + node_obj (XXXNode): node object the module belongs to + old_node_obj (XXXNode): node object the old module belongs to; + only used during module updates! + + ### Returns ### + An instance of the XXXModule class + """ + + @abstractmethod + def dump(self): + """ + ### Description ### + Creates a dict from the XXXModule object (opposite procedure wrt. load) + This dict is saved in the output deployment descriptor and serves two purposes: + 1) to provide the deployer with some information (e.g., the keys used) + 2) to serve as input for subsequent runs of the application + Hence, load() and dump() should ideally involve the same attributes + + ### Parameters ### + self: Module object + + ### Returns ### + `dict`: description of the object + """ + + @abstractmethod + def clone(self): + """ + ### Description ### + Create a copy of the current module, but in a clean state, + i.e., neither deployed nor attested + + ### Parameters ### + self: Module object + + ### Returns ### + `Module`: copy of the Module object + """ + + @abstractmethod + async def build(self): + """ + ### Description ### + Coroutine.
Create the binary file from sources + + ### Parameters ### + self: Module object + + ### Returns ### + `str`: path of the created binary file + """ + + @abstractmethod + async def deploy(self): + """ + ### Description ### + Coroutine. Deploy a module to the corresponding node + + Note: this coroutine should call the `deploy` coroutine in self.node, + making sure that it can happen only once (e.g., using a flag) + + ### Parameters ### + self: Module object + + ### Returns ### + """ + + @abstractmethod + async def attest(self): + """ + ### Description ### + Coroutine. Attest a deployed module + + ### Parameters ### + self: Module object + + ### Returns ### + """ + + @abstractmethod + async def get_id(self): + """ + ### Description ### + Coroutine. Get the ID of the module + + The ID can be assigned in different ways, depending on the architecture. + It should be unique on the node where the module is deployed. + + ### Parameters ### + self: Module object + + ### Returns ### + `int`: ID of the module + """ + + @abstractmethod + async def get_input_id(self, input_): + """ + ### Description ### + Coroutine. Get the ID of the input passed as parameter + + This method should raise an error if the input does not exist + + ### Parameters ### + self: Module object + input (str): name of the input + + ### Returns ### + `int`: ID of the input + """ + + @abstractmethod + async def get_output_id(self, output): + """ + ### Description ### + Coroutine. Get the ID of the output passed as parameter + + This method should raise an error if the output does not exist + + ### Parameters ### + self: Module object + output (str): name of the output + + ### Returns ### + `int`: ID of the output + """ + + @abstractmethod + async def get_entry_id(self, entry): + """ + ### Description ### + Coroutine. Get the ID of the entry point passed as parameter + + This method should raise an error if the entry point does not exist + + ### Parameters ### + self: Module object + entry (str): name of the entry point + + ### Returns ### + `int`: ID of the entry point + """ + + @abstractmethod + async def get_key(self): + """ + ### Description ### + Coroutine. Get the module's key + + ### Parameters ### + self: Module object + + ### Returns ### + `bytes`: byte array of the key + """ + + @staticmethod + @abstractmethod + def get_supported_nodes(): + """ + ### Description ### + Static method. Get a list of node classes where the module can be deployed + + e.g., SancusModule -> [SancusNode] + + ### Parameters ### + + ### Returns ### + `list`: list of node classes that are supported by the XXXModule instance + """ + + @staticmethod + @abstractmethod + def get_supported_encryption(): + """ + ### Description ### + Static method. Get a list of crypto libraries supported by the module. + The Encryption enum is defined in crypto.py + + e.g., SGXModule -> [Encryption.SPONGENT, Encryption.AES] + + ### Parameters ### + + ### Returns ### + `list`: list of Encryption objects + """ + + @staticmethod + @abstractmethod + def get_default_encryption(): + """ + ### Description ### + Static method. Get the preferred crypto library used by the module. + The Encryption enum is defined in crypto.py + + e.g., SGXModule -> Encryption.AES + + ### Parameters ### + + ### Returns ### + `Encryption`: Encryption enum object + """ + + # Default implementations of some functions. + # Override them in the subclasses if you need a different implementation. + + @staticmethod + async def cleanup(): + """ + ### Description ### + Static coroutine.
Cleanup operations to do before the application terminates. + + ### Parameters ### + + ### Returns ### + """ + + async def get_request_id(self, request): + """ + ### Description ### + Coroutine. Get the ID of the request passed as parameter + + This method should raise an error if the request does not exist + + ### Parameters ### + self: Module object + request (str): name of the request + + ### Returns ### + `int`: ID of the request + """ + raise Error("Request/handler messages not supported for {}".format( + self.__class__.__name__)) + + async def get_handler_id(self, handler): + """ + ### Description ### + Coroutine. Get the ID of the handler passed as parameter + + This method should raise an error if the handler does not exist + + ### Parameters ### + self: Module object + handler (str): name of the handler + + ### Returns ### + `int`: ID of the handler + """ + raise Error("Request/handler messages not supported for {}".format( + self.__class__.__name__)) diff --git a/reactivetools/modules/native.py b/reactivetools/modules/native.py new file mode 100644 index 0000000..cecf2d1 --- /dev/null +++ b/reactivetools/modules/native.py @@ -0,0 +1,306 @@ +import asyncio +import logging +import os +import json +import rustsgxgen + +from .base import Module + +from ..nodes import NativeNode +from .. import tools +from .. import glob +from ..crypto import Encryption +from ..dumpers import * +from ..loaders import * +from ..manager import get_manager + +BUILD_APP = "cargo build {} {} --manifest-path={}/Cargo.toml" + + +class Object(): + pass + + +class Error(Exception): + pass + + +class NativeModule(Module): + def __init__(self, name, node, old_node, priority, deployed, nonce, attested, + features, id_, binary, key, data, folder, port): + self.out_dir = os.path.join(glob.BUILD_DIR, "native-{}".format(folder)) + super().__init__(name, node, old_node, priority, deployed, nonce, + attested, self.out_dir) + + self.__generate_fut = tools.init_future(data, key) + self.__build_fut = tools.init_future(binary) + + self.features = [] if features is None else features + self.id = id_ if id_ is not None else node.get_module_id() + self.port = port or self.node.reactive_port + self.id + self.folder = folder + + @staticmethod + def load(mod_dict, node_obj, old_node_obj): + name = mod_dict['name'] + node = node_obj + old_node = old_node_obj + priority = mod_dict.get('priority') + deployed = mod_dict.get('deployed') + nonce = mod_dict.get('nonce') + attested = mod_dict.get('attested') + features = mod_dict.get('features') + id_ = mod_dict.get('id') + binary = parse_file_name(mod_dict.get('binary')) + key = parse_key(mod_dict.get('key')) + data = mod_dict.get('data') + folder = mod_dict.get('folder') or name + port = mod_dict.get('port') + + return NativeModule(name, node, old_node, priority, deployed, nonce, + attested, features, id_, binary, key, data, folder, + port) + + def dump(self): + return { + "type": "native", + "name": self.name, + "node": self.node.name, + "old_node": self.old_node.name, + "priority": self.priority, + "deployed": self.deployed, + "nonce": self.nonce, + "attested": self.attested, + "features": self.features, + "id": self.id, + "binary": dump(self.binary) if self.deployed else None, + # For native, key is generated at compile time + "key": dump(self.key) if self.deployed else None, + "data": dump(self.data) if self.deployed else None, + "folder": self.folder, + "port": self.port + } + + def clone(self): + return NativeModule( + self.name, + self.node, + self.old_node, + self.priority, + 
None, + None, + None, + self.features, + None, + None, + None, + None, + self.folder, + None + ) + + # --- Properties --- # + + @property + async def data(self): + data, _key = await self.generate_code() + return data + + @property + async def inputs(self): + data = await self.data + return data["inputs"] + + @property + async def outputs(self): + data = await self.data + return data["outputs"] + + @property + async def entrypoints(self): + data = await self.data + return data["entrypoints"] + + @property + async def handlers(self): + data = await self.data + return data["handlers"] + + @property + async def requests(self): + data = await self.data + return data["requests"] + + @property + async def key(self): + _data, key = await self.generate_code() + return key + + @property + async def binary(self): + return await self.build() + + # --- Implement abstract methods --- # + + async def build(self): + if self.__build_fut is None: + self.__build_fut = asyncio.ensure_future(self.__build()) + + return await self.__build_fut + + async def deploy(self): + await self.node.deploy(self) + + async def attest(self): + if get_manager() is not None: + await self.__attest_manager() + else: + await self.key + + self.attested = True + + async def get_id(self): + return self.id + + async def get_input_id(self, input_): + if isinstance(input_, int): + return input_ + + inputs = await self.inputs + + if input_ not in inputs: + raise Error("Input not present in inputs") + + return inputs[input_] + + async def get_output_id(self, output): + if isinstance(output, int): + return output + + outputs = await self.outputs + + if output not in outputs: + raise Error("Output not present in outputs") + + return outputs[output] + + async def get_entry_id(self, entry): + try: + return int(entry) + except: + entrypoints = await self.entrypoints + + if entry not in entrypoints: + raise Error("Entry not present in entrypoints") + + return entrypoints[entry] + + async def get_request_id(self, request): + if isinstance(request, int): + return request + + requests = await self.requests + + if request not in requests: + raise Error("Request not present in requests") + + return requests[request] + + async def get_handler_id(self, handler): + if isinstance(handler, int): + return handler + + handlers = await self.handlers + + if handler not in handlers: + raise Error("Handler not present in handlers") + + return handlers[handler] + + async def get_key(self): + return await self.key + + @staticmethod + def get_supported_nodes(): + return [NativeNode] + + @staticmethod + def get_supported_encryption(): + return [Encryption.AES, Encryption.SPONGENT] + + @staticmethod + def get_default_encryption(): + return Encryption.AES + + # --- Static methods --- # + + # --- Others --- # + + async def generate_code(self): + if self.__generate_fut is None: + self.__generate_fut = asyncio.ensure_future(self.__generate_code()) + + return await self.__generate_fut + + async def __generate_code(self): + args = Object() + + args.input = self.folder + args.output = self.out_dir + args.moduleid = self.id + args.emport = self.node.deploy_port + args.runner = rustsgxgen.Runner.NATIVE + args.spkey = None + args.print = None + + data, key = rustsgxgen.generate(args) + logging.info("Generated code for module {}".format(self.name)) + + return data, key + + async def __build(self): + await self.generate_code() + + release = "--release" if glob.get_build_mode() == glob.BuildMode.RELEASE else "" + features = "--features " + \ + " ".join(self.features) if 
self.features else "" + + cmd = BUILD_APP.format(release, features, self.out_dir).split() + await tools.run_async(*cmd) + + # TODO there might be problems with two (or more) modules built from + # the same source code but with different features. Since the + # working dir is the same (for caching reasons) there might be some + # problems when these SMs are built at the same time. + # Find a way to solve this issue. + binary = os.path.join(self.out_dir, + "target", glob.get_build_mode().to_str(), self.folder) + + logging.info("Built module {}".format(self.name)) + return binary + + async def __attest_manager(self): + data = { + "id": self.id, + "name": self.name, + "host": str(self.node.ip_address), + "port": self.port, + "em_port": self.node.reactive_port, + "key": list(await self.key) + } + data_file = os.path.join(self.out_dir, "attest.json") + with open(data_file, "w") as f: + json.dump(data, f) + + args = "--config {} --request attest-native --data {}".format( + get_manager().config, data_file).split() + out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args) + key_arr = eval(out) # from string to array + key = bytes(key_arr) # from array to bytes + + if await self.key != key: + raise Error( + "Received key is different from {} key".format(self.name)) + + logging.info("Done Remote Attestation of {}. Key: {}".format( + self.name, key_arr)) diff --git a/reactivetools/modules/sancus.py b/reactivetools/modules/sancus.py new file mode 100644 index 0000000..d3fad63 --- /dev/null +++ b/reactivetools/modules/sancus.py @@ -0,0 +1,368 @@ +import logging +import asyncio +from collections import namedtuple +import json +import os +import ntpath +import yaml + +from elftools.elf import elffile + +from .base import Module +from ..nodes import SancusNode +from .. import tools +from .. 
import glob +from ..crypto import Encryption +from ..dumpers import * +from ..loaders import * +from ..manager import get_manager + + +class Error(Exception): + pass + + +class SancusModule(Module): + def __init__(self, name, node, old_node, priority, deployed, nonce, + attested, files, cflags, ldflags, binary, id_, symtab, key, + deploy_name): + self.out_dir = os.path.join(glob.BUILD_DIR, "sancus-{}".format(name)) + super().__init__(name, node, old_node, priority, deployed, nonce, + attested, self.out_dir) + + self.files = files + self.cflags = cflags + self.ldflags = ldflags + self.deploy_name = deploy_name or name + + self.__build_fut = tools.init_future(binary) + self.__deploy_fut = tools.init_future(id_, symtab) + self.__key_fut = tools.init_future(key) + self.__attest_fut = tools.init_future(attested if attested else None) + + @staticmethod + def load(mod_dict, node_obj, old_node_obj): + name = mod_dict['name'] + node = node_obj + old_node = old_node_obj + priority = mod_dict.get('priority') + deployed = mod_dict.get('deployed') + nonce = mod_dict.get('nonce') + attested = mod_dict.get('attested') + files = load_list(mod_dict['files'], parse_file_name) + cflags = load_list(mod_dict.get('cflags')) + ldflags = load_list(mod_dict.get('ldflags')) + binary = parse_file_name(mod_dict.get('binary')) + id_ = mod_dict.get('id') + symtab = parse_file_name(mod_dict.get('symtab')) + key = parse_key(mod_dict.get('key')) + deploy_name = mod_dict.get('deploy_name') + + return SancusModule(name, node, old_node, priority, deployed, nonce, + attested, files, cflags, ldflags, binary, id_, + symtab, key, deploy_name) + + def dump(self): + return { + "type": "sancus", + "name": self.name, + "node": self.node.name, + "old_node": self.old_node.name, + "priority": self.priority, + "deployed": self.deployed, + "nonce": self.nonce, + "attested": self.attested, + "files": dump(self.files), + "cflags": dump(self.cflags), + "ldflags": dump(self.ldflags), + "binary": dump(self.binary) if self.deployed else None, + "id": dump(self.id) if self.deployed else None, + "symtab": dump(self.symtab) if self.deployed else None, + "key": dump(self.key) if self.deployed else None, + "deploy_name": self.deploy_name + } + + def clone(self): + return SancusModule( + self.name, + self.node, + self.old_node, + self.priority, + None, + None, + None, + self.files, + self.cflags, + self.ldflags, + None, + None, + None, + None, + tools.increment_value_in_string(self.deploy_name) + ) + + # --- Properties --- # + + @property + async def binary(self): + return await self.build() + + @property + async def id(self): + id_, _ = await self.deploy() + return id_ + + @property + async def symtab(self): + _, symtab = await self.deploy() + return symtab + + @property + async def key(self): + if self.__key_fut is None: + self.__key_fut = asyncio.ensure_future(self._calculate_key()) + + return await self.__key_fut + + # --- Implement abstract methods --- # + + async def build(self): + if self.__build_fut is None: + self.__build_fut = asyncio.ensure_future(self.__build()) + + return await self.__build_fut + + async def deploy(self): + if self.__deploy_fut is None: + self.__deploy_fut = asyncio.ensure_future(self.node.deploy(self)) + + return await self.__deploy_fut + + async def attest(self): + if get_manager() is not None: + await self.__attest_manager() + else: + if self.__attest_fut is None: + self.__attest_fut = asyncio.ensure_future( + self.node.attest(self)) + + await self.__attest_fut + + async def get_id(self): + return await self.id + + 
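+ # I/O and entry IDs below are resolved lazily: a numeric value given by + # the deployer is used as-is; otherwise the ID is looked up in the linked + # ELF binary through the __sm_<deploy_name>_io_<name>_idx and + # __sm_<deploy_name>_entry_<name>_idx symbols emitted by the Sancus + # toolchain (see _get_io_id and _get_entry_id further below). +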
async def get_input_id(self, input_): + return await self.get_io_id(input_) + + async def get_output_id(self, output): + return await self.get_io_id(output) + + async def get_entry_id(self, entry): + # If it is a number, that is the ID (given by the deployer) + if entry.isnumeric(): + return int(entry) + + return await self._get_entry_id(entry) + + async def get_key(self): + return await self.key + + @staticmethod + def get_supported_nodes(): + return [SancusNode] + + @staticmethod + def get_supported_encryption(): + return [Encryption.SPONGENT] + + @staticmethod + def get_default_encryption(): + return Encryption.SPONGENT + + # --- Static methods --- # + + @staticmethod + def _get_build_config(verbosity): + if verbosity == tools.Verbosity.Debug: + flags = ['--debug'] + # elif verbosity == tools.Verbosity.Verbose: + # flags = ['--verbose'] + else: + flags = [] + + cflags = flags + ldflags = flags + ['--inline-arithmetic'] + + return _BuildConfig(cc='sancus-cc', cflags=cflags, + ld='sancus-ld', ldflags=ldflags) + + # --- Others --- # + + async def get_io_id(self, io): + # If io is a number, that is the ID (given by the deployer) + if isinstance(io, int): + return io + + return await self._get_io_id(io) + + async def __build(self): + logging.info('Building module %s from %s', + self.name, ', '.join(map(str, self.files))) + + # clean out_dir first + for f in os.listdir(self.out_dir): + os.remove(os.path.join(self.out_dir, f)) + + # copy files to out_dir, replacing the name if needed + parsed_files = [] + for sf in self.files: + with open(sf, "r") as f: + f_data = f.read() + + f_data = f_data.replace("{name}", self.deploy_name) + pf = os.path.join(self.out_dir, ntpath.basename(sf)) + + with open(pf, "w") as f: + f.write(f_data) + + parsed_files.append(pf) + + # build + config = self._get_build_config(tools.get_verbosity()) + objects = {str(p): tools.create_tmp( + suffix='.o', dir_name=self.out_dir) for p in parsed_files} + + cflags = config.cflags + self.cflags + + def build_obj(c, o): + return tools.run_async(config.cc, *cflags, '-c', '-o', o, c) + + build_futs = [build_obj(c, o) for c, o in objects.items()] + await asyncio.gather(*build_futs) + + binary = tools.create_tmp(suffix='.elf', dir_name=self.out_dir) + ldflags = config.ldflags + self.ldflags + + # prepare the config file for this SM + ldflags = await self.__prepare_config_file(ldflags) + + await tools.run_async(config.ld, *ldflags, + '-o', binary, *objects.values()) + return binary + + async def __prepare_config_file(self, ldflags): + # try to get sm config file if present in self.ldflags + # otherwise, create empty file + try: + # fetch the name + flag = next(filter(lambda x: "--sm-config-file" in x, ldflags)) + config_file = flag.split()[1] + + # open the file and parse it + with open(config_file, "r") as f: + config = yaml.load(f, Loader=yaml.FullLoader) + except: + # we create a new file with empty config + config_file = tools.create_tmp( + suffix='.yaml', dir_name=self.out_dir) + config = {self.deploy_name: []} + + # remove old flag if present, append new one + ldflags = list( + filter(lambda x: "--sm-config-file" not in x, ldflags)) + ldflags.append("--sm-config-file {}".format(config_file)) + + # override num_connections if the value is present and is < self.connections + if "num_connections" not in config or config["num_connections"] < self.connections: + config[self.deploy_name].append({"num_connections": self.connections}) + + # save changes + with open(config_file, "w") as f: + yaml.dump(config, f) + + return ldflags + + async def
_calculate_key(self): + linked_binary = await self.__link() + + args = "{} --gen-sm-key {} --key {}".format( + linked_binary, self.deploy_name, dump(self.node.vendor_key) + ).split() + + key, _ = await tools.run_async_output("sancus-crypto", *args) + logging.info('Module key for %s: %s', self.name, dump(key)) + + return key + + async def __link(self): + linked_binary = tools.create_tmp(suffix='.elf', dir_name=self.out_dir) + + # NOTE: we use '--noinhibit-exec' flag because the linker complains + # if the addresses of .bss section are not aligned to 2 bytes + # using this flag instead, the output file is still generated + await tools.run_async('msp430-ld', '-T', await self.symtab, + '-o', linked_binary, '--noinhibit-exec', await self.binary) + return linked_binary + + async def _get_io_id(self, io_name): + sym_name = '__sm_{}_io_{}_idx'.format(self.deploy_name, io_name) + symbol = await self.__get_symbol(sym_name) + + if symbol is None: + raise Error('Module {} has no endpoint named {}' + .format(self.name, io_name)) + + return symbol + + async def _get_entry_id(self, entry_name): + sym_name = '__sm_{}_entry_{}_idx'.format(self.deploy_name, entry_name) + symbol = await self.__get_symbol(sym_name) + + if symbol is None: + raise Error('Module {} has no entry named {}' + .format(self.name, entry_name)) + + return symbol + + async def __get_symbol(self, name): + if not await self.binary: + raise Error("ELF file not present for {}, cannot extract symbol ID of {}".format( + self.name, name)) + + with open(await self.binary, 'rb') as f: + elf = elffile.ELFFile(f) + for section in elf.iter_sections(): + if isinstance(section, elffile.SymbolTableSection): + for symbol in section.iter_symbols(): + sym_section = symbol['st_shndx'] + if symbol.name == name and sym_section != 'SHN_UNDEF': + return symbol['st_value'] + + async def __attest_manager(self): + data = { + "id": await self.id, + "name": self.name, + "host": str(self.node.ip_address), + "port": self.node.reactive_port, + "em_port": self.node.reactive_port, + "key": list(await self.key) + } + data_file = os.path.join(self.out_dir, "attest.json") + with open(data_file, "w") as f: + json.dump(data, f) + + args = "--config {} --request attest-sancus --data {}".format( + get_manager().config, data_file).split() + out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args) + key_arr = eval(out) # from string to array + key = bytes(key_arr) # from array to bytes + + if await self.key != key: + raise Error( + "Received key is different from {} key".format(self.name)) + + logging.info("Done Remote Attestation of {}. Key: {}".format( + self.name, key_arr)) + self.attested = True + +_BuildConfig = namedtuple('_BuildConfig', ['cc', 'cflags', 'ld', 'ldflags']) diff --git a/reactivetools/modules/sgx.py b/reactivetools/modules/sgx.py new file mode 100644 index 0000000..65dd65c --- /dev/null +++ b/reactivetools/modules/sgx.py @@ -0,0 +1,448 @@ +import asyncio +import logging +import os +import rustsgxgen + +from .base import Module + +from ..nodes import SGXNode +from .. import tools +from .. 
import glob +from ..crypto import Encryption +from ..dumpers import * +from ..loaders import * +from ..manager import get_manager +from ..descriptor import DescriptorType + +# Apps +ATTESTER = "sgx-attester" +ROOT_CA_URL = "https://certificates.trustedservices.intel.com/Intel_SGX_Attestation_RootCA.pem" + +# SGX build/sign +SGX_TARGET = "x86_64-fortanix-unknown-sgx" +BUILD_APP = "cargo build {{}} {{}} --target={} --manifest-path={{}}/Cargo.toml".format( + SGX_TARGET) +CONVERT_SGX = "ftxsgx-elf2sgxs {} --heap-size 0x20000 --stack-size 0x20000 --threads 4 {}" +SIGN_SGX = "sgxs-sign --key {} {} {} {}" # use default values + + +class Object(): + pass + + +class Error(Exception): + pass + + +class SGXModule(Module): + sp_lock = asyncio.Lock() + + def __init__(self, name, node, old_node, priority, deployed, nonce, attested, + vendor_key, ra_settings, features, id_, binary, key, sgxs, + signature, data, folder, port): + self.out_dir = os.path.join(glob.BUILD_DIR, "sgx-{}".format(folder)) + super().__init__(name, node, old_node, priority, deployed, nonce, + attested, self.out_dir) + + self.__generate_fut = tools.init_future(data) + self.__build_fut = tools.init_future(binary) + self.__convert_sign_fut = tools.init_future(sgxs, signature) + self.__attest_fut = tools.init_future(key) + self.__sp_keys_fut = asyncio.ensure_future(self.__generate_sp_keys()) + + self.key = key + self.vendor_key = vendor_key + self.ra_settings = ra_settings + self.features = [] if features is None else features + self.id = id_ if id_ is not None else node.get_module_id() + self.port = port or self.node.reactive_port + self.id + self.folder = folder + + @staticmethod + def load(mod_dict, node_obj, old_node_obj): + name = mod_dict['name'] + node = node_obj + old_node = old_node_obj + priority = mod_dict.get('priority') + deployed = mod_dict.get('deployed') + nonce = mod_dict.get('nonce') + attested = mod_dict.get('attested') + vendor_key = parse_file_name(mod_dict['vendor_key']) + settings = parse_file_name(mod_dict['ra_settings']) + features = mod_dict.get('features') + id_ = mod_dict.get('id') + binary = parse_file_name(mod_dict.get('binary')) + key = parse_key(mod_dict.get('key')) + sgxs = parse_file_name(mod_dict.get('sgxs')) + signature = parse_file_name(mod_dict.get('signature')) + data = mod_dict.get('data') + folder = mod_dict.get('folder') or name + port = mod_dict.get('port') + + return SGXModule(name, node, old_node, priority, deployed, nonce, + attested, vendor_key, settings, features, id_, binary, + key, sgxs, signature, data, folder, port) + + def dump(self): + return { + "type": "sgx", + "name": self.name, + "node": self.node.name, + "old_node": self.old_node.name, + "priority": self.priority, + "deployed": self.deployed, + "nonce": self.nonce, + "attested": self.attested, + "vendor_key": self.vendor_key, + "ra_settings": self.ra_settings, + "features": self.features, + "id": self.id, + "binary": dump(self.binary) if self.deployed else None, + "sgxs": dump(self.sgxs) if self.deployed else None, + "signature": dump(self.sig) if self.deployed else None, + "key": dump(self.key) if self.attested else None, + "data": dump(self.data) if self.deployed else None, + "folder": self.folder, + "port": self.port + } + + def clone(self): + return SGXModule( + self.name, + self.node, + self.old_node, + self.priority, + None, + None, + None, + self.vendor_key, + self.ra_settings, + self.features, + None, + None, + None, + None, + None, + None, + self.folder, + None + ) + + # --- Properties --- # + + @property + async def 
data(self): + data = await self.generate_code() + return data + + @property + async def inputs(self): + data = await self.data + return data["inputs"] + + @property + async def outputs(self): + data = await self.data + return data["outputs"] + + @property + async def entrypoints(self): + data = await self.data + return data["entrypoints"] + + @property + async def handlers(self): + data = await self.data + return data["handlers"] + + @property + async def requests(self): + data = await self.data + return data["requests"] + + @property + async def binary(self): + return await self.build() + + @property + async def sgxs(self): + if self.__convert_sign_fut is None: + self.__convert_sign_fut = asyncio.ensure_future( + self.__convert_sign()) + + sgxs, _ = await self.__convert_sign_fut + + return sgxs + + @property + async def sig(self): + if self.__convert_sign_fut is None: + self.__convert_sign_fut = asyncio.ensure_future( + self.__convert_sign()) + + _, sig = await self.__convert_sign_fut + + return sig + + # --- Implement abstract methods --- # + + async def build(self): + if self.__build_fut is None: + self.__build_fut = asyncio.ensure_future(self.__build()) + + return await self.__build_fut + + async def deploy(self): + await self.node.deploy(self) + + async def attest(self): + if get_manager() is not None: + await self.__attest_manager() + else: + if self.__attest_fut is None: + self.__attest_fut = asyncio.ensure_future(self.__attest()) + + await self.__attest_fut + + async def get_id(self): + return self.id + + async def get_input_id(self, input_): + if isinstance(input_, int): + return input_ + + inputs = await self.inputs + + if input_ not in inputs: + raise Error("Input not present in inputs") + + return inputs[input_] + + async def get_output_id(self, output): + if isinstance(output, int): + return output + + outputs = await self.outputs + + if output not in outputs: + raise Error("Output not present in outputs") + + return outputs[output] + + async def get_entry_id(self, entry): + if entry.isnumeric(): + return int(entry) + + entrypoints = await self.entrypoints + + if entry not in entrypoints: + raise Error("Entry not present in entrypoints") + + return entrypoints[entry] + + async def get_request_id(self, request): + if isinstance(request, int): + return request + + requests = await self.requests + + if request not in requests: + raise Error("Request not present in requests") + + return requests[request] + + async def get_handler_id(self, handler): + if isinstance(handler, int): + return handler + + handlers = await self.handlers + + if handler not in handlers: + raise Error("Handler not present in handlers") + + return handlers[handler] + + async def get_key(self): + return self.key + + @staticmethod + def get_supported_nodes(): + return [SGXNode] + + @staticmethod + def get_supported_encryption(): + return [Encryption.AES, Encryption.SPONGENT] + + @staticmethod + def get_default_encryption(): + return Encryption.AES + + # --- Static methods --- # + + @staticmethod + async def cleanup(): + pass + + # --- Others --- # + + async def get_ra_sp_pub_key(self): + pub, _, _ = await self.__sp_keys_fut + + return pub + + async def get_ra_sp_priv_key(self): + _, priv, _ = await self.__sp_keys_fut + + return priv + + async def get_ias_root_certificate(self): + _, _, cert = await self.__sp_keys_fut + + return cert + + async def generate_code(self): + if self.__generate_fut is None: + self.__generate_fut = asyncio.ensure_future(self.__generate_code()) + + return await self.__generate_fut + + 
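All of the lazy properties above funnel into the same memoization idiom: the first await schedules the underlying coroutine with asyncio.ensure_future and caches the future, so concurrent awaiters share a single run of the expensive step (codegen, build, attestation). A minimal standalone sketch of the idiom, with illustrative names only:

import asyncio

class LazyResource:
    def __init__(self):
        self.__fut = None  # cached future shared by all awaiters

    async def get(self):
        # the first caller schedules the coroutine; later callers await
        # the same future, so the expensive work runs exactly once
        if self.__fut is None:
            self.__fut = asyncio.ensure_future(self.__compute())
        return await self.__fut

    async def __compute(self):
        await asyncio.sleep(0.1)  # stands in for build/codegen/attestation
        return "result"

async def main():
    res = LazyResource()
    # both awaits resolve to the same single computation
    print(await asyncio.gather(res.get(), res.get()))

asyncio.run(main())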
async def __generate_code(self): + args = Object() + man = get_manager() + + args.input = self.folder + args.output = self.out_dir + args.moduleid = self.id + args.emport = self.node.deploy_port + args.runner = rustsgxgen.Runner.SGX + args.spkey = await man.get_sp_pubkey() \ + if man is not None else await self.get_ra_sp_pub_key() + args.print = None + + data, _ = rustsgxgen.generate(args) + logging.info("Generated code for module {}".format(self.name)) + + return data + + async def __build(self): + await self.generate_code() + + release = "--release" if glob.get_build_mode() == glob.BuildMode.RELEASE else "" + features = "--features " + \ + " ".join(self.features) if self.features else "" + + cmd = BUILD_APP.format(release, features, self.out_dir).split() + await tools.run_async(*cmd) + + # TODO there might be problems with two (or more) modules built from + # the same source code but with different features. Since the + # working dir is the same (for caching reasons) there might be some + # problems when these SMs are built at the same time. + # Find a way to solve this issue. + binary = os.path.join(self.out_dir, "target", SGX_TARGET, + glob.get_build_mode().to_str(), self.folder) + + logging.info("Built module {}".format(self.name)) + + return binary + + async def __convert_sign(self): + binary = await self.binary + debug = "--debug" if glob.get_build_mode() == glob.BuildMode.DEBUG else "" + + sgxs = "{}.sgxs".format(binary) + + # use this format for the file names to deal with multiple SMs built + # from the same source code, but with different vendor keys + sig = "{}-{}.sig".format(binary, self.name) + + cmd_convert = CONVERT_SGX.format(binary, debug).split() + cmd_sign = SIGN_SGX.format(self.vendor_key, sgxs, sig, debug).split() + + await tools.run_async(*cmd_convert) + await tools.run_async(*cmd_sign) + + logging.info("Converted & signed module {}".format(self.name)) + + return sgxs, sig + + async def __attest(self): + input_arg = {} + input_arg["sp_privkey"] = await self.get_ra_sp_priv_key() + input_arg["ias_cert"] = await self.get_ias_root_certificate() + input_arg["enclave_settings"] = self.ra_settings + input_arg["enclave_sig"] = await self.sig + input_arg["enclave_host"] = str(self.node.ip_address) + input_arg["enclave_port"] = self.port + input_arg["aesm_host"] = str(self.node.aesm_host) + input_arg["aesm_port"] = self.node.aesm_port + + input_file = os.path.join(self.out_dir, "attest.yaml") + DescriptorType.YAML.dump(input_file, input_arg) + + args = [input_file] + out, _ = await tools.run_async_output(ATTESTER, *args) + key_arr = eval(out) # from string to array + key = bytes(key_arr) # from array to bytes + + # wait to let the enclave open the new socket + await asyncio.sleep(0.1) + + logging.info("Done Remote Attestation of {}. 
Key: {}".format( + self.name, key_arr)) + self.key = key + self.attested = True + + async def __attest_manager(self): + data = { + "id": self.id, + "name": self.name, + "host": str(self.node.ip_address), + "port": self.port, + "em_port": self.node.reactive_port, + "aesm_client_host": self.node.aesm_host, + "aesm_client_port": self.node.aesm_port, + "sigstruct": await self.sig, + "config": self.ra_settings + } + data_file = os.path.join(self.out_dir, "attest.json") + DescriptorType.JSON.dump(data_file, data) + + args = "--config {} --request attest-sgx --data {}".format( + get_manager().config, data_file).split() + out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args) + key_arr = eval(out) # from string to array + key = bytes(key_arr) # from array to bytes + + # wait to let the enclave open the new socket + await asyncio.sleep(0.1) + + logging.info("Done Remote Attestation of {}. Key: {}".format( + self.name, key_arr)) + self.key = key + self.attested = True + + async def __generate_sp_keys(self): + async with self.sp_lock: + priv = os.path.join(glob.BUILD_DIR, "private_key.pem") + pub = os.path.join(glob.BUILD_DIR, "public_key.pem") + ias_cert = os.path.join(glob.BUILD_DIR, "ias_root_ca.pem") + + # check if already generated in a previous run + if all(map(os.path.exists, [priv, pub, ias_cert])): + return pub, priv, ias_cert + + cmd = "openssl" + + args_private = "genrsa -f4 -out {} 2048".format(priv).split() + args_public = "rsa -in {} -outform PEM -pubout -out {}".format( + priv, pub).split() + + await tools.run_async_shell(cmd, *args_private) + await tools.run_async_shell(cmd, *args_public) + + cmd = "curl" + url = ROOT_CA_URL.split() + await tools.run_async(cmd, *url, output_file=ias_cert) + + return pub, priv, ias_cert diff --git a/reactivetools/modules/trustzone.py b/reactivetools/modules/trustzone.py new file mode 100644 index 0000000..2ba6780 --- /dev/null +++ b/reactivetools/modules/trustzone.py @@ -0,0 +1,287 @@ +import logging +import asyncio +import json +import os +import tzcodegen + +from .base import Module +from ..nodes import TrustZoneNode +from .. import tools +from .. 
import glob +from ..crypto import Encryption, hash_sha256 +from ..dumpers import * +from ..loaders import * +from ..manager import get_manager + + +class Error(Exception): + pass + + +class Object(): + pass + + +COMPILER = "CROSS_COMPILE=arm-linux-gnueabihf-" +PLATFORM = "PLATFORM=vexpress-qemu_virt" +DEV_KIT = "TA_DEV_KIT_DIR=/optee/optee_os/out/arm/export-ta_arm32" +BUILD_CMD = "make -C {{}} {} {} {} {{}} O={{}}".format( + COMPILER, PLATFORM, DEV_KIT) + + +class TrustZoneModule(Module): + def __init__(self, name, node, old_node, priority, deployed, nonce, attested, + binary, id_, uUID, key, data, folder): + self.out_dir = os.path.join( + glob.BUILD_DIR, "trustzone-{}".format(name)) + super().__init__(name, node, old_node, priority, deployed, nonce, + attested, self.out_dir) + + self.id = id_ if id_ is not None else node.get_module_id() + self.folder = folder + + self.uuid_for_MK = "" + + self.__generate_fut = tools.init_future(data, uUID) + self.__build_fut = tools.init_future(binary) + self.__key_fut = tools.init_future(key) + self.__attest_fut = tools.init_future(attested if attested else None) + + @staticmethod + def load(mod_dict, node_obj, old_node_obj): + name = mod_dict['name'] + node = node_obj + old_node = old_node_obj + priority = mod_dict.get('priority') + deployed = mod_dict.get('deployed') + nonce = mod_dict.get('nonce') + attested = mod_dict.get('attested') + binary = mod_dict.get('binary') + id_ = mod_dict.get('id') + uUID = mod_dict.get('uuid') + key = parse_key(mod_dict.get('key')) + data = mod_dict.get('data') + folder = mod_dict.get('folder') or name + return TrustZoneModule(name, node, old_node, priority, deployed, nonce, + attested, binary, id_, uUID, key, data, folder) + + def dump(self): + return { + "type": "trustzone", + "name": self.name, + "node": self.node.name, + "old_node": self.old_node.name, + "priority": self.priority, + "deployed": self.deployed, + "nonce": self.nonce, + "attested": self.attested, + "binary": dump(self.binary) if self.deployed else None, + "id": self.id, + "uuid": dump(self.uUID) if self.deployed else None, + "key": dump(self.key) if self.deployed else None, + "data": dump(self.data) if self.deployed else None, + "folder": self.folder + } + + def clone(self): + return TrustZoneModule( + self.name, + self.node, + self.old_node, + self.priority, + None, + None, + None, + None, + None, + None, + None, + None, + self.folder + ) + + # --- Properties --- # + + @property + async def uUID(self): + _, uUID = await self.generate_code() + return uUID + + @property + async def data(self): + data, _ = await self.generate_code() + return data + + @property + async def inputs(self): + data = await self.data + return data["inputs"] + + @property + async def outputs(self): + data = await self.data + return data["outputs"] + + @property + async def entrypoints(self): + data = await self.data + return data["entrypoints"] + + @property + async def binary(self): + return await self.build() + + @property + async def key(self): + if self.__key_fut is None: + self.__key_fut = asyncio.ensure_future(self.__calculate_key()) + + return await self.__key_fut + + # --- Implement abstract methods --- # + + async def build(self): + if self.__build_fut is None: + self.__build_fut = asyncio.ensure_future(self.__build()) + + return await self.__build_fut + + async def deploy(self): + await self.node.deploy(self) + + async def attest(self): + if get_manager() is not None: + await self.__attest_manager() + else: + if self.__attest_fut is None: + self.__attest_fut = 
asyncio.ensure_future( + self.node.attest(self)) + + await self.__attest_fut + + async def get_id(self): + return self.id + + async def get_input_id(self, input_): + if isinstance(input_, int): + return input_ + + inputs = await self.inputs + + if input_ not in inputs: + raise Error("Input not present in inputs") + + return inputs[input_] + + async def get_output_id(self, output): + if isinstance(output, int): + return output + + outputs = await self.outputs + + if output not in outputs: + raise Error("Output not present in outputs") + + return outputs[output] + + async def get_entry_id(self, entry): + if entry.isnumeric(): + return int(entry) + + entrypoints = await self.entrypoints + + if entry not in entrypoints: + raise Error("Entry not present in entrypoints") + + return entrypoints[entry] + + async def get_key(self): + return await self.key + + @staticmethod + def get_supported_nodes(): + return [TrustZoneNode] + + @staticmethod + def get_supported_encryption(): + return [Encryption.AES, Encryption.SPONGENT] + + @staticmethod + def get_default_encryption(): + return Encryption.AES + + # --- Other methods --- # + async def generate_code(self): + if self.__generate_fut is None: + self.__generate_fut = asyncio.ensure_future(self.__generate_code()) + + return await self.__generate_fut + + async def __generate_code(self): + args = Object() + + args.input = self.folder + args.output = self.out_dir + args.vendor_id = self.node.vendor_id + + args.print = None + + data, uUID = tzcodegen.generate(args) + logging.info("Generated code for module {}".format(self.name)) + + return data, uUID + + async def __build(self): + await self.generate_code() + + temp = await self.uUID + + hexa = '%032x' % (temp) + self.uuid_for_MK = '%s-%s-%s-%s-%s' % ( + hexa[:8], hexa[8:12], hexa[12:16], hexa[16:20], hexa[20:]) + + binary_name = "BINARY=" + self.uuid_for_MK + cmd = BUILD_CMD.format(self.out_dir, binary_name, self.out_dir) + + await tools.run_async_shell(cmd) + + binary = "{}/{}.ta".format(self.out_dir, self.uuid_for_MK) + + return binary + + async def __calculate_key(self): + binary = await self.binary + vendor_key = self.node.vendor_key + + with open(binary, 'rb') as f: + # first 20 bytes are the header (struct shdr), next 32 bytes are the hash + module_hash = f.read(52)[20:] + + return hash_sha256(vendor_key + module_hash, Encryption.AES.get_key_size()) + + async def __attest_manager(self): + data = { + "id": self.id, + "name": self.name, + "host": str(self.node.ip_address), + "port": self.node.reactive_port, + "em_port": self.node.reactive_port, + "key": list(await self.key) + } + data_file = os.path.join(self.out_dir, "attest.json") + with open(data_file, "w") as f: + json.dump(data, f) + + args = "--config {} --request attest-trustzone --data {}".format( + get_manager().config, data_file).split() + out, _ = await tools.run_async_output(glob.ATTMAN_CLI, *args) + key_arr = eval(out) # from string to array + key = bytes(key_arr) # from array to bytes + + if await self.key != key: + raise Error( + "Received key is different from {} key".format(self.name)) + + logging.info("Done Remote Attestation of {}. Key: {}".format( + self.name, key_arr)) + self.attested = True diff --git a/reactivetools/nodes.py b/reactivetools/nodes.py deleted file mode 100644 index 090dd71..0000000 --- a/reactivetools/nodes.py +++ /dev/null @@ -1,220 +0,0 @@ -import asyncio -import struct -import contextlib -import collections -import logging -import binascii -from enum import IntEnum - -import sancus.crypto - -from . 
import tools - - -class Error(Exception): - pass - - -class Node: - def __init__(self, name, ip_address, deploy_port=2000, reactive_port=2001): - self.name = name - self.ip_address = ip_address - self.deploy_port = deploy_port - self.reactive_port = reactive_port - self.__nonces = collections.Counter() - - # Our Contiki implementation does not support *any* concurrent - # connections. Not on the same port, not on different ports. Therefore, - # we use a single lock to make sure all connections made to the node are - # serialized. - self.__lock = asyncio.Lock() - - async def deploy(self, module): - packet = await self.__create_install_packet(module) - - async with self.__lock: - logging.info('Deploying %s on %s', module.name, self.name) - - reader, writer = await asyncio.open_connection(str(self.ip_address), - self.deploy_port) - - # TODO is the connection properly closed by closing the writer? - with contextlib.closing(writer): - writer.write(packet) - sm_id = self.__unpack_int(await reader.read(2)) - - if sm_id == 0: - raise Error('Deploying {} on {} failed' - .format(module.name, self.name)) - - symtab = await reader.read() - - symtab_file = tools.create_tmp(suffix='.ld') - - with open(symtab_file, 'wb') as f: - f.write(symtab[:-1]) # Drop last 0 byte - - return sm_id, symtab_file - - async def connect(self, from_module, from_output, to_module, to_input): - assert from_module.node is self - - results = await asyncio.gather(from_module.id, - from_module.get_io_id(from_output), - to_module.id, - to_module.get_io_id(to_input)) - from_module_id, from_output_id, to_module_id, to_input_id = results - - payload = self.__pack_int(from_module_id) + \ - self.__pack_int(from_output_id) + \ - self.__pack_int(to_module_id) + \ - to_module.node.ip_address.packed + \ - self.__pack_int(to_input_id) - - await self.__send_reactive_command( - _ReactiveCommand.Connect, payload, - log=('Connecting %s:%s to %s:%s on %s', - from_module.name, from_output, - to_module.name, to_input, - self.name)) - - async def set_key(self, module, io_name, key): - module_id, module_key, io_id = await asyncio.gather( - module.id, module.key, module.get_io_id(io_name)) - - nonce = self.__pack_int(self.__get_nonce(module)) - io_id = self.__pack_int(io_id) - ad = nonce + io_id - cipher, tag = sancus.crypto.wrap(module_key, ad, key) - - # The payload format is [sm_id, 16 bit nonce, index, wrapped(key), tag] - # where the tag includes the nonce and the index. - payload = self.__pack_int(module_id) + ad + cipher + tag - - # The result format is [16 bit result code, tag] where the tag includes - # the nonce and the result code. 
- result_len = 2 + sancus.config.SECURITY // 8 - - result = await self.__send_reactive_command( - _ReactiveCommand.SetKey, payload, result_len, - log=('Setting key of %s:%s on %s to %s', - module.name, io_name, self.name, - binascii.hexlify(key).decode('ascii'))) - - set_key_code_packed = result.payload[0:2] - set_key_code = self.__unpack_int(set_key_code_packed) - set_key_tag = result.payload[2:] - set_key_ad = nonce + set_key_code_packed - expected_tag = sancus.crypto.mac(module_key, set_key_ad) - - if set_key_tag != expected_tag: - raise Error('Module response has wrong tag') - - if set_key_code != _ReactiveSetKeyResultCode.Ok: - raise Error('Got error code from module: {}'.format(set_key_code)) - - async def call(self, module, entry, arg=None): - module_id, entry_id = \ - await asyncio.gather(module.id, module.get_entry_id(entry)) - payload = self.__pack_int(module_id) + \ - self.__pack_int(entry_id) + \ - (b'' if arg is None else arg) - - await self.__send_reactive_command( - _ReactiveCommand.Call, payload, - log=('Sending call command to %s:%s (%s:%s) on %s', - module.name, entry, module_id, entry_id, self.name)) - - def __get_nonce(self, module): - nonce = self.__nonces[module] - self.__nonces[module] += 1 - return nonce - - async def __send_reactive_command(self, command, payload, result_len=0, - *, log=None): - packet = self.__create_reactive_packet(command, payload) - - # The Contiki implementation only supports 1 concurrent connection to - # the reactive server - async with self.__lock: - if log is not None: - logging.info(*log) - - reader, writer = await asyncio.open_connection(str(self.ip_address), - self.reactive_port) - - with contextlib.closing(writer): - writer.write(packet) - raw_result = await reader.readexactly(result_len + 1) - code = _ReactiveResultCode(raw_result[0]) - - if code != _ReactiveResultCode.Ok: - raise Error('Reactive command {} failed with code {}' - .format(command, code)) - - return _ReactiveResult(code, raw_result[1:]) - - - def __create_reactive_packet(self, command, payload): - return self.__pack_int(command) + \ - self.__pack_int(len(payload)) + \ - payload; - - async def __create_install_packet(self, module): - # The packet format is [LEN NAME \0 VID ELF_FILE] - # LEN is the length of the packet without LEN itself - - # Unfortunately, there is no asyncio support for file operations - with open(await module.binary, 'rb') as f: - file_data = f.read() - - # +3 is the NULL terminator of the name + 2 bytes of the VID - length = len(file_data) + len(module.name) + 3 - - return self.__pack_int(length) + \ - module.name.encode('ascii') + b'\0' + \ - self.__pack_int(self.vendor_id) + \ - file_data - - @staticmethod - def __pack_int(i): - return struct.pack('!H', i) - - @staticmethod - def __unpack_int(i): - return struct.unpack('!H', i)[0] - - - -class SancusNode(Node): - def __init__(self, name, vendor_id, vendor_key, - ip_address, deploy_port=2000, reactive_port=2001): - super().__init__(name, ip_address, deploy_port, reactive_port) - self.vendor_id = vendor_id - self.vendor_key = vendor_key - - -class _ReactiveCommand(IntEnum): - Connect = 0x0 - SetKey = 0x1 - PostEvent = 0x2 - Call = 0x3 - - -class _ReactiveResultCode(IntEnum): - Ok = 0x0 - ErrIllegalCommand = 0x1 - ErrPayloadFormat = 0x2 - ErrInternal = 0x3 - - -class _ReactiveResult: - def __init__(self, code, payload=bytearray()): - self.code = code - self.payload = payload - - -class _ReactiveSetKeyResultCode(IntEnum): - Ok = 0x0, - ErrIllegalConnection = 0x1, - ErrWrongTag = 0x2 diff --git 
a/reactivetools/nodes/__init__.py b/reactivetools/nodes/__init__.py
new file mode 100644
index 0000000..414b989
--- /dev/null
+++ b/reactivetools/nodes/__init__.py
@@ -0,0 +1,26 @@
+from .base import Node
+from .sancus import SancusNode
+from .native import NativeNode
+from .sgx import SGXNode
+from .trustzone import TrustZoneNode
+
+node_rules = {
+    "sancus": "sancus.yaml",
+    "sgx": "sgx.yaml",
+    "native": "native.yaml",
+    "trustzone": "trustzone.yaml"
+}
+
+node_funcs = {
+    "sancus": SancusNode.load,
+    "sgx": SGXNode.load,
+    "native": NativeNode.load,
+    "trustzone": TrustZoneNode.load
+}
+
+node_cleanup_coros = [
+    SancusNode.cleanup,
+    SGXNode.cleanup,
+    NativeNode.cleanup,
+    TrustZoneNode.cleanup
+]
diff --git a/reactivetools/nodes/base.py b/reactivetools/nodes/base.py
new file mode 100644
index 0000000..cfa38fe
--- /dev/null
+++ b/reactivetools/nodes/base.py
@@ -0,0 +1,413 @@
+import asyncio
+import logging
+import binascii
+
+from abc import ABC, abstractmethod
+
+from reactivenet import ReactiveCommand, Message, CommandMessage, ReactiveEntrypoint
+
+from .. import tools
+
+
+class Error(Exception):
+    pass
+
+
+class Node(ABC):
+    def __init__(self, name, ip_address, reactive_port, deploy_port, need_lock=False):
+        """
+        Generic attributes common to all Node subclasses
+
+        ### Attributes ###
+        name (str): name of the node
+        ip_address (ip_address): IP of the node
+        reactive_port (int): port where the event manager listens for events
+        deploy_port (int): port where the event manager listens for new modules
+        need_lock (bool): whether events must be delivered to this node one at
+                          a time, due to limitations of its EM
+        """
+
+        self.name = name
+        self.ip_address = ip_address
+        self.reactive_port = reactive_port
+        self.deploy_port = deploy_port
+
+        if need_lock:
+            self.__lock = asyncio.Lock()
+        else:
+            self.__lock = None
+
+    @staticmethod
+    @abstractmethod
+    def load(node_dict):
+        """
+        ### Description ###
+        Creates a XXXNode object from a dict
+        This should take all the information declared in the deployment descriptor
+        and store it into the class as attributes.
+
+        ### Parameters ###
+        node_dict (dict): dictionary containing the definition of the node
+
+        ### Returns ###
+        An instance of the XXXNode class
+        """
+
+    @abstractmethod
+    def dump(self):
+        """
+        ### Description ###
+        Creates a dict from the XXXNode object (opposite procedure wrt. load)
+        This dict is saved in the output deployment descriptor and serves two purposes:
+        1) to provide the deployer some information (e.g., keys used)
+        2) to serve as input for subsequent runs of the application
+        Hence, ideally load() and dump() should involve the same attributes
+
+        ### Parameters ###
+        self: Node object
+
+        ### Returns ###
+        `dict`: description of the object
+        """
+
+    @abstractmethod
+    async def deploy(self, module):
+        """
+        ### Description ###
+        Coroutine. Deploy a module to the node
+
+        How this is done depends on the architecture; in general, the binary of the
+        module must be sent to the Event Manager with a special event on the deploy_port
+
+        *NOTE*: this coroutine should check if the module has already been deployed
+                (doing nothing if this is the case), and set module.deployed to True
+                after deployment
+
+        ### Parameters ###
+        self: Node object
+        module (XXXModule): module object to deploy
+
+        ### Returns ###
+        """
+
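The load()/dump() contract is easiest to see in a minimal subclass. Below is a sketch of a hypothetical node type (DummyNode and its "dummy" type tag are illustrative, not part of this patch); it assumes the imports at the top of base.py:

class DummyNode(Node):
    @staticmethod
    def load(node_dict):
        # consume the same keys required by the generic node rules
        return DummyNode(node_dict['name'],
                         tools.resolve_ip(node_dict['host']),
                         node_dict['reactive_port'],
                         node_dict.get('deploy_port') or node_dict['reactive_port'])

    def dump(self):
        # mirror of load(): emit the same keys that were consumed,
        # so the output descriptor can seed the next run
        return {
            "type": "dummy",
            "name": self.name,
            "host": str(self.ip_address),
            "reactive_port": self.reactive_port,
            "deploy_port": self.deploy_port
        }

    async def deploy(self, module):
        module.deployed = True  # a real EM would receive the binary here

    async def set_key(self, module, conn_id, conn_io, encryption, key):
        pass  # a real subclass would wrap the key for the module here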
+    @abstractmethod
+    async def set_key(self, module, conn_id, conn_io, encryption, key):
+        """
+        ### Description ###
+        Coroutine. Sets the key of a specific connection
+
+        How this is done depends on the architecture; in general, the key and other
+        args must be sent to the Event Manager with a special event on the reactive_port
+
+        conn_io indicates which input/output/request/handler is involved in the connection
+        encryption indicates which crypto library is used in this connection
+
+        *NOTE*: this coroutine should use module.nonce as part of associated data
+                and increment it if everything went well
+
+        ### Parameters ###
+        self: Node object
+        module (XXXModule): module where the key is being set
+        conn_id (int): ID of the connection
+        conn_io (ConnectionIO): object of the ConnectionIO class (see connection.py)
+        encryption (Encryption): object of the Encryption class (see crypto.py)
+        key (bytes): connection key
+
+        ### Returns ###
+        """
+
+    # Default implementation of some functions.
+    # Override them in the subclasses if you need a different implementation.
+
+    @staticmethod
+    async def cleanup():
+        """
+        ### Description ###
+        Static coroutine. Cleanup operations to do before the application terminates
+
+        ### Parameters ###
+
+        ### Returns ###
+        """
+
+    async def connect(self, to_module, conn_id):
+        """
+        ### Description ###
+        Coroutine. Inform the EM of the source module that a new connection has
+        been established, so that events can be correctly forwarded to the recipient
+
+        ### Parameters ###
+        self: Node object
+        to_module (XXXModule): destination module
+        conn_id (int): ID of the connection
+
+        ### Returns ###
+        """
+        module_id = await to_module.get_id()
+
+        payload = tools.pack_int16(conn_id) + \
+            tools.pack_int16(module_id) + \
+            tools.pack_int8(int(to_module.node is self)) + \
+            tools.pack_int16(to_module.node.reactive_port) + \
+            to_module.node.ip_address.packed
+
+        command = CommandMessage(ReactiveCommand.Connect,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        await self._send_reactive_command(
+            command,
+            log='Connecting id {} to {}'.format(conn_id, to_module.name))
+
+    async def call(self, module, entry, arg=None, output=None):
+        """
+        ### Description ###
+        Coroutine. Call the entry point of a module
+
+        ### Parameters ###
+        self: Node object
+        module (XXXModule): target module
+        entry (str): name of the entry point to call
+        arg (bytes): argument to pass as a byte array (can be None)
+        output (str): file where the response payload is written; if None,
+                      the payload is logged instead
+
+        ### Returns ###
+        """
+        assert module.node is self
+
+        module_id, entry_id = \
+            await asyncio.gather(module.get_id(), module.get_entry_id(entry))
+
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(entry_id) + \
+            (b'' if arg is None else arg)
+
+        command = CommandMessage(ReactiveCommand.Call,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        response = await self._send_reactive_command(
+            command,
+            log='Sending call command to {}:{} ({}:{}) on {}'.format(
+                module.name, entry, module_id, entry_id, self.name)
+        )
+
+        if not response.ok():
+            logging.error("Received error code {}".format(str(response.code)))
+            return
+
+        if output is None:
+            logging.info("Response: \"{}\"".format(
+                binascii.hexlify(response.message.payload).decode('ascii')))
+        else:
+            with open(output, "wb") as f:
+                f.write(response.message.payload)
+
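For reference, the Connect payload built in connect() above is a fixed 11-byte layout for IPv4 nodes. A standalone sketch of the same packing, with made-up example values:

import ipaddress
import struct

conn_id, module_id, is_local, port = 3, 1, 0, 5000
ip = ipaddress.ip_address('192.168.0.10')

# [conn_id u16][module_id u16][local flag u8][reactive_port u16][ip 4B],
# all big-endian, matching tools.pack_int16/pack_int8
payload = struct.pack('!H', conn_id) + \
    struct.pack('!H', module_id) + \
    struct.pack('!B', is_local) + \
    struct.pack('!H', port) + \
    ip.packed

assert len(payload) == 11
print(payload.hex())  # 00030001001388c0a8000a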
+    async def output(self, connection, arg=None):
+        """
+        ### Description ###
+        Coroutine. Trigger the 'output' event of a direct connection
+
+        ### Parameters ###
+        self: Node object
+        connection (Connection): connection object
+        arg (bytes): argument to pass as a byte array (can be None)
+
+        ### Returns ###
+        """
+        assert connection.to_module.node is self
+
+        module_id = await connection.to_module.get_id()
+
+        if arg is None:
+            data = b''
+        else:
+            data = arg
+
+        cipher = await connection.encryption.encrypt(connection.key,
+                                                     tools.pack_int16(connection.nonce), data)
+
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(connection.id) + \
+            cipher
+
+        command = CommandMessage(ReactiveCommand.RemoteOutput,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        await self._send_reactive_command(
+            command,
+            log='Sending handle_output command of connection {}:{} to {} on {}'.format(
+                connection.id, connection.name, connection.to_module.name, self.name)
+        )
+
+    async def request(self, connection, arg=None, output=None):
+        """
+        ### Description ###
+        Coroutine. Trigger the 'request' event of a direct connection
+
+        ### Parameters ###
+        self: Node object
+        connection (Connection): connection object
+        arg (bytes): argument to pass as a byte array (can be None)
+        output (str): file where the decrypted response is written; if None,
+                      the response is logged instead
+
+        ### Returns ###
+        """
+        assert connection.to_module.node is self
+
+        module_id = await connection.to_module.get_id()
+
+        if arg is None:
+            data = b''
+        else:
+            data = arg
+
+        cipher = await connection.encryption.encrypt(connection.key,
+                                                     tools.pack_int16(connection.nonce), data)
+
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(connection.id) + \
+            cipher
+
+        command = CommandMessage(ReactiveCommand.RemoteRequest,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        response = await self._send_reactive_command(
+            command,
+            log='Sending handle_request command of connection {}:{} to {} on {}'.format(
+                connection.id, connection.name, connection.to_module.name, self.name)
+        )
+
+        if not response.ok():
+            logging.error("Received error code {}".format(str(response.code)))
+            return
+
+        resp_encrypted = response.message.payload
+        plaintext = await connection.encryption.decrypt(connection.key,
+                                                        tools.pack_int16(
+                                                            connection.nonce + 1),
+                                                        resp_encrypted)
+
+        if output is None:
+            logging.info("Response: \"{}\"".format(
+                binascii.hexlify(plaintext).decode('ascii')))
+        else:
+            with open(output, "wb") as f:
+                f.write(plaintext)
+
+    async def register_entrypoint(self, module, entry, frequency):
+        """
+        ### Description ###
+        Coroutine. Register an entry point for periodic tasks
+
+        ### Parameters ###
+        self: Node object
+        module (XXXModule): target module
+        entry (str): entry point to call
+        frequency (int): desired frequency at which the entry point will be called
+
+        ### Returns ###
+        """
+        assert module.node is self
+        module_id, entry_id = \
+            await asyncio.gather(module.get_id(), module.get_entry_id(entry))
+
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(entry_id) + \
+            tools.pack_int32(frequency)
+
+        command = CommandMessage(ReactiveCommand.RegisterEntrypoint,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        await self._send_reactive_command(
+            command,
+            log='Sending RegisterEntrypoint command of {}:{} ({}:{}) on {}'.format(
+                module.name, entry, module_id, entry_id, self.name)
+        )
+
+    async def disable_module(self, module):
+        """
+        ### Description ###
+        Coroutine. 
Sends a command to disable the module + + ### Parameters ### + self: Node object + module (XXXModule): target module + + ### Returns ### + """ + assert module.old_node is self + module_id, module_key = \ + await asyncio.gather(module.get_id(), module.get_key()) + + ad = tools.pack_int16(module.nonce) + module.nonce += 1 + + cipher = await module.get_default_encryption().encrypt(module_key, ad, ad) + + # The payload format is [sm_id, entry_id, 16 bit nonce, tag] + payload = tools.pack_int16(module_id) + \ + tools.pack_int16(ReactiveEntrypoint.Disable) + \ + ad + \ + cipher + + command = CommandMessage(ReactiveCommand.Call, + Message(payload), + self.ip_address, + self.reactive_port) + + await self._send_reactive_command( + command, + log='Sending disable command to module {}'.format(module.name) + ) + + async def _send_reactive_command(self, command, log=None): + """ + ### Description ### + Coroutine. Wrapper to __send_reactive_command (see below) + + ### Parameters ### + self: Node object + command (ReactiveCommand): command to send to the node + log (str): optional text message printed to stdout (can be None) + + ### Returns ### + """ + if self.__lock is not None: + async with self.__lock: + return await self.__send_reactive_command(command, log) + else: + return await self.__send_reactive_command(command, log) + + @staticmethod + async def __send_reactive_command(command, log): + """ + ### Description ### + Static coroutine. Helper function used to send a ReactiveCommand message to the node + + ReactiveCommand: defined in reactivenet: https://github.com/gianlu33/reactive-net + + ### Parameters ### + command (ReactiveCommand): command to send to the node + log (str): optional text message printed to stdout (can be None) + + ### Returns ### + """ + if log is not None: + logging.info(log) + + if command.has_response(): + response = await command.send_wait() + if not response.ok(): + raise Error('Reactive command {} failed with code {}' + .format(str(command.code), str(response.code))) + return response + + await command.send() + return None diff --git a/reactivetools/nodes/native.py b/reactivetools/nodes/native.py new file mode 100644 index 0000000..3dcaad4 --- /dev/null +++ b/reactivetools/nodes/native.py @@ -0,0 +1,54 @@ +from reactivenet import CommandMessageLoad + +import aiofile + +from .sgx import SGXBase +from .. 
import tools +from ..dumpers import * +from ..loaders import * + + +class NativeNode(SGXBase): + type = "native" + + @staticmethod + def load(node_dict): + name = node_dict['name'] + ip_address = tools.resolve_ip(node_dict['host']) + reactive_port = node_dict['reactive_port'] + deploy_port = node_dict.get('deploy_port') or reactive_port + module_id = node_dict.get('module_id') + + return NativeNode(name, ip_address, reactive_port, deploy_port, + module_id) + + def dump(self): + return { + "type": self.type, + "name": self.name, + "host": str(self.ip_address), + "reactive_port": self.reactive_port, + "deploy_port": self.deploy_port, + "module_id": self._moduleid + } + + async def deploy(self, module): + if module.deployed: + return + + async with aiofile.AIOFile(await module.binary, "rb") as f: + binary = await f.read() + + payload = tools.pack_int32(len(binary)) + \ + binary + + command = CommandMessageLoad(payload, + self.ip_address, + self.deploy_port) + + await self._send_reactive_command( + command, + log='Deploying {} on {}'.format(module.name, self.name) + ) + + module.deployed = True diff --git a/reactivetools/nodes/sancus.py b/reactivetools/nodes/sancus.py new file mode 100644 index 0000000..4ebe35d --- /dev/null +++ b/reactivetools/nodes/sancus.py @@ -0,0 +1,167 @@ +import asyncio +import logging +import binascii +from enum import IntEnum + +from reactivenet import ReactiveCommand, ReactiveEntrypoint, Message, \ + CommandMessage + +import aiofile + +from .base import Node +from .. import tools +from ..crypto import Encryption +from ..dumpers import * +from ..loaders import * + + +class Error(Exception): + pass + + +class SetKeyResultCode(IntEnum): + Ok = 0x0 + IllegalConnection = 0x1 + MalformedPayload = 0x2 + InternalError = 0x3 + + +class SancusNode(Node): + def __init__(self, name, vendor_id, vendor_key, + ip_address, reactive_port, deploy_port): + super().__init__(name, ip_address, reactive_port, deploy_port, need_lock=True) + + self.vendor_id = vendor_id + self.vendor_key = vendor_key + + @staticmethod + def load(node_dict): + name = node_dict['name'] + vendor_id = node_dict['vendor_id'] + vendor_key = parse_key(node_dict['vendor_key']) + ip_address = tools.resolve_ip(node_dict['host']) + reactive_port = node_dict['reactive_port'] + deploy_port = node_dict.get('deploy_port') or reactive_port + + return SancusNode(name, vendor_id, vendor_key, + ip_address, reactive_port, deploy_port) + + def dump(self): + return { + "type": "sancus", + "name": self.name, + "host": str(self.ip_address), + "vendor_id": self.vendor_id, + "vendor_key": dump(self.vendor_key), + "reactive_port": self.reactive_port, + "deploy_port": self.deploy_port + } + + async def deploy(self, module): + assert module.node is self + + if module.deployed: + return + + async with aiofile.AIOFile(await module.binary, "rb") as f: + file_data = await f.read() + + # The packet format is [NAME \0 VID ELF_FILE] + payload = module.deploy_name.encode('ascii') + b'\0' + \ + tools.pack_int16(self.vendor_id) + \ + file_data + + command = CommandMessage(ReactiveCommand.Load, + Message(payload), + self.ip_address, + self.deploy_port) + + res = await self._send_reactive_command( + command, + log='Deploying {} on {}'.format(module.name, self.name) + ) + + sm_id = tools.unpack_int16(res.message.payload[:2]) + if sm_id == 0: + raise Error('Deploying {} on {} failed' + .format(module.name, self.name)) + + symtab = res.message.payload[2:] + symtab_file = tools.create_tmp(suffix='.ld', dir_name=module.out_dir) + + # aiofile for write 
operations is bugged (version 3.3.3):
+        # it raises a "bad file descriptor" error after writes, so we use
+        # the plain file API here instead
+        with open(symtab_file, "wb") as f:
+            f.write(symtab[:-1])  # Drop last 0 byte
+
+        module.deployed = True
+        return sm_id, symtab_file
+
+    async def attest(self, module):
+        assert module.node is self
+
+        module_id, module_key = await asyncio.gather(module.id, module.key)
+
+        challenge = tools.generate_key(16)
+
+        # The payload format is [sm_id, entry_id, 16 bit challenge length, challenge]
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(ReactiveEntrypoint.Attest) + \
+            tools.pack_int16(len(challenge)) + \
+            challenge
+
+        command = CommandMessage(ReactiveCommand.Call,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        res = await self._send_reactive_command(
+            command,
+            log='Attesting {}'.format(module.name)
+        )
+
+        # The result format is [tag] where the tag is the challenge's MAC
+        challenge_response = res.message.payload
+        expected_tag = await Encryption.SPONGENT.mac(module_key, challenge)
+
+        if challenge_response != expected_tag:
+            raise Error('Attestation of {} failed'.format(module.name))
+
+        logging.info("Attestation of {} succeeded".format(module.name))
+        module.attested = True
+
+    async def set_key(self, module, conn_id, conn_io, encryption, key):
+        assert module.node is self
+        assert encryption in module.get_supported_encryption()
+
+        module_id, module_key, io_id = await asyncio.gather(
+            module.id, module.key, conn_io.get_index(module))
+
+        nonce = tools.pack_int16(module.nonce)
+        io_id = tools.pack_int16(io_id)
+        conn_id_packed = tools.pack_int16(conn_id)
+        ad = conn_id_packed + io_id + nonce
+
+        module.nonce += 1
+
+        cipher = await Encryption.SPONGENT.encrypt(module_key, ad, key)
+
+        # The payload format is [sm_id, entry_id, conn_id, index, 16 bit nonce,
+        # wrapped(key), tag], where the tag covers the connection ID, the index
+        # and the nonce
+        payload = tools.pack_int16(module_id) + \
+            tools.pack_int16(ReactiveEntrypoint.SetKey) + \
+            ad + \
+            cipher
+
+        command = CommandMessage(ReactiveCommand.Call,
+                                 Message(payload),
+                                 self.ip_address,
+                                 self.reactive_port)
+
+        await self._send_reactive_command(
+            command,
+            log='Setting key of {}:{} on {} to {}'.format(
+                module.name, conn_io.name, self.name,
+                binascii.hexlify(key).decode('ascii'))
+        )
diff --git a/reactivetools/nodes/sgx.py b/reactivetools/nodes/sgx.py
new file mode 100644
index 0000000..f11f184
--- /dev/null
+++ b/reactivetools/nodes/sgx.py
@@ -0,0 +1,128 @@
+from abc import abstractmethod
+import binascii
+
+from reactivenet import ReactiveCommand, ReactiveEntrypoint, Message, \
+    CommandMessage, CommandMessageLoad
+
+import aiofile
+
+from .base import Node
+from .. 
import tools +from ..crypto import Encryption +from ..dumpers import * +from ..loaders import * + + +class Error(Exception): + pass + + +class SGXBase(Node): + def __init__(self, name, ip_address, reactive_port, deploy_port, module_id): + super().__init__(name, ip_address, reactive_port, deploy_port) + + self._moduleid = module_id if module_id else 1 + + @abstractmethod + async def deploy(self, module): + pass + + async def set_key(self, module, conn_id, conn_io, encryption, key): + assert module.node is self + assert encryption in module.get_supported_encryption() + + io_id = await conn_io.get_index(module) + nonce = module.nonce + module.nonce += 1 + + ad = tools.pack_int8(encryption) + \ + tools.pack_int16(conn_id) + \ + tools.pack_int16(io_id) + \ + tools.pack_int16(nonce) + + cipher = await Encryption.AES.encrypt(await module.get_key(), ad, key) + + payload = tools.pack_int16(module.id) + \ + tools.pack_int16(ReactiveEntrypoint.SetKey) + \ + ad + \ + cipher + + command = CommandMessage(ReactiveCommand.Call, + Message(payload), + self.ip_address, + self.reactive_port) + + await self._send_reactive_command( + command, + log='Setting key of connection {} ({}:{}) on {} to {}'.format( + conn_id, module.name, conn_io.name, self.name, + binascii.hexlify(key).decode('ascii')) + ) + + def get_module_id(self): + id_ = self._moduleid + self._moduleid += 1 + + return id_ + + +class SGXNode(SGXBase): + type = "sgx" + + def __init__(self, name, ip_address, reactive_port, deploy_port, module_id, + aesm_host, aesm_port): + super().__init__(name, ip_address, reactive_port, deploy_port, module_id) + + self.aesm_host = aesm_host or ip_address + self.aesm_port = aesm_port or 13741 + + @staticmethod + def load(node_dict): + name = node_dict['name'] + ip_address = tools.resolve_ip(node_dict['host']) + reactive_port = node_dict['reactive_port'] + deploy_port = node_dict.get('deploy_port') or reactive_port + module_id = node_dict.get('module_id') + aesm_host = node_dict.get('aesm_host') + aesm_port = node_dict.get('aesm_port') + + return SGXNode(name, ip_address, reactive_port, deploy_port, + module_id, aesm_host, aesm_port) + + def dump(self): + return { + "type": self.type, + "name": self.name, + "host": str(self.ip_address), + "reactive_port": self.reactive_port, + "deploy_port": self.deploy_port, + "module_id": self._moduleid, + "aesm_host": str(self.aesm_host), + "aesm_port": self.aesm_port + } + + async def deploy(self, module): + if module.deployed: + return + + async with aiofile.AIOFile(await module.sgxs, "rb") as f: + sgxs = await f.read() + + async with aiofile.AIOFile(await module.sig, "rb") as f: + sig = await f.read() + + payload = tools.pack_int32(len(sgxs)) + \ + sgxs + \ + tools.pack_int32(len(sig)) + \ + sig + + command = CommandMessageLoad(payload, + self.ip_address, + self.deploy_port) + + await self._send_reactive_command( + command, + log='Deploying {} on {}'.format(module.name, self.name) + ) + + module.deployed = True diff --git a/reactivetools/nodes/trustzone.py b/reactivetools/nodes/trustzone.py new file mode 100644 index 0000000..3a3f162 --- /dev/null +++ b/reactivetools/nodes/trustzone.py @@ -0,0 +1,164 @@ +import logging +import binascii +import struct + +from reactivenet import ReactiveCommand, ReactiveEntrypoint, Message, \ + CommandMessage, CommandMessageLoad + +import aiofile + +from .base import Node +from .. 
import tools +from ..crypto import Encryption, hash_sha256 +from ..dumpers import * +from ..loaders import * + + +class Error(Exception): + pass + + +class TrustZoneNode(Node): + def __init__(self, name, ip_address, reactive_port, deploy_port, + vendor_id, node_key, vendor_key, module_id): + super().__init__(name, ip_address, reactive_port, deploy_port, need_lock=False) + + self.vendor_id = vendor_id + self.node_key = node_key + self.vendor_key = vendor_key + self._moduleid = module_id if module_id else 1 + + @staticmethod + def load(node_dict): + name = node_dict['name'] + ip_address = tools.resolve_ip(node_dict['host']) + reactive_port = node_dict['reactive_port'] + deploy_port = node_dict.get('deploy_port') or reactive_port + vendor_id = node_dict['vendor_id'] + node_key = parse_key(node_dict.get('node_key')) + vendor_key = parse_key(node_dict.get('vendor_key')) + module_id = node_dict.get('module_id') + + if node_key is None and vendor_key is None: + raise Error("At least one between node key and vendor key is needed") + + # generate vendor key right away, if needed + if vendor_key is None: + input_hash = node_key + struct.pack('to_input or from_request->to_handler: + has_value(dict_, "direct", True) or + (is_present(dict_, "from_output") and is_present(dict_, "to_input")) or + (is_present(dict_, "from_request") and is_present(dict_, "to_handler")) + +key mandatory after establishment: + not has_value(dict_, "established", True) or + (has_value(dict_, "established", True) and is_present(dict_, "key")) + +nonce mandatory after establishment: + not has_value(dict_, "established", True) or + (has_value(dict_, "established", True) and is_present(dict_, "nonce")) + +id mandatory after establishment: + not has_value(dict_, "established", True) or + (has_value(dict_, "established", True) and is_present(dict_, "id")) + +name mandatory after establishment: + not has_value(dict_, "established", True) or + (has_value(dict_, "established", True) and is_present(dict_, "name")) + +direct mandatory after establishment: + not has_value(dict_, "established", True) or + (has_value(dict_, "established", True) and is_present(dict_, "direct")) + +from_module and to_module must be different: + dict_.get("from_module") != dict_["to_module"] + +only authorized keys: + authorized_keys(dict_, ["name", "from_module", "from_output", + "from_request", "to_module", "to_input", "to_handler", + "encryption", "key", "id", "direct", "nonce", "established"]) diff --git a/reactivetools/rules/default/manager.yaml b/reactivetools/rules/default/manager.yaml new file mode 100644 index 0000000..9e56cd9 --- /dev/null +++ b/reactivetools/rules/default/manager.yaml @@ -0,0 +1,22 @@ +# PeriodicEvent rules + +## required keys ## +host required: + is_present(dict_, "host") + +port required: + is_present(dict_, "port") + +key required: + is_present(dict_, "key") + + +## type of the required values ## +host must be a str: + isinstance(dict_["host"], str) + +port must be a positive u16: + is_positive_number(dict_["port"]) + +key must be a list: + isinstance(dict_["key"], list) diff --git a/reactivetools/rules/default/module.yaml b/reactivetools/rules/default/module.yaml new file mode 100644 index 0000000..fdde7cd --- /dev/null +++ b/reactivetools/rules/default/module.yaml @@ -0,0 +1,44 @@ +# Generic Module rules + +## required keys ## +type required: + is_present(dict_, "type") + +name required: + is_present(dict_, "name") + +node required: + is_present(dict_, "node") + + +## type of the required values ## +type must be a str: + 
isinstance(dict_["type"], str) + +name must be a str: + isinstance(dict_["name"], str) + +node must be a str: + isinstance(dict_["node"], str) + +old_node must be a str: + not is_present(dict_, "old_node") or + (is_present(dict_, "old_node") and isinstance(dict_["old_node"], str)) + +priority must be an int: + not is_present(dict_, "priority") or + (is_present(dict_, "priority") and isinstance(dict_["priority"], int)) + +nonce must be an int: + not is_present(dict_, "nonce") or + (is_present(dict_, "nonce") and isinstance(dict_["nonce"], int)) + +deployed must be a bool: + not is_present(dict_, "deployed") or + (is_present(dict_, "deployed") and isinstance(dict_["deployed"], bool)) + +attested must be a bool: + not is_present(dict_, "attested") or + (is_present(dict_, "attested") and isinstance(dict_["attested"], bool)) + +## Other constraints ## diff --git a/reactivetools/rules/default/node.yaml b/reactivetools/rules/default/node.yaml new file mode 100644 index 0000000..3b4ed32 --- /dev/null +++ b/reactivetools/rules/default/node.yaml @@ -0,0 +1,32 @@ +# Generic Node rules + +## required keys ## +type required: + is_present(dict_, "type") + +name required: + is_present(dict_, "name") + +ip_address required: + is_present(dict_, "host") + +reactive_port required: + is_present(dict_, "reactive_port") + + +## type of the required values ## +type must be a str: + isinstance(dict_["type"], str) + +name must be a str: + isinstance(dict_["name"], str) + +ip_address must be a str: + isinstance(dict_["host"], str) + +reactive_port must be a positive u16: + is_positive_number(dict_["reactive_port"]) + +deploy_port must be a positive u16: + not is_present(dict_, "deploy_port") or + (is_present(dict_, "deploy_port") and is_positive_number(dict_["deploy_port"])) diff --git a/reactivetools/rules/default/periodic_event.yaml b/reactivetools/rules/default/periodic_event.yaml new file mode 100644 index 0000000..7a0d63d --- /dev/null +++ b/reactivetools/rules/default/periodic_event.yaml @@ -0,0 +1,26 @@ +# PeriodicEvent rules + +## required keys ## +module required: + is_present(dict_, "module") + +entry required: + is_present(dict_, "entry") + +frequency required: + is_present(dict_, "frequency") + + +## type of the required values ## +module must be a str: + isinstance(dict_["module"], str) + +entry must be a str: + isinstance(dict_["entry"], str) + +frequency must be a positive u32: + is_positive_number(dict_["frequency"], bits=32) + +established must be a bool: + not is_present(dict_, "established") or + (is_present(dict_, "established") and isinstance(dict_["established"], bool)) diff --git a/reactivetools/rules/evaluators.py b/reactivetools/rules/evaluators.py new file mode 100644 index 0000000..489b8ec --- /dev/null +++ b/reactivetools/rules/evaluators.py @@ -0,0 +1,44 @@ +import os +import logging + +from ..descriptor import DescriptorType + + +def is_present(dict_, key): + return key in dict_ and dict_[key] is not None + + +def has_value(dict_, key, value): + return is_present(dict_, key) and dict_[key] == value + + +def is_positive_number(val, bits=16): + if not isinstance(val, int): + return False + + if not 1 <= val <= 2**bits - 1: + return False + + return True + + +def authorized_keys(dict_, keys): + for key in dict_: + if key not in keys: + return False + + return True + + +# file: relative path of the file from the "rules" directory +# e.g., i want to load the rules of sancus.yaml under nodes folder: +# file == "nodes/sancus.yaml" +def load_rules(file): + try: + path = 
os.path.join(os.path.dirname(__file__), file) + data = DescriptorType.YAML.load(path) + return data if data is not None else {} + except Exception as e: + logging.warning("Something went wrong during load of {}".format(file)) + logging.debug(e) + return {} diff --git a/reactivetools/rules/modules/native.yaml b/reactivetools/rules/modules/native.yaml new file mode 100644 index 0000000..9a0484b --- /dev/null +++ b/reactivetools/rules/modules/native.yaml @@ -0,0 +1 @@ +# NativeModule rules diff --git a/reactivetools/rules/modules/sancus.yaml b/reactivetools/rules/modules/sancus.yaml new file mode 100644 index 0000000..a791761 --- /dev/null +++ b/reactivetools/rules/modules/sancus.yaml @@ -0,0 +1 @@ +# SancusModule rules diff --git a/reactivetools/rules/modules/sgx.yaml b/reactivetools/rules/modules/sgx.yaml new file mode 100644 index 0000000..8c462cf --- /dev/null +++ b/reactivetools/rules/modules/sgx.yaml @@ -0,0 +1 @@ +# SGXModule rules diff --git a/reactivetools/rules/modules/trustzone.yaml b/reactivetools/rules/modules/trustzone.yaml new file mode 100644 index 0000000..8e48073 --- /dev/null +++ b/reactivetools/rules/modules/trustzone.yaml @@ -0,0 +1 @@ +# TrustZoneModule rules diff --git a/reactivetools/rules/nodes/native.yaml b/reactivetools/rules/nodes/native.yaml new file mode 100644 index 0000000..fb8c9e3 --- /dev/null +++ b/reactivetools/rules/nodes/native.yaml @@ -0,0 +1 @@ +# NativeNode rules diff --git a/reactivetools/rules/nodes/sancus.yaml b/reactivetools/rules/nodes/sancus.yaml new file mode 100644 index 0000000..6be83d2 --- /dev/null +++ b/reactivetools/rules/nodes/sancus.yaml @@ -0,0 +1 @@ +# SancusNode rules diff --git a/reactivetools/rules/nodes/sgx.yaml b/reactivetools/rules/nodes/sgx.yaml new file mode 100644 index 0000000..911c4ff --- /dev/null +++ b/reactivetools/rules/nodes/sgx.yaml @@ -0,0 +1 @@ +# SGXNode rules diff --git a/reactivetools/rules/nodes/trustzone.yaml b/reactivetools/rules/nodes/trustzone.yaml new file mode 100644 index 0000000..fd3aca0 --- /dev/null +++ b/reactivetools/rules/nodes/trustzone.yaml @@ -0,0 +1 @@ +# TrustZoneNode rules diff --git a/reactivetools/tools.py b/reactivetools/tools.py index 5550905..3aceaa7 100644 --- a/reactivetools/tools.py +++ b/reactivetools/tools.py @@ -2,29 +2,183 @@ import tempfile import os import asyncio +import struct +from enum import Enum +import socket +import ipaddress +import re + +from . 
diff --git a/reactivetools/tools.py b/reactivetools/tools.py
index 5550905..3aceaa7 100644
--- a/reactivetools/tools.py
+++ b/reactivetools/tools.py
@@ -2,29 +2,183 @@
 import tempfile
 import os
 import asyncio
+import struct
+from enum import Enum
+import socket
+import ipaddress
+import re
+
+from . import glob
 
 
 class ProcessRunError(Exception):
-    def __init__(self, args, result):
+    def __init__(self, cmd, args, result):
+        super().__init__()
+        self.cmd = cmd
         self.args = args
         self.result = result
 
     def __str__(self):
-        return 'Command "{}" exited with code {}' \
-            .format(' '.join(self.args), self.result)
+        return 'Command "{} {}" exited with code {}' \
+            .format(self.cmd, ' '.join(self.args), self.result)
+
+
+class Error(Exception):
+    pass
+
+
+Verbosity = Enum('Verbosity', ['Normal', 'Verbose', 'Debug'])
+
+
+def get_verbosity():
+    log_at = logging.getLogger().isEnabledFor
+
+    if log_at(logging.DEBUG):
+        return Verbosity.Debug
+    if log_at(logging.INFO):
+        return Verbosity.Verbose
+    return Verbosity.Normal
+
+
+def get_stderr():
+    if get_verbosity() == Verbosity.Debug:
+        return None
+
+    return open(os.devnull, "wb")
+
+
+def init_future(*results):
+    if all(map(lambda x: x is None, results)):
+        return None
+
+    fut = asyncio.Future()
+    result = results[0] if len(results) == 1 else results
+    fut.set_result(result)
+    return fut
 
-async def run_async(*args):
-    logging.debug(' '.join(args))
-    process = await asyncio.create_subprocess_exec(*args)
+async def run_async(program, *args, output_file=os.devnull, env=None):
+    logging.debug('{} {}'.format(program, ' '.join(args)))
+
+    process = await asyncio.create_subprocess_exec(program,
+                                                   *args,
+                                                   stdout=open(
+                                                       output_file, 'wb'),
+                                                   stderr=get_stderr(),
+                                                   env=env)
     result = await process.wait()
 
     if result != 0:
-        raise ProcessRunError(args, result)
+        raise ProcessRunError(program, args, result)
+
+
+async def run_async_background(program, *args, env=None):
+    logging.debug('{} {}'.format(program, ' '.join(args)))
+    process = await asyncio.create_subprocess_exec(program,
+                                                   *args,
+                                                   stdout=open(
+                                                       os.devnull, 'wb'),
+                                                   stderr=get_stderr(),
+                                                   env=env)
+    return process
+
+
+async def run_async_output(program, *args, env=None):
+    cmd = '{} {}'.format(program, ' '.join(args))
+    logging.debug(cmd)
+    process = await asyncio.create_subprocess_exec(program,
+                                                   *args,
+                                                   stdout=asyncio.subprocess.PIPE,
+                                                   stderr=asyncio.subprocess.PIPE,
+                                                   env=env)
+    out, err = await process.communicate()
+    result = await process.wait()
+
+    if result != 0:
+        logging.error(err)
+        raise ProcessRunError(program, args, result)
+
+    return out, err
+
+
+async def run_async_shell(*args, env=None):
+    cmd = ' '.join(args)
+    logging.debug(cmd)
+    process = await asyncio.create_subprocess_shell(cmd,
+                                                    stdout=open(
+                                                        os.devnull, 'wb'),
+                                                    stderr=get_stderr(),
+                                                    env=env)
+    result = await process.wait()
 
-def create_tmp(suffix=''):
-    fd, path = tempfile.mkstemp(suffix=suffix)
+    if result != 0:
+        raise ProcessRunError("", args, result)
+
+
+def resolve_ip(host):
+    # first, try to parse host as an IP address
+    try:
+        return ipaddress.ip_address(host)
+    except ValueError:
+        pass
+
+    # if it is not an IP address, try to resolve it as a hostname
+    try:
+        ip = socket.gethostbyname(host)
+        return ipaddress.ip_address(ip)
+    except (socket.gaierror, ValueError):
+        pass
+
+    # otherwise, raise an exception
+    raise Error("Invalid host: {}".format(host))
+
+
+def create_tmp(suffix='', dir_name=''):
+    dir_ = os.path.join(glob.BUILD_DIR, dir_name)
+    fd, path = tempfile.mkstemp(suffix=suffix, dir=dir_)
     os.close(fd)
     return path
 
+
+def create_tmp_dir():
+    return tempfile.mkdtemp(dir=glob.BUILD_DIR)
+
+
+def generate_key(length):
+    return os.urandom(length)
+
+
+def pack_int8(i):
+    return struct.pack('!B', i)
+
+
+def unpack_int8(i):
+    return struct.unpack('!B', i)[0]
+
+
+def pack_int16(i):
+    return struct.pack('!H', i)
+
+
+def unpack_int16(i):
+    return struct.unpack('!H', i)[0]
+
+
+def pack_int32(i):
+    return struct.pack('!I', i)
+
+
+def unpack_int32(i):
+    return struct.unpack('!I', i)[0]
+
+
+def increment_value_in_string(s):
+    # split s into a prefix and its trailing number; the prefix group is
+    # non-greedy so that the number group captures all trailing digits
+    # (a greedy prefix would turn "node19" into "node110" instead of "node20")
+    match = re.match(r"^(.*?)([0-9]+)$", s)
+
+    if not match:
+        return s + "_2"
+
+    name = match.group(1)
+    val = int(match.group(2))
+
+    return "{}{}".format(name, val + 1)
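The tools.py helpers are small but easy to misread, so here is a quick behavioral sketch (an illustration only, assuming the package is importable, a POSIX environment, and Python 3.7+ for asyncio.run):

    import asyncio
    from reactivetools import tools

    # '!H' means network byte order, unsigned 16-bit: round-trips a port number
    assert tools.unpack_int16(tools.pack_int16(5000)) == 5000
    assert tools.pack_int16(5000) == b'\x13\x88'

    # accepts an IP literal or a resolvable hostname; raises tools.Error otherwise
    print(tools.resolve_ip("127.0.0.1"))  # -> 127.0.0.1

    # derives a fresh name by appending or incrementing a trailing number
    assert tools.increment_value_in_string("node") == "node_2"
    assert tools.increment_value_in_string("node19") == "node20"

    # runs a command, raising ProcessRunError on a non-zero exit status
    asyncio.run(tools.run_async("true"))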
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..89dd749
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+pyelftools==0.27
+aiofile==3.3.3
+pycryptodome==3.10.1
+PyYAML==5.4.1
+reactive-net
+rust-sgx-gen
+tz-codegen
\ No newline at end of file
diff --git a/setup.py b/setup.py
index f0c971a..5007106 100644
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,30 @@
-from setuptools import setup, find_packages
+import setuptools
 
-setup(
-    name='reactivetools',
-    version='0.1',
-    packages=find_packages(),
-    install_requires=['pyelftools'],
+with open("README.md", "r", encoding="utf-8") as fh:
+    long_description = fh.read()
+
+with open("requirements.txt", "r") as f:
+    requirements = f.readlines()
+
+setuptools.setup(
+    name="reactive-tools",
+    version="0.6.3",
+    author="Gianluca Scopelliti",
+    author_email="gianlu.1033@gmail.com",
+    description="Deployment tools for the Authentic Execution framework",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/AuthenticExecution/reactive-tools",
+    packages=setuptools.find_packages(),
+    install_requires=requirements,
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: POSIX :: Linux",
+    ],
+    python_requires='>=3.6',
     entry_points={
         'console_scripts': ['reactive-tools = reactivetools.cli:main']
     },
-
-    author='Job Noorman',
-    author_email='job.noorman@cs.kuleuven.be'
+    include_package_data=True
 )
-
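With this setup.py in place, the package installs like any other Python project and exposes the reactive-tools console script declared in entry_points; a minimal sketch, assuming a Python >= 3.6 environment:

    pip install .       # install_requires pulls in requirements.txt
    reactive-tools      # console script mapped to reactivetools.cli:main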